Back to home page

EIC code displayed by LXR

 
 

    


Warning, /harvester/templates/panda/panda_harvester.cfg.rpmnew.template is written in an unsupported language. File is not indexed.

0001 #########################
0002 #
0003 # Master parameters
0004 #
0005 
0006 [master]
0007 
0008 # user name of the daemon process
0009 uname = FIXME
0010 
0011 # group name of the daemon process
0012 gname = FIXME
0013 
0014 # logger name
0015 loggername = harvester
0016 
0017 # harvester id - unique id as registered also in panda server
0018 harvester_id = FIXME
0019 
0020 # port number of debugger
0021 debugger_port = 19550
0022 
0023 # capability to dynamically change plugins
0024 dynamic_plugin_change = False
0025 
0026 
0027 
0028 
0029 ##########################
0030 #
0031 # Database parameters
0032 #
0033 
0034 [db]
0035 
0036 # verbose
0037 verbose = False
0038 
0039 # use inspect for decorator of verbose messages
0040 useInspect = False
0041 
0042 # number of database connections in each process
0043 nConnections = 10
0044 
0045 # database engine : sqlite or mariadb
0046 engine = sqlite
0047 
0048 # database filename for sqlite. Better to use local disk if possible since sqlite doesn't like NAS
0049 database_filename = FIXME
0050 
0051 # use MySQLdb for mariadb access
0052 useMySQLdb = False
0053 
0054 # user name for MariaDB. N/A for sqlite
0055 user = harvester
0056 
0057 # password for MariaDB. N/A for sqlite
0058 password = FIXME
0059 
0060 # schema for MariaDB. N/A for sqlite
0061 schema = HARVESTER
0062 
0063 # host name for MariaDB. N/A for sqlite
0064 host = localhost
0065 
0066 # port number for MariaDB. N/A for sqlite
0067 port = 3306
0068 
0069 # max time in seconds to keep trying to reconnect DB before timeout
0070 reconnectTimeout = 300
0071 
0072 # synchronize max workerID when starting up
0073 syncMaxWorkerID = False
0074 
0075 
0076 
0077 
0078 ##########################
0079 #
0080 # FIFO parameters
0081 #
0082 
0083 [fifo]
0084 
0085 # module and class names to provide the fifo queue
0086 fifoModule = pandaharvester.harvesterfifo.sqlite_fifo
0087 fifoClass = SqliteFifo
0088 
0089 # database filename for sqlite fifo plugin
0090 # must be different from main Harvester DB and other fifo DBs if using sqlite
0091 # different db filenames for fifos of different titles
0092 # a title can be named after a harvester agent (e.g. monitor) or other special components
0093 # placeholder $(TITLE) should be used in filename; it will then be changed to the title name
0094 database_filename = /dev/shm/$(TITLE)_fifo.db
0095 
0096 
0097 
0098 
0099 ##########################
0100 #
0101 # Communicator parameters
0102 #
0103 
0104 [communicator]
0105 
0106 # module name of Communicator
0107 moduleName = pandaharvester.harvestercommunicator.panda_communicator
0108 
0109 # class name of Communicator
0110 className = PandaCommunicator
0111 
0112 # number of connections
0113 nConnections = 5
0114 
0115 
0116 
0117 
0118 ##########################
0119 #
0120 # Panda Connection parameters
0121 # (required only when PandaCommunicator is used to communicate with WMS)
0122 #
0123 
0124 [pandacon]
0125 
0126 # timeout
0127 timeout = 180
0128 
0129 # auth type x509 (default) or oidc
0130 auth_type = x509
0131 
0132 # CA file
0133 ca_cert = /etc/pki/tls/certs/CERN-bundle.pem
0134 
0135 # certificate for x509
0136 cert_file = FIXME
0137 
0138 # key for x509
0139 key_file = FIXME
0140 
0141 # token for oidc. bare string or filename (file:/path)
0142 auth_token = FIXME
0143 
0144 # origin for oidc
0145 auth_origin = FIXME
0146 
0147 # base URL via http
0148 pandaURL = http://pandaserver.cern.ch:25080/server/panda
0149 server_api_url = http://pandaserver.cern.ch:25080/api/v1
0150 
0151 # base URL via https
0152 pandaURLSSL = https://pandaserver.cern.ch:25443/server/panda
0153 server_api_url_ssl = https://pandaserver.cern.ch:25443/api/v1
0154 
0155 # base URL for write access to log cache server
0156 pandaCacheURL_W = https://aipanda011.cern.ch:25443/server/panda
0157 cache_api_url_ssl = https://pandacache.cern.ch:25443/api/v1
0158 
0159 # base URL for read access to log cache server
0160 pandaCacheURL_R = https://aipanda011.cern.ch:25443/cache
0161 
0162 # verbose
0163 verbose = False
0164 
0165 # use inspect for decorator of verbose messages
0166 useInspect = False
0167 
0168 # event size when getting events
0169 getEventsChunkSize = 5120
0170 
0171 # configuration file to support multiple auth types with various hosts: a json dump of
0172 # {"host:port": {"auth_type": "x509 or oidc", "cert_file": /path/to/cert, "key_file": /path/to/key,
0173 #                "ca_cert": /path/to/ca_cert, "auth_token": "token or file:/path/to/token"},
0174 #  ...}
0175 # the default values specified in cfg are used if the host:port or keys are not found in the json
0176 
0177 #multihost_auth_config = /path/to/multihost_auth_config.json
0178 
0179 
0180 ##########################
0181 #
0182 # Queue Config parameters
0183 #
0184 
0185 [qconf]
0186 
0187 # config file
0188 configFile = panda_queueconfig.json
0189 
0190 # enable config from cacher "queues_config_file"
0191 configFromCacher = False
0192 
0193 # queue list : one queue name following a whitespace per line. Or just ALL if all queues in the configFile are used
0194 queueList =
0195  FIXME_1
0196  FIXME_2
0197 
0198 # module and class names to resolve queue names to panda queue names
0199 resolverModule = pandaharvester.harvestermisc.info_utils
0200 resolverClass = PandaQueuesDict
0201 
0202 # configuration of the resolver in JSON
0203 #resolverConfig =
0204 #  {
0205 #    "refreshPeriod": 300
0206 #  }
0207 
0208 # enable auto-blacklisting of resolver which returns status='offline' to blacklist the queue
0209 autoBlacklist = False
0210 
0211 # restrict to a certain pilot version (optional)
0212 #pilotVersion = 2
0213 
0214 # update interval in sec (default: 600) - period to update qconf
0215 updateInterval = 600
0216 
0217 # check interval in sec (default: 5) - period for other agent threads to check last qconf update
0218 checkInterval = 5
0219 
0220 
0221 ##########################
0222 #
0223 # Command manager parameters
0224 #
0225 [commandmanager]
0226 
0227 # bulk size for panda server interactions
0228 commands_bulk_size = 20
0229 
0230 # sleep interval in sec
0231 sleepTime = 5
0232 
0233 
0234 
0235 
0236 ##########################
0237 #
0238 # Job Fetcher parameters
0239 #
0240 
0241 [jobfetcher]
0242 
0243 # number of threads
0244 nThreads = 3
0245 
0246 # number of queues to fetch jobs in one cycle
0247 nQueues = 5
0248 
0249 # max number of jobs in one cycle
0250 maxJobs = 500
0251 
0252 # lookup interval in sec
0253 lookupTime = 60
0254 
0255 # sleep interval in sec
0256 sleepTime = 60
0257 
0258 
0259 
0260 
0261 ##########################
0262 #
0263 # Propagator parameters
0264 #
0265 
0266 [propagator]
0267 
0268 # number of threads
0269 nThreads = 3
0270 
0271 # max number of jobs to update in one cycle
0272 maxJobs = 100
0273 
0274 # number of jobs in bulk update
0275 nJobsInBulk = 100
0276 
0277 # max number of workers to update in one cycle
0278 maxWorkers = 100
0279 
0280 # number of workers in bulk update
0281 nWorkersInBulk = 100
0282 
0283 # number of dialog message to send
0284 maxDialogs = 50
0285 
0286 # minimum level of dialog messages to send. INFO, WARNING, or ERROR
0287 minMessageLevel = WARNING
0288 
0289 # lock interval in sec
0290 lockInterval = 600
0291 
0292 # update interval in sec
0293 updateInterval = 1800
0294 
0295 # sleep interval in sec
0296 sleepTime = 60
0297 
0298 
0299 
0300 
0301 ##########################
0302 #
0303 # Preparator parameters
0304 #
0305 
0306 [preparator]
0307 
0308 # number of threads
0309 nThreads = 3
0310 
0311 # max number of jobs to check in one cycle
0312 maxJobsToCheck = 100
0313 
0314 # max number of jobs to trigger in one cycle
0315 maxJobsToTrigger = 100
0316 
0317 # max number of files per job to check in one cycle : 0 to be unlimited
0318 maxFilesPerJobToCheck = 0
0319 
0320 # max number of files per job to prepare in one cycle : 0 to be unlimited
0321 maxFilesPerJobToPrepare = 0
0322 
0323 # lock interval in sec
0324 lockInterval = 600
0325 
0326 # check interval in sec
0327 checkInterval = 180
0328 
0329 # trigger interval in sec
0330 triggerInterval = 180
0331 
0332 # sleep interval in sec
0333 sleepTime = 60
0334 
0335 
0336 
0337 
0338 ##########################
0339 #
0340 # Submitter parameters
0341 #
0342 
0343 [submitter]
0344 
0345 # number of threads
0346 nThreads = 3
0347 
0348 # max number of queues to try in one cycle
0349 nQueues = 3
0350 
0351 # interval for queue lookup
0352 lookupTime = 60
0353 
0354 # interval for queue lock
0355 queueLockInterval = 300
0356 
0357 # lock interval in sec
0358 lockInterval = 600
0359 
0360 # check interval in sec
0361 checkInterval = 60
0362 
0363 # minimum interval in sec between submissions to the same queue
0364 minSubmissionInterval = 0
0365 
0366 # sleep interval in sec
0367 sleepTime = 60
0368 
0369 # max number of workers per queue to try in one cycle
0370 maxNewWorkers = 1000
0371 
0372 # respect sleep time
0373 respectSleepTime = False
0374 
0375 # factor to adjust workers
0376 #activateWorkerFactor = auto
0377 
0378 ##########################
0379 #
0380 # Monitor parameters
0381 #
0382 
0383 [monitor]
0384 
0385 # number of threads
0386 nThreads = 3
0387 
0388 # max number of workers to try in one cycle
0389 maxWorkers = 500
0390 
0391 # lock interval in sec
0392 lockInterval = 600
0393 
0394 # check interval in sec
0395 checkInterval = 300
0396 
0397 # timeout in sec to give up checking if it keeps failing
0398 checkTimeout = 3600
0399 
0400 # sleep interval in sec
0401 sleepTime = 600
0402 
0403 # whether to use fifo
0404 fifoEnable = False
0405 
0406 # sleep interval in millisecond using fifo
0407 fifoSleepTimeMilli = 15000
0408 
0409 # check interval in fifo in sec
0410 fifoCheckInterval = 120
0411 
0412 # check duration of a fifo cycle in sec
0413 fifoCheckDuration = 60
0414 
0415 # interval of force enqueue in sec
0416 fifoForceEnqueueInterval = 1500
0417 
0418 # max number of workers to be populated into fifo
0419 #fifoMaxWorkersToPopulate = 100000
0420 
0421 # max number of workers in a chunk to enqueue
0422 fifoMaxWorkersPerChunk = 500
0423 
0424 # max interval in sec a post-processing worker can preempt in fifo
0425 fifoMaxPreemptInterval = 60
0426 
0427 # plugin cache parameters (used if monitor plugin supports)
0428 #pluginCacheEnable = True
0429 #pluginCacheRefreshInterval = 300
0430 
0431 # workers will be killed if stuck queuing (submitted) for longer than this
0432 workerQueueTimeLimit = 172800
0433 
0434 # enable event-based monitor check. Only works when fifoEnable is True
0435 eventBasedEnable = False
0436 
0437 # list of plugins for event-based check. Mandatory when eventBasedEnable is True
0438 #eventBasedPlugins =
0439 #  [
0440 #    {
0441 #      "module": "pandaharvester.harvestermonitor.htcondor_monitor",
0442 #      "name": "HTCondorMonitor",
0443 #      "condorHostConfig_list": [
0444 #          "/opt/harvester/etc/panda/condor_host_config.json"
0445 #        ]
0446 #    }
0447 #  ]
0448 
0449 # interval of event-based check to query with plugin, in sec
0450 #eventBasedCheckInterval = 300
0451 
0452 # time window of event-based check to check within, in sec
0453 #eventBasedTimeWindow = 450
0454 
0455 # max number of events of event-based check to handle in one cycle
0456 #eventBasedCheckMaxEvents = 500
0457 
0458 # lifetime of an event in event-based check, in sec
0459 #eventBasedEventLifetime = 1800
0460 
0461 # max number of expired events to remove in one cycle
0462 #eventBasedRemoveMaxEvents = 2000
0463 
0464 # timeout for post-processing in minutes. 0 to give up immediately
0465 postProcessTimeout = 0
0466 
0467 
0468 
0469 
0470 
0471 ##########################
0472 #
0473 # Credential Manager parameters
0474 #
0475 # Notes : This is an example to manage two credentials, one with production role and the other with pilot role.
0476 #         One credential data following a whitespace per line. Empty lines are not allowed, so that a dummy string
0477 #         like 'dummy' needs to be added if some parameters like voms are unnecessary.
0478 
0479 [credmanager]
0480 
0481 # module name
0482 moduleName =
0483  pandaharvester.harvestercredmanager.no_voms_cred_manager
0484  pandaharvester.harvestercredmanager.no_voms_cred_manager
0485 
0486 # class name
0487 className =
0488  NoVomsCredManager
0489  NoVomsCredManager
0490 
0491 # original certificate file to generate new short-lived certificate
0492 inCertFile =
0493  /path_to/FIXME_original_cert_for_proxy_with_production_role
0494  /path_to/FIXME_original_cert_for_proxy_with_pilot_role
0495 
0496 # the name of short-lived certificate
0497 outCertFile =
0498  /path_to/FIXME_proxy_production
0499  /path_to/FIXME_proxy_pilot
0500 
0501 # voms
0502 voms =
0503  atlas:/atlas/Role=production
0504  atlas:/atlas/Role=pilot
0505 
0506 # plugin configs in json
0507 # pluginConfigs =
0508 #   [
0509 #     {
0510 #       "module": "pandaharvester.harvestercredmanager.no_voms_cred_manager",
0511 #       "name": "NoVomsCredManager",
0512 #       "configs": {
0513 #         "production": {
0514 #          "inCertFile": "/data/atlpan/proxy/atlpilo1RFC.plain",
0515 #          "outCertFile": "/data/atlpan/proxy/x509up_u25606_prod",
0516 #          "voms": "atlas:/atlas/Role=production"
0517 #         },
0518 #         "pilot": {
0519 #           "inCertFile": "/data/atlpan/proxy/atlpilo1RFC.plain",
0520 #           "outCertFile": "/data/atlpan/proxy/x509up_u25606_pilot",
0521 #           "voms": "atlas:/atlas/Role=pilot"
0522 #         }
0523 #       }
0524 #     }
0525 #   ]
0526 
0527 
0528 # sleep interval in sec
0529 sleepTime = 1800
0530 
0531 
0532 
0533 
0534 
0535 ##########################
0536 #
0537 # Stager parameters
0538 #
0539 
0540 [stager]
0541 
0542 # number of threads
0543 nThreads = 3
0544 
0545 # max number of jobs to check in one cycle
0546 maxJobsToCheck = 100
0547 
0548 # max number of jobs to trigger in one cycle
0549 maxJobsToTrigger = 100
0550 
0551 # max number of jobs to zip in one cycle : OBSOLETE, should be set in [zipper]
0552 maxJobsToZip = 100
0553 
0554 # max number of files per job to check in one cycle : 0 to be unlimited
0555 maxFilesPerJobToCheck = 0
0556 
0557 # max number of files per job to trigger stage-out in one cycle : 0 to be unlimited
0558 maxFilesPerJobToTrigger = 0
0559 
0560 # max number of files per job to zip in one cycle : 0 to be unlimited : OBSOLETE, should be set in [zipper]
0561 maxFilesPerJobToZip = 0
0562 
0563 # use two staged zipping : OBSOLETE, should be set in [zipper]
0564 usePostZipping = False
0565 
0566 # lock interval in sec
0567 lockInterval = 600
0568 
0569 # check interval in sec
0570 checkInterval = 180
0571 
0572 # trigger interval in sec
0573 triggerInterval = 180
0574 
0575 # zip interval in sec : OBSOLETE, should be set in [zipper]
0576 zipInterval = 180
0577 
0578 # number of threads for zip making : OBSOLETE, should be set in [zipper]
0579 nThreadsForZip = 4
0580 
0581 # sleep interval in sec
0582 sleepTime = 60
0583 
0584 
0585 
0586 
0587 
0588 ##########################
0589 #
0590 # Zipper parameters
0591 #
0592 
0593 [zipper]
0594 
0595 # max number of jobs to zip in one cycle
0596 maxJobsToZip = 100
0597 
0598 # max number of files per job to zip in one cycle : 0 to be unlimited
0599 maxFilesPerJobToZip = 0
0600 
0601 # use two staged zipping
0602 usePostZipping = False
0603 
0604 # lock interval in sec
0605 lockInterval = 600
0606 
0607 # zip interval in sec
0608 zipInterval = 180
0609 
0610 # number of threads for zip making
0611 nThreadsForZip = 4
0612 
0613 
0614 
0615 
0616 
0617 ##########################
0618 #
0619 # EventFeeder parameters
0620 #
0621 
0622 [eventfeeder]
0623 
0624 # number of threads
0625 nThreads = 3
0626 
0627 # max number of workers to try in one cycle
0628 maxWorkers = 500
0629 
0630 # lock interval in sec
0631 lockInterval = 600
0632 
0633 # sleep interval in sec
0634 sleepTime = 60
0635 
0636 
0637 
0638 
0639 ##########################
0640 #
0641 # Cacher parameters
0642 #
0643 
0644 [cacher]
0645 
0646 # one data ( main_key_name|sub_key_name|URL|dump_file(optional) ) following a white space per line
0647 #
0648 # Notes: This example is for five data. ddm_endpoints and panda_queues json files are retrieved using http.
0649 #        It also caches proxy files which are renewed by Credential Manager. Access key for BNL object store
0650 #        is retrieved from panda.
0651 data =
0652  ddmendpoints_objectstores.json||https://atlas-cric.cern.ch/api/atlas/ddmendpoint/query/?json&state=ACTIVE&site_state=ACTIVE&preset=dict&json_pretty=1&type[]=OS_LOGS&type[]=OS_ES
0653  panda_queues.json||https://atlas-cric.cern.ch/api/atlas/pandaqueue/query/?json
0654  agis_ddmendpoints.json||https://atlas-cric.cern.ch/api/atlas/ddmendpoint/query/list/?json&state=ACTIVE&site_state=ACTIVE&preset=dict&json_pretty=1
0655  proxy_pilot||file://path_to/FIXME_proxy_pilot
0656  proxy_production||file://path_to/FIXME_proxy_production
0657  resource_types.json||panda_server:get_resource_types
0658  job_statistics.json||panda_server:get_job_stats
0659  job_statistics_new.json||panda_server:get_job_stats_new
0660  worker_statistics.json||panda_server:get_worker_stats_from_panda
0661 # BNL_key||panda_cache:BNL_ObjectStoreKey.pub&BNL_ObjectStoreKey
0662 # globus_secret||panda_cache:GlobusClientID_1&GlobusRefreshToken_1
0663 
0664 # refresh interval in minutes
0665 refreshInterval = 10
0666 
0667 # sleep interval in sec
0668 sleepTime = 60
0669 
0670 
0671 
0672 
0673 
0674 ##########################
0675 #
0676 # Payload interaction parameters
0677 #
0678 
0679 [payload_interaction]
0680 
0681 # worker attributes
0682 workerAttributesFile = worker_attributes.json
0683 
0684 # job report
0685 jobReportFile = jobReport.json
0686 
0687 # event status dump file in json
0688 eventStatusDumpJsonFile = event_status.dump.json
0689 
0690 # event status dump file in xml
0691 eventStatusDumpXmlFile = _event_status.dump
0692 
0693 # job request
0694 jobRequestFile = worker_requestjob.json
0695 
0696 # job spec file
0697 jobSpecFile = HPCJobs.json
0698 
0699 # event request
0700 eventRequestFile = worker_requestevents.json
0701 
0702 # event ranges file
0703 eventRangesFile = JobsEventRanges.json
0704 
0705 # update events
0706 updateEventsFile = worker_updateevents.json
0707 
0708 # PFC for input files
0709 xmlPoolCatalogFile = PoolFileCatalog_H.xml
0710 
0711 # get PandaIDs
0712 pandaIDsFile = worker_pandaids.json
0713 
0714 # request to be killed
0715 killWorkerFile = kill_worker.json
0716 
0717 # heartbeat from worker
0718 heartbeatFile = worker_heartbeat.json
0719 
0720 
0721 
0722 ##########################
0723 #
0724 # Front-end parameters
0725 #
0726 
0727 [frontend]
0728 
0729 # port number for simple http frontend. For apache frontend port number is set in httpd.conf
0730 portNumber = 25080
0731 
0732 # number of threads
0733 nThreads = 10
0734 
0735 # verbose
0736 verbose = False
0737 
0738 # type : simple or apache
0739 type = simple
0740 
0741 # enable token authentication of apache frontend; default is True
0742 authEnable = True
0743 
0744 # file of secret used in token signature
0745 secretFile = /FIXME
0746 
0747 # whether to verify token (of its signature, expiration, etc.) when decoding token
0748 verifyToken = True
0749 
0750 
0751 
0752 
0753 ##########################
0754 #
0755 # Sweeper parameters
0756 #
0757 
0758 [sweeper]
0759 
0760 # number of threads
0761 nThreads = 3
0762 
0763 # max number of workers to try in one cycle
0764 maxWorkers = 500
0765 
0766 # check interval in sec
0767 checkInterval = 180
0768 
0769 # sleep interval in sec
0770 sleepTime = 60
0771 
0772 # duration in hours to keep finished workers
0773 keepFinished = 24
0774 
0775 # duration in hours to keep failed workers
0776 keepFailed = 72
0777 
0778 # duration in hours to keep cancelled workers
0779 keepCancelled = 72
0780 
0781 # duration in hours to keep missed workers
0782 keepMissed = 24
0783 
0784 # disk cleaning interval in hours
0785 #diskCleanUpInterval = 1
0786 
0787 # comma-concatenated list of directory_name|high_watermark_in_GB to be cleaned up
0788 #diskHighWatermark = /dir1/subdir1|1000,/dir2/subdir2|5000
0789 
0790 
0791 
0792 ##########################
0793 #
0794 # Watcher parameters
0795 #
0796 
0797 [watcher]
0798 
0799 # a comma-concatenated list of file name of logs to watch (default: panda-db_proxy.log)
0800 logFileNameList = panda-db_proxy.log
0801 
0802 # action is taken when the last message is older than maxStalled sec. set 0 to disable the action
0803 maxStalled = 300
0804 
0806 # the number of recent log messages to check in one cycle (see maxDuration below)
0806 nMessages = 1000
0807 
0808 # action is taken when it took more than maxDuration sec to generate nMessages messages. set 0 to disable the action
0809 maxDuration = 600
0810 
0811 # check interval in sec
0812 checkInterval = 180
0813 
0814 # sleep interval in sec
0815 sleepTime = 60
0816 
0817 # a comma-concatenated list of actions (email: to send alarms, kill: to kill forcefully, terminate: to kill with SIGTERM). or empty if no action
0818 actions =
0819 
0820 # name of env variable to keep pass-phrase
0821 passphraseEnv = HARVESTER_WATCHER_PASSPHRASE
0822 
0823 # hostname of SMTP server. note that parameters with the prefix of "mail" are required only when the email action is enabled
0824 mailServer = localhost
0825 
0826 # port of SMTP server
0827 mailPort = 25
0828 
0829 # use SSL for SMTP
0830 mailUseSSL = False
0831 
0832 # login user of SMTP server if any. leave it empty if SMTP doesn't need to logon
0833 mailUser =
0834 
0835 # login password of SMTP server if any. leave it empty if SMTP doesn't need to logon
0836 mailPassword =
0837 
0838 # email sender
0839 mailFrom = example_from@example.com
0840 
0841 # a comma-concatenated list of email recipients
0842 mailTo = example_to_1@example.com,example_to_2@example.com
0843 
0844 ##########################
0845 #
0846 # APF monitoring parameters
0847 #
0848 [apfmon]
0849 active = True
0850 
0851 ##########################
0852 #
0853 # Service monitor parameters
0854 #
0855 
0856 [service_monitor]
0857 active = True
0858 
0859 # optional in case you want to monitor any disk volume
0860 disk_volumes = data
0861 
0862 # pidfile only necessary when running in uwsgi
0863 pidfile = /var/log/harvester/panda_harvester.pid
0864 
0865 ##########################
0866 #
0867 # Google cloud parameters
0868 #
0869 
0870 [googlecloud]
0871 
0872 # zone where you are booting up your VMs and storage, e.g. us-east1-b
0873 zone = us-east1-b
0874 # project defined in the google compute account, where the activity will be billed
0875 project = atlas-harvester
0876 # private service account json generated in the google cloud management console
0877 service_account_file = /path/to/service_file.json
0878 # file with the user data to send to CERN VM
0879 user_data_file = /path/to/user_data.txt
0880 # image to use
0881 image = https://www.googleapis.com/compute/v1/projects/atlas-harvester/global/images/cernvm4-micro-3-0-6
0882 # harvester frontend
0883 harvester_frontend = aipanda170.cern.ch:25443
0884 
0885 
0886 
0887 
0888 ##########################
0889 #
0890 # File Syncer parameters
0891 #
0892 
0893 [file_syncer]
0894 
0895 # plugin configs in json
0896 # pluginConfigs =
0897 #     [
0898 #       {
0899 #         "module": "pandaharvester.harvesterfilesyncer.git_file_syncer",
0900 #         "name": "GitFileSyncer",
0901 #         "configs": {
0902 #           "grid_qconf": {
0903 #             "targetDir": "/data/atlpan/harvester_configurations",
0904 #             "sourceURL": "https://github.com/PanDAWMS/harvester_configurations.git",
0905 #             "sourceBranch": "master",
0906 #             "sourceRemoteName": "origin",
0907 #             "sourceSubdir": "GRID/condor_sdf_templates"
0908 #           }
0909 #         }
0910 #       }
0911 #     ]
0912 
0913 # sleep interval in sec
0914 sleepTime = 600
0915 
0916 
0917 
0918 ##########################
0919 #
0920 # Log level parameters
0921 #
0922 # To set logging level for each logger
0923 #
0924 # Notes: Global logging level is set by log_level in panda_common.cfg
0925 
0926 [log_level]
0927 
0928 # logger_name = level (CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET)
0929 #
0930 # E.g. to set INFO level to panda-monitor.log
0931 # monitor = INFO