# File indexing completed on 2026-04-10 08:38:59
0001 import uuid
0002 from urllib.parse import quote
0003
0004 from userinterface import Client
0005
# Task-level parameters for a PanDA JEDI HPO test task (no input dataset;
# 5 events total, 1 event per job).
taskParamMap = {
    "noInput": True,
    "nEventsPerJob": 1,
    "nEvents": 5,
    "taskName": str(uuid.uuid4()),  # unique per submission to avoid clashes
    "userName": "pandasrv1",
    "vo": "atlas",
    "taskPriority": 1000,
    "reqID": 12345,
    "architecture": "",
    "hpoWorkflow": True,  # marks this as a hyper-parameter-optimization task
    "transUses": "",
    "transHome": "",
    "transPath": "http://pandaserver.cern.ch:25080/trf/user/runHPO-00-00-01",
    "processingType": "simul",
    "prodSourceLabel": "test",
    "useLocalIO": 1,
    "taskType": "prod",
    "workingGroup": "AP_HPO",
    "coreCount": 1,
    "site": "BNL_PROD_UCORE",
    "nucleus": "CERN-PROD",
    "cloud": "WORLD",
}
0030
# Unique dataset names so repeated test submissions never collide.
logDatasetName = f"panda.jeditest.log.{uuid.uuid4()}"
outDatasetName = f"panda.jeditest.HPO.{uuid.uuid4()}"

# Log-file template for the task.
taskParamMap["log"] = {
    "dataset": logDatasetName,
    "type": "template",
    "param_type": "log",
    "token": "ddd:.*DATADISK",
    # FIX: "\(" is an invalid escape sequence (SyntaxWarning on Python
    # 3.12+); "\\(" produces the identical two characters intentionally —
    # a literal backslash followed by "(" between the two option groups.
    "destination": "(type=DATADISK)\\(dontkeeplog=True)",
    "offset": 1000,
    # ${SN} is left literal here; it is expanded per job on the server side.
    "value": f"{logDatasetName}.${{SN}}.log.tgz",
}
0043
# Command line for the steering container: hpogrid generates candidate
# hyper-parameter points with the nevergrad optimizer. The %-placeholders
# are substituted by the HPO service at run time.
steering_command = (
    '/bin/bash -c "hpogrid generate --n_point=%NUM_POINTS '
    "--max_point=%MAX_POINTS --infile=$PWD/%IN --outfile=$PWD/%OUT "
    '-l=nevergrad"'
)

# Steering (point-generation) configuration for the HPO loop.
taskParamMap["hpoRequestData"] = {
    "sandbox": "gitlab-registry.cern.ch/zhangruihpc/steeringcontainer:latest",
    "executable": "docker",
    "arguments": steering_command,
    "output_json": "output.json",
    "max_points": 10,  # total points sampled before the task finishes
    "num_points_per_generation": 2,  # points produced per generation step
}

# Container image in which each evaluation job runs.
taskParamMap["container_name"] = "docker://gitlab-registry.cern.ch/zhangruihpc/evaluationcontainer:mlflow"
0054
# Per-job parameter list. "constant" entries are passed verbatim to every
# job; "template" entries are expanded by JEDI (dataset resolution, ${SN}
# serial numbers, etc.).
job_params = [
    # Payload launcher: output JSON name, empty job options, URL-quoted command.
    {"type": "constant", "value": f"-o out.json -j \"\" -p \"{quote('bash ./exec_in_container.sh')}\""},
    # Dump the resolved input file list and the sample file for the payload.
    {"type": "constant", "value": "--writeInputToTxt IN_DATA:input_ds.json --inSampleFile input_sample.json"},
    # User sandbox archive and the server it is fetched from.
    {"type": "constant", "value": "-a aaa.tgz --sourceURL https://aipanda048.cern.ch:25443"},
    # Map the IN_DATA stream onto the per-job input file names.
    {"type": "constant", "value": "--inMap \"{'IN_DATA': ${IN_DATA/T}}\""},
]

# Input template: attach the whole EVNT dataset to every job (nosplit)
# and reuse it across jobs (repeat).
job_params.append(
    {
        "type": "template",
        "param_type": "input",
        "value": '-i "${IN_DATA/T}"',
        "dataset": "mc16_13TeV.501103.MGPy8EG_StauStauDirect_220p0_1p0_TFilt.merge.EVNT.e8102_e7400_tid21342682_00",
        "attributes": "nosplit,repeat",
    }
)

# Output template: one metrics tarball per job, registered in the output
# dataset but hidden from the user's dataset view.
job_params.append(
    {
        "type": "template",
        "param_type": "output",
        "token": "ATLASDATADISK",
        "value": "$JEDITASKID.metrics.${SN}.tgz",
        "dataset": outDatasetName,
        "hidden": True,
    }
)

# Tell the payload where to write its metrics archive.
job_params.append({"type": "constant", "value": "--outMetricsFile=${OUTPUT0}^metrics.tgz"})

taskParamMap["jobParameters"] = job_params
0080
# Submit the task to the PanDA server and show its reply (status, message).
response = Client.insertTaskParams(taskParamMap)
print(response)