@khalfella
Created March 8, 2017 21:30
[root@sup-spc-east1a ~]# workflow east-1a /jobs/cca1bfec-5b98-4d3f-8b13-41984ac1d5db
HTTP/1.1 200 OK
request-id: cca1bfec-5b98-4d3f-8b13-41984ac1d5db
Content-Type: application/json
Content-Length: 70586
Access-Control-Allow-Origin: *
Access-Control-Allow-Headers: Accept, Accept-Version, Content-Length, Content-MD5, Content-Type, Date, Api-Version, Response-Time
Access-Control-Allow-Methods: GET, HEAD
Access-Control-Expose-Headers: Api-Version, Request-Id, Response-Time
Connection: Keep-Alive
Content-MD5: LyTKZvkotLo6C2HisW5jeg==
Date: Wed, 08 Mar 2017 21:28:18 GMT
Server: WorkflowAPI
Response-Time: 10
x-request-id: 4e3621b5-e986-4a2a-beb5-5f3c5ef37836
x-response-time: 10
x-server-name: ba920791-3a92-4744-8f00-5b8bd77280d8
{
"execution": "failed",
"chain_results": [
{
"result": "All parameters OK!",
"error": "",
"name": "common.validate_params",
"started_at": "2017-03-07T17:10:08.214Z",
"finished_at": "2017-03-07T17:10:08.218Z"
},
{
"result": "Action set",
"error": "",
"name": "workflow.set_job_action",
"started_at": "2017-03-07T17:10:08.255Z",
"finished_at": "2017-03-07T17:10:08.258Z"
},
{
"result": "Image has generate_passwords=true but no users found",
"error": "",
"name": "imgapi.generate_passwords",
"started_at": "2017-03-07T17:10:08.284Z",
"finished_at": "2017-03-07T17:10:08.287Z"
},
{
"result": "Networks are valid",
"error": "",
"name": "napi.validate_networks",
"started_at": "2017-03-07T17:10:08.317Z",
"finished_at": "2017-03-07T17:10:08.332Z"
},
{
"result": "OK",
"error": "",
"name": "cnapi.check_manual_server_nics",
"started_at": "2017-03-07T17:10:08.360Z",
"finished_at": "2017-03-07T17:10:08.364Z"
},
{
"result": "OK",
"error": "",
"name": "dapi.get_allocation_ticket",
"started_at": "2017-03-07T17:10:08.398Z",
"finished_at": "2017-03-07T17:10:08.440Z"
},
{
"result": "OK",
"error": "",
"name": "dapi.wait_allocation_ticket",
"started_at": "2017-03-07T17:10:08.481Z",
"finished_at": "2017-03-07T17:10:08.486Z"
},
{
"result": "VM allocated to Server 44454c4c-5300-1043-804d-b8c04f524432",
"error": "",
"name": "dapi.get_allocation",
"started_at": "2017-03-07T17:10:08.513Z",
"finished_at": "2017-03-07T17:10:09.000Z"
},
{
"result": "OK",
"error": "",
"name": "cnapi.acquire_vm_ticket",
"started_at": "2017-03-07T17:10:09.106Z",
"finished_at": "2017-03-07T17:10:09.145Z"
},
{
"result": "OK",
"error": "",
"name": "dapi.release_allocation_ticket",
"started_at": "2017-03-07T17:10:09.316Z",
"finished_at": "2017-03-07T17:10:09.394Z"
},
{
"result": "OK",
"error": "",
"name": "cnapi.wait_on_vm_ticket",
"started_at": "2017-03-07T17:10:09.430Z",
"finished_at": "2017-03-07T17:10:09.435Z"
},
{
"result": "NICs allocated",
"error": "",
"name": "napi.provision_nics",
"started_at": "2017-03-07T17:10:09.526Z",
"finished_at": "2017-03-07T17:10:09.792Z"
},
{
"result": "No fabric NATs to provision",
"error": "",
"name": "cnapi.acquire_fabric_nat_tickets",
"started_at": "2017-03-07T17:10:09.884Z",
"finished_at": "2017-03-07T17:10:09.889Z"
},
{
"result": "No fabric NATs to provision",
"error": "",
"name": "napi.provision_fabric_nats",
"started_at": "2017-03-07T17:10:09.918Z",
"finished_at": "2017-03-07T17:10:09.922Z"
},
{
"result": "Payload prepared successfully",
"error": "",
"name": "prepare_payload",
"started_at": "2017-03-07T17:10:10.011Z",
"finished_at": "2017-03-07T17:10:10.013Z"
},
{
"result": "Ensure image task queued!",
"error": "",
"name": "cnapi.ensure_image",
"started_at": "2017-03-07T17:10:10.089Z",
"finished_at": "2017-03-07T17:10:10.411Z"
},
{
"result": "",
"error": {
"name": "Error",
"message": "socket hang up"
},
"name": "cnapi.wait_task_ensure_image",
"started_at": "2017-03-07T17:10:10.438Z",
"finished_at": "2017-03-07T18:10:10.440Z"
}
],
"params": {
"firewall_enabled": true,
"owner_uuid": "90bce6cb-2cd3-e960-80c3-abcbcb0f3cb3",
"hostname": "6d11703ab260",
"alias": "docker-test-64290",
"internal_metadata": {
"docker:id": "6d11703ab260e450e5258927879d9bad4fc2aa60dc0c6112e66bae8eff2cbfab",
"docker:logdriver": "json-file",
"docker:logconfig": "{}",
"docker:noipmgmtd": true,
"docker:imageid": "0db1128ef4a30534f95684d3933920f5d8714ce2ee23be1de3f9ee2f5fc6c859",
"docker:imagename": "bahamat/authenticated-proxy",
"docker:entrypoint": "[\"/usr/sbin/apachectl\",\"-D\",\"FOREGROUND\"]",
"docker:env": "[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"]",
"docker:tcp_unpublished_ports": "[80]",
"docker:cmd": "[]",
"docker:linkHosts": "",
"docker:linkEnv": "[]"
},
"tags": {
"sdc_docker": true
},
"zlog_max_size": 50000000,
"autoboot": false,
"docker": true,
"restart_init": false,
"tmpfs": 0,
"filesystems": [],
"networks": [
{
"primary": true,
"ipv4_uuid": "60fc4d91-3851-41c2-a947-bbe966480e02",
"ipv4_count": 1,
"uuid": "60fc4d91-3851-41c2-a947-bbe966480e02"
}
],
"billing_id": "14af2214-d0f8-11e5-9399-77e0d621f66d",
"image_uuid": "0e3ffa34-b1dd-205b-0fac-86abc82b19c8",
"brand": "lx",
"init_name": "/native/usr/vm/sbin/dockerinit",
"kernel_version": "3.13.0",
"firewall_rules": [
{
"enabled": true,
"owner_uuid": "90bce6cb-2cd3-e960-80c3-abcbcb0f3cb3",
"rule": "FROM tag \"sdc_docker\" TO tag \"sdc_docker\" ALLOW udp PORT all",
"uuid": "35e57e31-fc27-42ef-8d43-5501b28723da",
"version": "1483550695145.015791"
},
{
"enabled": true,
"owner_uuid": "90bce6cb-2cd3-e960-80c3-abcbcb0f3cb3",
"rule": "FROM tag \"sdc_docker\" TO tag \"sdc_docker\" ALLOW tcp PORT all",
"uuid": "7bda66c8-f6a3-4cd4-b092-3c4ad3bc44b4",
"version": "1483550695135.015791"
}
],
"sync": true,
"server_uuid": "44454c4c-5300-1043-804d-b8c04f524432",
"cpu_cap": 84,
"max_lwps": 4000,
"max_physical_memory": 1024,
"max_swap": 4096,
"quota": 25,
"zfs_io_priority": 128,
"ram": 1024,
"cpu_shares": 128,
"package": {
"uuid": "14af2214-d0f8-11e5-9399-77e0d621f66d",
"name": "g4-highcpu-1G",
"version": "1.0.3",
"active": true,
"cpu_cap": 84,
"description": "Compute Optimized 1G RAM - 0.5 vCPU - 25 GB Disk",
"max_lwps": 4000,
"max_physical_memory": 1024,
"max_swap": 4096,
"common_name": "Compute Optimized - 1G",
"quota": 25600,
"zfs_io_priority": 128,
"fss": 128,
"billing_tag": "g4-highcpu-1G",
"created_at": "2016-12-02T22:13:11.922Z",
"updated_at": "2017-01-05T17:24:01.832Z",
"default": true,
"group": "Compute Optimized",
"v": 1
},
"image": {
"v": 2,
"uuid": "0e3ffa34-b1dd-205b-0fac-86abc82b19c8",
"owner": "930896af-bf8c-48d4-885c-6573a94b1853",
"name": "docker-layer",
"version": "0db1128ef4a3",
"state": "active",
"disabled": false,
"public": false,
"published_at": "2017-01-03T17:55:58.297Z",
"type": "docker",
"os": "linux",
"files": [
{
"sha1": "96e379fb98bd1b401c6ee52230c11d38251572cf",
"size": 32,
"compression": "none"
}
],
"description": "/bin/sh -c #(nop) ENTRYPOINT [\"/usr/sbin/apachectl\" \"-D\" \"FOREGROUND\"]",
"origin": "f912f576-96a4-da55-d613-ed40bc89f085",
"tags": {
"docker:repo": "bahamat/authenticated-proxy",
"docker:id": "0db1128ef4a30534f95684d3933920f5d8714ce2ee23be1de3f9ee2f5fc6c859",
"docker:architecture": "amd64",
"docker:config": {
"Cmd": null,
"Entrypoint": [
"/usr/sbin/apachectl",
"-D",
"FOREGROUND"
],
"Env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"WorkingDir": ""
}
}
},
"sdc_nat_pool": "27ea1d5f-df02-410e-843a-c60dba9ec5ca",
"task": "provision",
"vm_uuid": "6d11703a-b260-e450-e525-8927879d9bad",
"current_state": "provisioning",
"x-request-id": "c75230ff-f219-4448-aa3d-0afa80c7e3e9",
"imgapiPeers": [],
"fabricNatNics": [],
"nics": [
{
"belongs_to_type": "zone",
"belongs_to_uuid": "6d11703a-b260-e450-e525-8927879d9bad",
"mac": "90:b8:d0:f1:b9:bc",
"owner_uuid": "90bce6cb-2cd3-e960-80c3-abcbcb0f3cb3",
"primary": true,
"state": "provisioning",
"ip": "192.168.130.107",
"fabric": true,
"gateway": "192.168.128.1",
"gateway_provisioned": true,
"internet_nat": true,
"mtu": 8500,
"netmask": "255.255.252.0",
"nic_tag": "sdc_overlay/15429807",
"resolvers": [
"8.8.8.8",
"8.8.4.4"
],
"vlan_id": 2,
"network_uuid": "60fc4d91-3851-41c2-a947-bbe966480e02",
"cn_uuid": "44454c4c-5300-1043-804d-b8c04f524432"
}
],
"jobid": "cca1bfec-5b98-4d3f-8b13-41984ac1d5db",
"payload": {
"uuid": "6d11703a-b260-e450-e525-8927879d9bad",
"image": {
"v": 2,
"uuid": "0e3ffa34-b1dd-205b-0fac-86abc82b19c8",
"owner": "930896af-bf8c-48d4-885c-6573a94b1853",
"name": "docker-layer",
"version": "0db1128ef4a3",
"state": "active",
"disabled": false,
"public": false,
"published_at": "2017-01-03T17:55:58.297Z",
"type": "docker",
"os": "linux",
"files": [
{
"sha1": "96e379fb98bd1b401c6ee52230c11d38251572cf",
"size": 32,
"compression": "none"
}
],
"description": "/bin/sh -c #(nop) ENTRYPOINT [\"/usr/sbin/apachectl\" \"-D\" \"FOREGROUND\"]",
"origin": "f912f576-96a4-da55-d613-ed40bc89f085",
"tags": {
"docker:repo": "bahamat/authenticated-proxy",
"docker:id": "0db1128ef4a30534f95684d3933920f5d8714ce2ee23be1de3f9ee2f5fc6c859",
"docker:architecture": "amd64",
"docker:config": {
"Cmd": null,
"Entrypoint": [
"/usr/sbin/apachectl",
"-D",
"FOREGROUND"
],
"Env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"WorkingDir": ""
}
}
},
"alias": "docker-test-64290",
"autoboot": false,
"billing_id": "14af2214-d0f8-11e5-9399-77e0d621f66d",
"brand": "lx",
"cpu_cap": 84,
"cpu_shares": 128,
"docker": true,
"firewall_enabled": true,
"firewall_rules": [
{
"enabled": true,
"owner_uuid": "90bce6cb-2cd3-e960-80c3-abcbcb0f3cb3",
"rule": "FROM tag \"sdc_docker\" TO tag \"sdc_docker\" ALLOW udp PORT all",
"uuid": "35e57e31-fc27-42ef-8d43-5501b28723da",
"version": "1483550695145.015791"
},
{
"enabled": true,
"owner_uuid": "90bce6cb-2cd3-e960-80c3-abcbcb0f3cb3",
"rule": "FROM tag \"sdc_docker\" TO tag \"sdc_docker\" ALLOW tcp PORT all",
"uuid": "7bda66c8-f6a3-4cd4-b092-3c4ad3bc44b4",
"version": "1483550695135.015791"
}
],
"hostname": "6d11703ab260",
"init_name": "/native/usr/vm/sbin/dockerinit",
"internal_metadata": {
"docker:id": "6d11703ab260e450e5258927879d9bad4fc2aa60dc0c6112e66bae8eff2cbfab",
"docker:logdriver": "json-file",
"docker:logconfig": "{}",
"docker:noipmgmtd": true,
"docker:imageid": "0db1128ef4a30534f95684d3933920f5d8714ce2ee23be1de3f9ee2f5fc6c859",
"docker:imagename": "bahamat/authenticated-proxy",
"docker:entrypoint": "[\"/usr/sbin/apachectl\",\"-D\",\"FOREGROUND\"]",
"docker:env": "[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"]",
"docker:tcp_unpublished_ports": "[80]",
"docker:cmd": "[]",
"docker:linkHosts": "",
"docker:linkEnv": "[]"
},
"kernel_version": "3.13.0",
"max_lwps": 4000,
"max_physical_memory": 1024,
"max_swap": 4096,
"nics": [
{
"belongs_to_type": "zone",
"belongs_to_uuid": "6d11703a-b260-e450-e525-8927879d9bad",
"mac": "90:b8:d0:f1:b9:bc",
"owner_uuid": "90bce6cb-2cd3-e960-80c3-abcbcb0f3cb3",
"primary": true,
"state": "provisioning",
"ip": "192.168.130.107",
"fabric": true,
"gateway": "192.168.128.1",
"gateway_provisioned": true,
"internet_nat": true,
"mtu": 8500,
"netmask": "255.255.252.0",
"nic_tag": "sdc_overlay/15429807",
"resolvers": [
"8.8.8.8",
"8.8.4.4"
],
"vlan_id": 2,
"network_uuid": "60fc4d91-3851-41c2-a947-bbe966480e02",
"cn_uuid": "44454c4c-5300-1043-804d-b8c04f524432"
}
],
"owner_uuid": "90bce6cb-2cd3-e960-80c3-abcbcb0f3cb3",
"quota": 25,
"ram": 1024,
"zfs_io_priority": 128,
"zlog_max_size": 50000000,
"tags": {
"sdc_docker": true
},
"tmpfs": 0,
"archive_on_delete": true,
"resolvers": [
"8.8.8.8",
"8.8.4.4"
],
"image_uuid": "0e3ffa34-b1dd-205b-0fac-86abc82b19c8",
"filesystems": [],
"imgapiPeers": []
}
},
"image_uuid": "0e3ffa34-b1dd-205b-0fac-86abc82b19c8",
"server_uuid": "44454c4c-5300-1043-804d-b8c04f524432",
"task": "provision",
"target": "/provision-6d11703a-b260-e450-e525-8927879d9bad",
"vm_uuid": "6d11703a-b260-e450-e525-8927879d9bad",
"workflow": "6e15678a-8316-4a07-b712-03248550f24c",
"exec_after": null,
"num_attempts": 0,
"name": "provision-7.3.0",
"version": "7.3.0",
"chain": [
{
"name": "common.validate_params",
"timeout": 10,
"retry": 1,
"body": "function validateParams(job, cb) {\n if (napiUrl === undefined) {\n return cb('No NAPI parameters provided');\n }\n\n if (ufdsUrl === undefined || ufdsDn === undefined ||\n ufdsPassword === undefined) {\n return cb('No UFDS parameters provided');\n }\n\n if (cnapiUrl === undefined) {\n return cb('No CNAPI URL provided');\n }\n\n if (fwapiUrl === undefined) {\n return cb('No FWAPI URL provided');\n }\n\n if (imgapiUrl === undefined) {\n return cb('No IMGAPI URL provided');\n }\n\n if (sapiUrl === undefined) {\n return cb('No SAPI URL provided');\n }\n\n if (job.params['owner_uuid'] === undefined) {\n return cb('\\'owner_uuid\\' is required');\n }\n\n if (job.params.brand === undefined) {\n return cb('VM \\'brand\\' is required');\n }\n\n return cb(null, 'All parameters OK!');\n}"
},
{
"name": "workflow.set_job_action",
"timeout": 10,
"retry": 1,
"body": "function setJobAction(job, cb) {\n job.action = 'provision';\n return cb(null, 'Action set');\n}",
"modules": {}
},
{
"name": "imgapi.generate_passwords",
"timeout": 10,
"retry": 1,
"body": "function generatePasswords(job, cb) {\n var log = job.log;\n var execFile = childProcess.execFile;\n var PWD_LENGTH = 12;\n var APG_COMMAND = '/opt/local/bin/apg';\n var APG_ARGS = [\n '-m', PWD_LENGTH,\n '-M', 'SCNL',\n '-n', 1,\n '-E', '\"\\'@$%&*/.:[]\\\\'\n ];\n\n if (job.params.image['generate_passwords'] === false) {\n return cb(null, 'No need to generate passwords for image');\n }\n\n if (job.params.image.users === undefined ||\n !Array.isArray(job.params.image.users)) {\n return cb(null, 'Image has generate_passwords=true but no users found');\n }\n\n if (job.params['internal_metadata'] === undefined) {\n job.params['internal_metadata'] = {};\n }\n\n var users = job.params.image.users;\n var name;\n var password;\n\n async.mapSeries(users, function (user, next) {\n name = user.name + '_pw';\n if (job.params['internal_metadata'][name] === undefined) {\n execFile(APG_COMMAND, APG_ARGS, function (err, stdout, stderr) {\n if (err) {\n log.info({ err: err }, 'Error generating random password');\n return next(err);\n }\n\n password = stdout.toString().replace(/\\n|\\r/g, '');\n job.params['internal_metadata'][name] = password;\n return next();\n });\n } else {\n return next();\n }\n }, function (err) {\n if (err) {\n cb(err, 'Could not generate passwords');\n } else {\n cb(null, 'Passwords generated for Image');\n }\n });\n}",
"modules": {
"childProcess": "child_process",
"async": "async"
}
},
{
"name": "napi.validate_networks",
"timeout": 10,
"retry": 1,
"body": "function validateNetworks(job, cb) {\n var networks = job.params.networks;\n\n // add-nics also calls this function, but if macs are provided we don't\n // necessarily need to progress further\n if (job.params.macs && !networks) {\n return cb();\n }\n\n var newNetworks = [];\n if (!networks) {\n return cb('Networks are required');\n }\n\n var napi = new sdcClients.NAPI({\n url: napiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n\n job.nicTags = [];\n\n // - Resolve network names to uuids when applicable\n // - Repopulate job.params.networks\n // - Returns cb(err, uuid). uuid is present when name was resolved to uuid\n function getNetwork(netId, callback) {\n /*JSSTYLED*/\n var UUID_RE = /^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$/;\n var netFn, poolFn;\n var params;\n\n // Network can be either by name or uuid\n if (UUID_RE.test(netId)) {\n params = netId;\n netFn = napi.getNetwork;\n poolFn = napi.getNetworkPool;\n } else {\n params = { name: netId };\n netFn = napi.listNetworks;\n poolFn = napi.listNetworkPools;\n }\n\n netFn.call(napi, params, function (err, nets) {\n // If network is not found then it might be a network pool\n if (err && err.name !== 'ResourceNotFoundError') {\n return callback(err);\n }\n\n // Did we get the network from list or get?\n var net = (Array.isArray(nets) ? nets[0] : nets);\n // No net if NAPI returns an empty array or if we got a 404\n if (net) {\n job.nicTags.push(net.nic_tag);\n return callback(null, net.uuid);\n }\n\n // We might be trying to provision on a network pool, so\n // try that instead\n poolFn.call(napi, params, function (err2, pools) {\n if (err2) {\n return callback(err2);\n }\n\n // NAPI-121: listNetworkPools should accept a name\n if (Array.isArray(pools)) {\n pools = pools.filter(function (pool) {\n return pool.name === netId;\n });\n if (pools.length) {\n job.nicTags.push(pools[0].nic_tag);\n callback(null, pools[0].uuid);\n } else {\n callback(new Error('No such Network Pool with ' +\n 'name: ' + netId));\n }\n } else {\n job.nicTags.push(pools.nic_tag);\n callback(null, pools.uuid);\n }\n });\n });\n }\n\n async.mapSeries(networks, function (network, next) {\n var netId;\n if (network.ipv4_uuid !== undefined) {\n netId = network.ipv4_uuid;\n } else if (network.name !== undefined) {\n netId = network.name;\n }\n\n getNetwork(netId, function (err, uuid) {\n if (err) {\n next(err);\n } else {\n network.uuid = uuid;\n network.ipv4_uuid = uuid;\n newNetworks.push(network);\n next();\n }\n });\n\n }, function (err2) {\n if (err2) {\n cb(err2);\n } else {\n job.params.networks = newNetworks;\n job.log.info({ nicTags: job.nicTags }, 'NIC Tags retrieved');\n cb(null, 'Networks are valid');\n }\n });\n}",
"modules": {
"sdcClients": "sdc-clients",
"async": "async"
}
},
{
"name": "cnapi.check_manual_server_nics",
"timeout": 10,
"retry": 1,
"body": "function checkManualServerNics(job, cb) {\n var serverUuid = job.params['server_uuid'];\n\n if (!serverUuid) {\n return cb();\n }\n\n var headers = { 'x-request-id': job.params['x-request-id'] };\n var cnapi = new sdcClients.CNAPI({ url: cnapiUrl, headers: headers });\n\n return cnapi.getServer(serverUuid, function (err, server) {\n if (err) {\n return cb(err);\n }\n\n var nicTags = job.nicTags;\n var interfaces = server.sysinfo['Network Interfaces'];\n var found = 0;\n var vnics = server.sysinfo['Virtual Network Interfaces'] || {};\n\n Object.keys(interfaces).forEach(function (iname) {\n var serverTags = interfaces[iname]['NIC Names'];\n\n for (var i = 0; i < nicTags.length; i++) {\n if (serverTags.indexOf(nicTags[i]) !== -1) {\n found++;\n }\n }\n });\n\n Object.keys(vnics).forEach(function (iname) {\n var serverTags = vnics[iname]['Overlay Nic Tags'] || [];\n\n for (var i = 0; i < nicTags.length; i++) {\n if (serverTags.indexOf(nicTags[i]) !== -1) {\n found++;\n }\n }\n });\n\n if (found == nicTags.length) {\n return cb(null, 'Manual server meets NIC Tag requirements');\n } else {\n job.log.info({\n nicTags: nicTags,\n found: found,\n interfaces: interfaces,\n vnics: vnics\n }, 'Missing manual server nic tags');\n return cb('Manual server does not meet NIC Tag requirements');\n }\n });\n}",
"modules": {
"sdcClients": "sdc-clients"
}
},
{
"name": "dapi.get_allocation_ticket",
"timeout": 30,
"retry": 1,
"body": "function acquireAllocationTicket(job, cb) {\n var cnapi = new sdcClients.CNAPI({\n url: cnapiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n\n // Find the headnode\n cnapi.listServers({ headnode: true }, function (err, headnodes) {\n if (err) {\n cb(err);\n return;\n }\n\n if (!headnodes || !headnodes.length) {\n cb(new Error('no headnodes returned'));\n return;\n }\n\n var newTicket = {\n scope: 'vm-allocate',\n id: 'global',\n expires_at: (new Date(\n Date.now() + 60 * 1000).toISOString()),\n action: 'allocate',\n workflow_job_uuid: job.uuid\n };\n\n cnapi.waitlistTicketCreate(headnodes[0].uuid, newTicket, onCreate);\n });\n\n\n function onCreate(err, ticket) {\n if (err) {\n cb(err);\n return;\n }\n\n cnapi.waitlistTicketGet(\n ticket.uuid, function (geterr, getticket)\n {\n if (geterr) {\n cb(geterr);\n return;\n }\n job.allocationTicket = getticket;\n cb();\n });\n }\n}",
"modules": {
"sdcClients": "sdc-clients"
}
},
{
"name": "dapi.wait_allocation_ticket",
"timeout": 120,
"retry": 1,
"body": "function waitOnAllocationTicket(job, cb) {\n var cnapi = new sdcClients.CNAPI({\n url: cnapiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n\n var allocationTicket = job.allocationTicket;\n\n if (allocationTicket.status === 'active') {\n return cb();\n }\n\n cnapi.waitlistTicketWait(allocationTicket.uuid, cb);\n}",
"modules": {
"sdcClients": "sdc-clients"
}
},
{
"name": "dapi.get_allocation",
"timeout": 30,
"retry": 1,
"body": "function getAllocation(job, cb) {\n var nicTags = job.nicTags;\n var pkg = job.params.package;\n var img = job.params.image;\n\n if (!nicTags) {\n return cb('NIC tags are required');\n }\n\n if (!img) {\n return cb('Image is required');\n }\n\n if (job.params['server_uuid']) {\n cb(null, 'Server UUID present, no need to get allocation from DAPI');\n return;\n }\n\n // There is no sdc-client for CNAPI's DAPI yet\n var cnapi = restify.createJsonClient({\n url: cnapiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n\n var payload = {\n vm: job.params,\n image: img,\n package: pkg,\n nic_tags: nicTags\n };\n\n job.log.info({ dapiPayload: payload }, 'Payload sent to DAPI');\n\n return cnapi.post('/allocate', payload, function (err, req, res, body) {\n if (err) {\n return cb(err);\n }\n\n job.params['server_uuid'] = body.server.uuid;\n job.params.imgapiPeers = body.imgapiPeers;\n job.server_uuid = body.server.uuid;\n return cb(null, 'VM allocated to Server ' + body.server.uuid);\n });\n}",
"modules": {
"restify": "restify"
}
},
{
"name": "cnapi.acquire_vm_ticket",
"timeout": 30,
"retry": 1,
"body": "function acquireVMTicket(job, cb) {\n var cnapi = new sdcClients.CNAPI({\n url: cnapiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n var server_uuid = job.params.server_uuid;\n var newTicket = {\n scope: 'vm',\n id: job.params.vm_uuid,\n expires_at: (new Date(\n Date.now() + 600 * 1000).toISOString()),\n action: job.action\n };\n\n if (job.action === 'provision') {\n newTicket.extra = {\n workflow_job_uuid: job.uuid,\n owner_uuid: job.params.owner_uuid,\n max_physical_memory: job.params.max_physical_memory,\n cpu_cap: job.params.cpu_cap,\n quota: job.params.quota,\n brand: job.params.brand,\n disks: job.params.disks\n };\n\n if (job.params.brand === 'kvm' && job.params.image) {\n newTicket.extra.image_size = job.params.image.image_size;\n }\n }\n\n cnapi.waitlistTicketCreate(server_uuid, newTicket, onCreate);\n\n function onCreate(err, ticket) {\n if (err) {\n cb(err);\n return;\n }\n\n // look up ticket, ensure it's not expired etc\n cnapi.waitlistTicketGet(ticket.uuid,\n function (geterr, getticket) {\n if (geterr) {\n cb(geterr);\n return;\n }\n job.ticket = getticket;\n job.log.info(\n { ticket: getticket }, 'ticket status after wait');\n cb();\n });\n }\n}",
"modules": {
"sdcClients": "sdc-clients"
}
},
{
"name": "dapi.release_allocation_ticket",
"timeout": 30,
"retry": 1,
"body": "function releaseAllocationTicket(job, cb) {\n if (!job.allocationTicket) {\n return cb();\n }\n var cnapi = new sdcClients.CNAPI({\n url: cnapiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n cnapi.waitlistTicketRelease(job.allocationTicket.uuid, function (err) {\n if (err) {\n job.log.warn({err: err, ticket: job.ticket},\n 'error releasing CNAPI waitlist allocation ticket');\n return;\n }\n cb();\n });\n}",
"modules": {
"sdcClients": "sdc-clients"
}
},
{
"name": "cnapi.wait_on_vm_ticket",
"timeout": 120,
"retry": 1,
"body": "function waitOnVMTicket(job, cb) {\n var cnapi = new sdcClients.CNAPI({\n url: cnapiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n\n var ticket = job.ticket;\n\n if (ticket.status === 'active') {\n cb();\n return;\n }\n cnapi.waitlistTicketWait(job.ticket.uuid, cb);\n}",
"modules": {
"sdcClients": "sdc-clients"
}
},
{
"name": "napi.provision_nics",
"timeout": 20,
"retry": 1,
"body": "function provisionNics(job, cb) {\n var networks = job.params.networks;\n if (networks === undefined) {\n cb('Networks are required');\n return;\n }\n\n var napi = new sdcClients.NAPI({\n url: napiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n\n // Every NIC we provision is added to this array\n var nics = [];\n var primaryFound = false;\n job.params.fabricNatNics = [];\n\n networks.forEach(function (net) {\n if (net.primary)\n primaryFound = true;\n // Make absolutely sure we're never overriding NAPI's network\n // owner checks:\n delete net.check_owner;\n });\n\n if (!primaryFound && networks.length > 0)\n networks[0].primary = true;\n\n // Return a new copy for every time we provision a new NIC and avoid\n // accidentally reusing an object\n function nicParams() {\n return {\n owner_uuid: job.params.owner_uuid,\n belongs_to_uuid: job.params.uuid || job.params.vm_uuid,\n belongs_to_type: 'zone'\n };\n }\n\n // If this is a nic on a fabric, has no gateway provisioned, and the network\n // requests an internet NAT, add it\n function addFabricNatNic(fNic) {\n if (fNic && fNic.fabric && fNic.gateway && !fNic.gateway_provisioned &&\n fNic.ip !== fNic.gateway && fNic.internet_nat) {\n job.params.fabricNatNics.push(fNic);\n }\n }\n\n // Get current list of NICs that might have been provisioned ahead of time\n napi.listNics(nicParams(), function (err, res) {\n if (err) {\n cb(err);\n return;\n }\n return asyncProvisionNics(res);\n });\n\n function asyncProvisionNics(currentNics) {\n async.mapSeries(networks, function (network, next) {\n // If there is at least one provisioned NIC in one of the networks\n // provided, skip napi.provisionNic for this network\n var netNics = currentNics.filter(function (nic) {\n return (nic.network_uuid && nic.network_uuid ===\n network.ipv4_uuid);\n });\n\n if (netNics.length > 0) {\n nics = nics.concat(netNics);\n next();\n return;\n }\n\n var antiSpoofParams = ['allow_dhcp_spoofing', 'allow_ip_spoofing',\n 'allow_mac_spoofing', 'allow_restricted_traffic'];\n var params = nicParams();\n params.cn_uuid = job.params.server_uuid;\n if (network.ipv4_ips !== undefined)\n params.ip = network.ipv4_ips[0];\n if (network.primary !== undefined)\n params.primary = network.primary;\n\n antiSpoofParams.forEach(function (spoofParam) {\n if (network.hasOwnProperty(spoofParam)) {\n params[spoofParam] = network[spoofParam];\n }\n });\n\n napi.provisionNic(network.ipv4_uuid, params,\n function (suberr, nic) {\n if (suberr) {\n next(suberr);\n } else {\n nics.push(nic);\n addFabricNatNic(nic);\n next();\n }\n });\n }, function (err2) {\n if (err2) {\n cb(err2);\n } else {\n job.params.nics = nics;\n job.log.info({ nics: job.params.nics }, 'NICs allocated');\n\n cb(null, 'NICs allocated');\n }\n });\n }\n}",
"modules": {
"sdcClients": "sdc-clients",
"async": "async"
}
},
{
"name": "cnapi.acquire_fabric_nat_tickets",
"timeout": 10,
"retry": 1,
"body": "function acquireFabricTickets(job, cb) {\n if (!job.params.fabricNatNics || job.params.fabricNatNics.length === 0) {\n return cb(null, 'No fabric NATs to provision');\n }\n\n var cnapi = new sdcClients.CNAPI({\n url: cnapiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n var nics = [];\n var netuuids = [];\n\n job.params.fabricNatTickets = [];\n\n // Uniquify, just in case\n for (var n in job.params.fabricNatNics) {\n if (netuuids.indexOf(job.params.fabricNatNics[n].network_uuid) === -1) {\n nics.push(job.params.fabricNatNics[n]);\n netuuids.push(job.params.fabricNatNics[n].network_uuid);\n }\n }\n\n cnapi.listServers({ headnode: true }, function (listErr, cns) {\n if (listErr) {\n cb(listErr);\n return;\n }\n\n if (!cns || cns.length === 0) {\n cb(new Error('Headnode not found in CNAPI'));\n return;\n }\n\n if (cns.length > 1) {\n job.params.headnodes = cns;\n cb(new Error('More than 1 headnode found in CNAPI'));\n return;\n }\n\n async.mapSeries(nics, function (nic, next) {\n var newTicket = {\n scope: 'fabric_nat',\n id: nic.network_uuid,\n expires_at: (new Date(\n Date.now() + 600 * 1000).toISOString())\n };\n\n cnapi.waitlistTicketCreate(cns[0].uuid, newTicket, onCreate);\n\n function onCreate(err, ticket) {\n if (err) {\n next(err);\n return;\n }\n\n // look up ticket, ensure it's not expired or invalid\n cnapi.waitlistTicketGet(ticket.uuid,\n function (geterr, getticket) {\n if (geterr) {\n next(geterr);\n return;\n }\n\n job.params.fabricNatTickets.push({\n nic: nic,\n ticket: getticket\n });\n job.log.info(\n { nic: nic, ticket: getticket },\n 'ticket status after create');\n next();\n });\n }\n }, function (sErr) {\n if (sErr) {\n cb(sErr);\n } else {\n cb(null, 'Fabric nat tickets acquired');\n }\n });\n });\n}",
"modules": {
"sdcClients": "sdc-clients",
"async": "async"
}
},
{
"name": "napi.provision_fabric_nats",
"timeout": 120,
"retry": 1,
"body": "function provisionFabricNats(job, cb) {\n if (!job.params.fabricNatTickets ||\n job.params.fabricNatTickets.length === 0) {\n return cb(null, 'No fabric NATs to provision');\n }\n\n if (!job.params.sdc_nat_pool) {\n return cb(null, 'No fabric NAT pool configured for provisioning');\n }\n\n var cnapi = new sdcClients.CNAPI({\n url: cnapiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n var napi = new sdcClients.NAPI({\n url: napiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n var natSvc;\n var sapi = new sdcClients.SAPI({\n log: job.log.child({ component: 'sapi' }),\n url: sapiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n\n function releaseTicket(tErr, ticket, tCb) {\n cnapi.waitlistTicketRelease(ticket.uuid, function (relErr) {\n if (relErr) {\n job.log.error({ ticket: ticket, err: relErr },\n 'Error releasing ticket');\n }\n\n if (tErr) {\n tCb(tErr);\n return;\n }\n\n tCb(relErr);\n return;\n });\n }\n\n /*\n * Provision a new NAT zone through SAPI on two networks:\n * - the configured NAT network pool\n * - the fabric network that needs a NAT zone\n */\n function provisionNatZone(tick, done) {\n var fabricNic = tick.nic;\n\n // If we were waiting on a ticket because another NAT zone was being\n // provisioned and it succeeded, we don't need to provision another.\n napi.getNetwork(fabricNic.network_uuid, function (netErr, fNet) {\n if (netErr) {\n return done(netErr);\n }\n\n if (fNet.gateway_provisioned) {\n job.log.debug({ ticket: tick.ticket.uuid, net: fNet },\n 'Network already has gateway provisioned');\n tick.gateway_provisioned = true;\n return releaseTicket(null, tick.ticket, done);\n }\n\n var instParams = {\n metadata: {\n 'com.joyent:ipnat_subnet': fNet.subnet\n },\n params: {\n alias: 'nat-' + fabricNic.network_uuid,\n internal_metadata: {\n 'com.joyent:ipnat_owner': job.params.owner_uuid\n },\n networks: [\n {\n uuid: job.params.sdc_nat_pool,\n primary: true,\n allow_ip_spoofing: true\n },\n {\n uuid: fabricNic.network_uuid,\n ip: fabricNic.gateway,\n allow_ip_spoofing: true\n }\n ],\n ticket: tick.ticket.uuid\n }\n };\n\n sapi.createInstanceAsync(natSvc, instParams,\n function _afterSapiProv(createErr, inst) {\n if (createErr) {\n return releaseTicket(createErr, tick.ticket, done);\n }\n\n job.log.info({ instance: inst, natSvc: natSvc },\n 'Created NAT instance');\n\n tick.job_uuid = inst.job_uuid;\n tick.vm_uuid = inst.uuid;\n return done();\n });\n });\n }\n\n sapi.listServices({ name: 'nat' }, function (sapiErr, svcs) {\n if (sapiErr) {\n return cb(sapiErr);\n }\n\n if (!svcs || svcs.length === 0) {\n return cb(new Error('No \"nat\" service found in SAPI'));\n }\n\n if (svcs.length > 1) {\n return cb(new Error('More than one \"nat\" service found in SAPI'));\n }\n\n natSvc = svcs[0].uuid;\n job.log.info({ svc: natSvc, svcs: svcs }, 'svcs');\n\n async.forEach(job.params.fabricNatTickets, function (tick, next) {\n if (tick.ticket.status === 'active') {\n return provisionNatZone(tick, next);\n }\n\n cnapi.waitlistTicketWait(tick.ticket.uuid,\n function _afterWait(tErr) {\n if (tErr) {\n next(tErr);\n } else {\n provisionNatZone(tick, next);\n }\n });\n\n }, function (aErr) {\n if (aErr) {\n cb(aErr);\n } else {\n cb(null, 'Provisioned fabric NATs');\n }\n });\n });\n}",
"modules": {
"sdcClients": "sdc-clients",
"async": "async"
}
},
{
"name": "prepare_payload",
"timeout": 10,
"retry": 1,
"body": "function preparePayload(job, cb) {\n job.params.jobid = job.uuid;\n\n var params = job.params;\n var i, j, nic;\n var payload = { uuid: params['vm_uuid'], image: job.params.image };\n var wantResolvers = true;\n\n if (payload.image.hasOwnProperty('tags') &&\n payload.image.tags.hasOwnProperty('kernel_version') &&\n !params.hasOwnProperty('kernel_version')) {\n\n params['kernel_version'] = payload.image.tags.kernel_version;\n }\n\n if (payload.image.type === 'lx-dataset') {\n params['brand'] = 'lx';\n }\n\n var keys = [ 'alias', 'autoboot', 'billing_id', 'brand', 'cpu_cap',\n 'cpu_shares', 'customer_metadata', 'delegate_dataset', 'dns_domain',\n 'docker', 'firewall_enabled', 'firewall_rules', 'fs_allowed',\n 'hostname', 'indestructible_zoneroot', 'indestructible_delegated',\n 'init_name', 'internal_metadata', 'kernel_version', 'limit_priv',\n 'maintain_resolvers', 'max_locked_memory', 'max_lwps', 'max_msg_ids',\n 'max_physical_memory', 'max_shm_memory', 'max_sem_ids', 'max_shm_ids',\n 'max_swap', 'mdata_exec_timeout', 'nics',\n 'owner_uuid', 'quota', 'ram',\n 'resolvers', 'vcpus', 'zfs_data_compression', 'zfs_io_priority',\n 'zlog_max_size', 'tags', 'tmpfs'\n ];\n\n for (i = 0; i < keys.length; i++) {\n var key = keys[i];\n if (params[key] !== undefined) {\n payload[key] = params[key];\n }\n }\n\n // Per OS-2520 we always want to be setting archive_on_delete in SDC\n payload['archive_on_delete'] = true;\n\n // If internal_metadata.set_resolvers === false, we always want\n // to leave the resolvers as empty\n if (params.internal_metadata !== undefined &&\n typeof (params.internal_metadata) === 'object' &&\n params.internal_metadata.set_resolvers === false) {\n wantResolvers = false;\n }\n\n // Add resolvers and routes in the order of the networks\n var resolver;\n var resolvers = [];\n var routes = {};\n for (i = 0; i < params.nics.length; i++) {\n nic = params.nics[i];\n\n if (nic['resolvers'] !== undefined &&\n Array.isArray(nic['resolvers'])) {\n for (j = 0; j < nic['resolvers'].length; j++) {\n resolver = nic['resolvers'][j];\n if (resolvers.indexOf(resolver) === -1) {\n resolvers.push(resolver);\n }\n }\n }\n\n if (nic['routes'] !== undefined &&\n typeof (nic['routes']) === 'object') {\n for (var r in nic['routes']) {\n if (!routes.hasOwnProperty(r)) {\n routes[r] = nic['routes'][r];\n }\n }\n }\n }\n\n if (wantResolvers && !payload.resolvers) {\n payload['resolvers'] = resolvers;\n }\n\n if (Object.keys(routes).length !== 0) {\n payload['routes'] = routes;\n }\n\n if (params['brand'] === 'kvm') {\n payload.disks = params.disks;\n\n ['disk_driver', 'nic_driver', 'cpu_type'].forEach(function (field) {\n if (params[field]) {\n payload[field] = params[field];\n } else {\n payload[field] = job.params.image[field];\n }\n });\n } else {\n payload['image_uuid'] = params['image_uuid'];\n\n if (params['filesystems'] !== undefined) {\n payload['filesystems'] = params['filesystems'];\n }\n }\n\n if (params.imgapiPeers !== undefined) {\n payload.imgapiPeers = params.imgapiPeers;\n }\n\n job.params.payload = payload;\n cb(null, 'Payload prepared successfully');\n}",
"modules": {
"sdcClients": "sdc-clients"
}
},
{
"name": "cnapi.ensure_image",
"timeout": 300,
"retry": 1,
"body": "function ensureImage(job, cb) {\n var commonHeaders = { 'x-request-id': job.params['x-request-id'] };\n var cnapi = new sdcClients.CNAPI({ url: cnapiUrl, headers: commonHeaders });\n\n var ensurePayload = {};\n\n if (job.params['brand'] === 'kvm') {\n ensurePayload.image_uuid = job.params.payload.disks[0].image_uuid;\n } else {\n ensurePayload.image_uuid = job.params.image_uuid;\n }\n\n if (job.params.imgapiPeers !== undefined) {\n ensurePayload.imgapiPeers = job.params.imgapiPeers;\n }\n\n cnapi.ensureImage(job.params['server_uuid'], ensurePayload,\n function (error, task) {\n if (error) {\n return cb(error);\n }\n\n job.taskId = task.id;\n return cb(null, 'Ensure image task queued!');\n });\n}",
"modules": {
"sdcClients": "sdc-clients"
}
},
{
"name": "cnapi.wait_task_ensure_image",
"timeout": 3600,
"retry": 1,
"body": "function waitTask(job, cb) {\n if (job.params['skip_zone_action']) {\n cb(null, 'Skipping waitTask');\n return;\n }\n\n if (!job.taskId) {\n cb('No taskId provided');\n return;\n }\n\n if (!cnapiUrl) {\n cb('No CNAPI URL provided');\n return;\n }\n\n var cnapi = new sdcClients.CNAPI({\n url: cnapiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n\n cnapi.waitTask(job.taskId, {}, onTask);\n\n function onTask(err, task) {\n if (err) {\n if (err.statusCode === 404) {\n // fallback to pollTask\n cnapi.pollTask(job.taskId, {}, function (pollerr, polltask) {\n // Make sure loops cannot happen\n if (pollerr && pollerr.statusCode === 404) {\n cb(pollerr);\n return;\n }\n onTask(pollerr, polltask);\n });\n return;\n }\n cb(err);\n } else if (task && task.status == 'failure') {\n cb(getErrorMesage(task));\n } else if (task && task.status == 'complete') {\n // Tasks that modify VM state should add a .vm to the task\n // with something like \"self.finish({ vm: machine });\"\n if (task.history && task.history.length > 0 &&\n task.history[0].name === 'finish' &&\n task.history[0].event &&\n task.history[0].event.vm) {\n\n job.finished_vm = task.history[0].event.vm;\n job.log.debug({vm_uuid: job.finished_vm.uuid},\n 'finish() returned VM');\n }\n\n cb(null, 'Job succeeded!');\n } else {\n cb(new Error('unexpected task status, ' + task.status));\n }\n }\n\n function getErrorMesage(task) {\n var message;\n var details = [];\n\n if (task.history !== undefined && task.history.length) {\n for (var i = 0; i < task.history.length; i++) {\n var event = task.history[i];\n if (event.name && event.name === 'error' && event.event &&\n event.event.error) {\n var err = event.event.error;\n if (typeof (err) === 'string') {\n message = err;\n if (event.event.details && event.event.details.error) {\n message += ', ' + event.event.details.error;\n }\n } else {\n message = err.message;\n }\n } else if (event.name && event.name === 'finish' &&\n event.event && event.event.log && event.event.log.length) {\n for (var j = 0; j < event.event.log.length; j++) {\n var logEvent = event.event.log[j];\n if (logEvent.level && logEvent.level === 'error') {\n details.push(logEvent.message);\n }\n }\n }\n }\n }\n\n // Apparently the task doesn't have any message for us...\n if (message === undefined) {\n message = 'Unexpected error occured';\n } else if (details.length) {\n message += ': ' + details.join(', ');\n }\n\n return message;\n }\n}",
"modules": {
"sdcClients": "sdc-clients"
}
},
{
"name": "cnapi.wait_for_fabric_nat_provisions",
"timeout": 600,
"retry": 1,
"body": "function waitForFabricNatProvisions(job, cb) {\n if (!job.params.fabricNatTickets ||\n job.params.fabricNatTickets.length === 0) {\n return cb(null, 'No fabric NATs provisioned');\n }\n\n // Filter out tickets that didn't end up needing a gateway provisioned\n var toWaitFor = job.params.fabricNatTickets.filter(function (t) {\n return !t.gateway_provisioned;\n });\n\n if (toWaitFor.length === 0) {\n return cb(null, 'No fabric NAT provisions left to wait for');\n }\n\n var vmapi = new sdcClients.VMAPI({\n url: vmapiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n\n function checkVm(tick, done) {\n var uuid = tick.vm_uuid;\n vmapi.getVm({ uuid: uuid }, onVmapi);\n\n function onVmapi(err, vm, req, res) {\n if (err) {\n cb(err);\n\n } else if (vm.state === 'running') {\n done();\n\n } else if (vm.state === 'failed') {\n done(new Error(\n 'NAT zone \"' + vm.uuid + '\" failed to provision'));\n\n } else {\n setTimeout(checkVm, 1000, tick, done);\n }\n }\n }\n\n async.forEach(toWaitFor, checkVm, function (aErr) {\n if (aErr) {\n cb(aErr);\n } else {\n cb(null, 'Fabric NATs running');\n }\n });\n}",
"modules": {
"sdcClients": "sdc-clients",
"async": "async"
}
},
{
"name": "cnapi.provision_vm",
"timeout": 10,
"retry": 1,
"body": "function provision(job, cb) {\n delete job.params.skip_zone_action;\n\n var cnapi = new sdcClients.CNAPI({\n url: cnapiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n job.params.jobid = job.uuid;\n\n // autoboot=false means we want the machine to not to boot after provision\n if (job.params.autoboot === false || job.params.autoboot === 'false') {\n job.expects = 'stopped';\n } else {\n job.expects = 'running';\n }\n\n var server = job.params['server_uuid'];\n\n return cnapi.createVm(server, job.params.payload, function (err, task) {\n if (err) {\n return cb(err);\n } else {\n job.taskId = task.id;\n // As soon was we reach this point, we don't want to clean up NICs\n // when a provision fails\n job.markAsFailedOnError = false;\n return cb(null, 'Provision task: ' + task.id + ' queued!');\n }\n });\n}",
"modules": {
"sdcClients": "sdc-clients"
}
},
{
"name": "cnapi.wait_task",
"timeout": 3600,
"retry": 1,
"body": "function waitTask(job, cb) {\n if (job.params['skip_zone_action']) {\n cb(null, 'Skipping waitTask');\n return;\n }\n\n if (!job.taskId) {\n cb('No taskId provided');\n return;\n }\n\n if (!cnapiUrl) {\n cb('No CNAPI URL provided');\n return;\n }\n\n var cnapi = new sdcClients.CNAPI({\n url: cnapiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n\n cnapi.waitTask(job.taskId, {}, onTask);\n\n function onTask(err, task) {\n if (err) {\n if (err.statusCode === 404) {\n // fallback to pollTask\n cnapi.pollTask(job.taskId, {}, function (pollerr, polltask) {\n // Make sure loops cannot happen\n if (pollerr && pollerr.statusCode === 404) {\n cb(pollerr);\n return;\n }\n onTask(pollerr, polltask);\n });\n return;\n }\n cb(err);\n } else if (task && task.status == 'failure') {\n cb(getErrorMesage(task));\n } else if (task && task.status == 'complete') {\n // Tasks that modify VM state should add a .vm to the task\n // with something like \"self.finish({ vm: machine });\"\n if (task.history && task.history.length > 0 &&\n task.history[0].name === 'finish' &&\n task.history[0].event &&\n task.history[0].event.vm) {\n\n job.finished_vm = task.history[0].event.vm;\n job.log.debug({vm_uuid: job.finished_vm.uuid},\n 'finish() returned VM');\n }\n\n cb(null, 'Job succeeded!');\n } else {\n cb(new Error('unexpected task status, ' + task.status));\n }\n }\n\n function getErrorMesage(task) {\n var message;\n var details = [];\n\n if (task.history !== undefined && task.history.length) {\n for (var i = 0; i < task.history.length; i++) {\n var event = task.history[i];\n if (event.name && event.name === 'error' && event.event &&\n event.event.error) {\n var err = event.event.error;\n if (typeof (err) === 'string') {\n message = err;\n if (event.event.details && event.event.details.error) {\n message += ', ' + event.event.details.error;\n }\n } else {\n message = err.message;\n }\n } else if (event.name && event.name === 'finish' &&\n event.event && event.event.log && event.event.log.length) {\n for (var j = 0; j < event.event.log.length; j++) {\n var logEvent = event.event.log[j];\n if (logEvent.level && logEvent.level === 'error') {\n details.push(logEvent.message);\n }\n }\n }\n }\n }\n\n // Apparently the task doesn't have any message for us...\n if (message === undefined) {\n message = 'Unexpected error occured';\n } else if (details.length) {\n message += ': ' + details.join(', ');\n }\n\n return message;\n }\n}",
"modules": {
"sdcClients": "sdc-clients"
}
},
{
"name": "vmapi.put_vm",
"timeout": 120,
"retry": 1,
"body": "function putVm(job, cb) {\n var vmapi;\n\n /*\n * Checks (polls) the state of a machine in VMAPI. It is used for provisions\n * and VM actions such as reboot and shutdown.\n *\n * IMPORTANT: this function an all uses of job.expects are deprecated and\n * will be removed in a future version after everyone is updated\n * past the old agent tasks that don't pass back the VMs. It is\n * being replaced with the putVm function and is now only called\n * from there.\n */\n function checkState(_job, _cb) {\n if (_job.params['skip_zone_action']) {\n _cb(null, 'Skipping checkState');\n return;\n }\n\n // For now don't fail the job if this parameter is not present\n if (!_job.expects) {\n _cb(null, 'No \\'expects\\' state parameter provided');\n return;\n }\n\n if (!_job.params['vm_uuid']) {\n _cb('No VM UUID provided');\n return;\n }\n\n if (!vmapiUrl) {\n _cb('No VMAPI URL provided');\n return;\n }\n\n var _vmapi = new sdcClients.VMAPI({\n url: vmapiUrl,\n headers: { 'x-request-id': _job.params['x-request-id'] }\n });\n\n // Repeat checkVm until VM data is updated\n checkVm();\n\n function checkVm() {\n _vmapi.getVm({ uuid: _job.params['vm_uuid'] }, onVmapi);\n\n function onVmapi(err, vm, req, res) {\n if (err) {\n _cb(err);\n } else if (vm.state == _job.expects) {\n _cb(null, 'VM is now ' + _job.expects);\n } else {\n if (_job.timeToDie) {\n _job.log.error('checkState.checkVm.onVmapi called after'\n + ' task completion, breaking loop');\n return;\n }\n setTimeout(checkVm, 1000);\n }\n }\n }\n }\n\n if (!job.finished_vm) {\n job.log.warn({req_id: job.params['x-request-id']},\n 'putVM() called but job.finished_vm is missing');\n\n checkState(job, cb);\n //\n // When checkState is removed:\n //\n // cb(null, 'job has no finished_vm, nothing to post to VMAPI');\n return;\n }\n\n if (!vmapiUrl) {\n cb(new Error('No VMAPI URL provided'));\n return;\n }\n\n job.log.debug({vmobj: job.finished_vm}, 'putVM() putting VM to VMAPI');\n\n //\n // Borrowed from vm-agent lib/vmapi-client.js\n //\n // DO NOT TRY THIS AT HOME!\n //\n // afaict the reason sdcClients does not have a putVm function in the first\n // place is that this is not something API clients should generally be\n // doing. WE need to do it, and vm-agent needs to do it, but other clients\n // should not be doing it unless they're absolutely sure that what they're\n // PUTing is the current state.\n //\n // We know that here because cn-agent tasks just did a VM.load for us.\n //\n sdcClients.VMAPI.prototype.putVm = function (vm, callback) {\n var log = job.log;\n var opts = { path: '/vms/' + vm.uuid };\n\n this.client.put(opts, vm, function (err, req, res) {\n if (err) {\n log.error(err, 'Could not update VM %s', vm.uuid);\n return callback(err);\n }\n\n log.info('VM (uuid=%s, state=%s, last_modified=%s) updated @ VMAPI',\n vm.uuid, vm.state, vm.last_modified);\n return callback();\n });\n };\n\n vmapi = new sdcClients.VMAPI({\n log: job.log,\n url: vmapiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n\n vmapi.putVm(job.finished_vm, function (err) {\n if (err) {\n cb(err);\n return;\n }\n\n cb(null, 'put VM ' + job.finished_vm.uuid + ' to VMAPI');\n });\n}",
"modules": {
"sdcClients": "sdc-clients"
}
},
{
"name": "fwapi.update",
"timeout": 10,
"retry": 1,
"body": "function updateFwapi(job, cb) {\n var fwapi = new sdcClients.FWAPI({\n url: fwapiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n var jobParams = job.params.payload || job.params;\n var type;\n var update = {};\n var vmProps = ['add_nics', 'firewall_enabled', 'nics', 'remove_ips',\n 'remove_nics', 'remove_tags', 'set_tags', 'tags'];\n\n if (job.params.task === 'provision') {\n type = 'vm.add';\n } else {\n type = (job.params.task === 'destroy') ? 'vm.delete' : 'vm.update';\n }\n\n vmProps.forEach(function (prop) {\n if (jobParams.hasOwnProperty(prop)) {\n update[prop] = jobParams[prop];\n }\n });\n\n job.log.info({ jobParams: jobParams, update: update }, 'update params');\n\n if (Object.keys(update).length === 0 && job.params.task !== 'destroy') {\n return cb(null, 'No properties affecting FWAPI found: not updating');\n }\n\n update.owner_uuid = jobParams.owner_uuid;\n update.server_uuid = jobParams.server_uuid;\n update.type = type;\n update.uuid = jobParams.uuid || jobParams.vm_uuid || job.params.vm_uuid;\n\n return fwapi.createUpdate(update, function (err, obj) {\n if (err) {\n job.log.warn(err, 'Error sending update to FWAPI');\n return cb(null, 'Error updating FWAPI');\n }\n\n return cb(null, 'Updated FWAPI with update UUID: ' + obj.update_uuid);\n });\n}",
"modules": {
"sdcClients": "sdc-clients"
}
},
{
"name": "cnapi.release_vm_ticket",
"timeout": 60,
"retry": 1,
"body": "function releaseVMTicket(job, cb) {\n if (!job.ticket) {\n return cb();\n }\n var cnapi = new sdcClients.CNAPI({\n url: cnapiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n cnapi.waitlistTicketRelease(job.ticket.uuid, function (err) {\n if (err) {\n job.log.warn({err: err, ticket: job.ticket},\n 'error releasing CNAPI waitlist VM ticket');\n }\n cb(err);\n });\n}",
"modules": {
"sdcClients": "sdc-clients"
}
},
{
"name": "cnapi.release_fabric_nat_ticket",
"timeout": 60,
"retry": 1,
"body": "function releaseFabricTicket(job, cb) {\n if (!job.params.ticket) {\n return cb(null, 'No ticket to release');\n }\n\n var cnapi = new sdcClients.CNAPI({\n url: cnapiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n\n cnapi.waitlistTicketRelease(job.params.ticket, function (err) {\n if (err) {\n if (err.code === 'ResourceNotFound') {\n cb(null, 'Ticket released');\n } else {\n cb(err);\n }\n\n return;\n }\n\n cb(null, 'Released ticket ' + job.params.ticket);\n });\n}",
"modules": {
"sdcClients": "sdc-clients"
}
}
],
"timeout": 3810,
"onerror": [
{
"name": "napi.cleanup_nics",
"timeout": 10,
"retry": 1,
"body": "function cleanupNics(job, cb) {\n // If this is false it means that cnapi.pollTask succeeded, so the VM exists\n // physically wether its provision failed or not\n if (job.markAsFailedOnError === false) {\n return cb(null, 'markAsFailedOnError was set to false, ' +\n 'won\\'t cleanup VM NICs');\n }\n\n var macs = job.params.macs;\n\n if (!macs) {\n var nics = job.params['add_nics'] || job.params['nics'];\n\n if (!nics) {\n return cb(null, 'No MACs given, and no NICs were provisioned');\n }\n\n macs = nics.map(function (nic) { return nic.mac; });\n }\n\n var napi = new sdcClients.NAPI({\n url: napiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n\n async.mapSeries(macs, function (mac, next) {\n napi.deleteNic(mac, next);\n }, function (err) {\n if (err) {\n cb(err);\n } else {\n cb(null, 'NICs removed');\n }\n });\n}",
"modules": {
"sdcClients": "sdc-clients",
"async": "async"
}
},
{
"name": "set_post_back_failed",
"body": "function setPostBackFailed(job, cb) {\n // If this is false it means that cnapi.waitTask succeeded, so the VM exists\n // physically wether its provision failed or not\n if (job.markAsFailedOnError === false) {\n return cb(null, 'markAsFailedOnError was set to false, ' +\n 'won\\'t set postBackState for VM');\n }\n\n job.postBackState = 'failed';\n return cb(null, 'Set post back state as failed');\n}",
"modules": {}
},
{
"name": "common.post_back",
"body": "function postBack(job, cb) {\n if (job.markAsFailedOnError === false) {\n return cb(null, 'markAsFailedOnError was set to false, ' +\n 'won\\'t postBack provision failure to VMAPI');\n }\n\n var urls = job.params['post_back_urls'];\n var vmapiPath = vmapiUrl + '/job_results';\n\n // By default, post back to VMAPI\n if (urls === undefined || !Array.isArray(urls)) {\n urls = [ vmapiPath ];\n } else {\n urls.push(vmapiPath);\n }\n\n var obj = clone(job.params);\n obj.execution = job.postBackState || 'succeeded';\n\n async.mapSeries(urls, function (url, next) {\n var p = urlModule.parse(url);\n var api = restify.createJsonClient({\n url: p.protocol + '//' + p.host,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n api.post(p.pathname, obj, onResponse);\n\n function onResponse(err, req, res) {\n return next(err);\n }\n\n }, function (err2) {\n if (err2) {\n var errObject = { err: err2, urls: urls };\n job.log.info(errObject, 'Error posting back to URLs');\n cb(null, 'Could not post back job results. See /info object');\n } else {\n cb(null, 'Posted job results back to specified URLs');\n }\n });\n\n // Shallow clone for the job.params object\n function clone(theObj) {\n if (null === theObj || 'object' != typeof (theObj)) {\n return theObj;\n }\n\n var copy = theObj.constructor();\n\n for (var attr in theObj) {\n if (theObj.hasOwnProperty(attr)) {\n copy[attr] = theObj[attr];\n }\n }\n return copy;\n }\n}",
"modules": {
"async": "async",
"restify": "restify",
"urlModule": "url"
}
},
{
"name": "cnapi.cleanup_allocation_ticket",
"modules": {
"sdcClients": "sdc-clients"
},
"body": "function releaseAllocationTicket(job, cb) {\n if (!job.allocationTicket) {\n return cb();\n }\n var cnapi = new sdcClients.CNAPI({\n url: cnapiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n cnapi.waitlistTicketRelease(job.allocationTicket.uuid, function (err) {\n if (err) {\n job.log.warn({err: err, ticket: job.ticket},\n 'error releasing CNAPI waitlist allocation ticket');\n return;\n }\n cb();\n });\n}"
},
{
"name": "cnapi.cleanup_vm_ticket",
"modules": {
"sdcClients": "sdc-clients"
},
"body": "function releaseVMTicket(job, cb) {\n if (!job.ticket) {\n return cb();\n }\n var cnapi = new sdcClients.CNAPI({\n url: cnapiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n cnapi.waitlistTicketRelease(job.ticket.uuid, function (err) {\n if (err) {\n job.log.warn({err: err, ticket: job.ticket},\n 'error releasing CNAPI waitlist VM ticket');\n }\n cb(err);\n });\n}"
},
{
"name": "vmapi.refresh_vm_on_error",
"modules": {
"restify": "restify"
},
"body": "function refreshVm(job, cb) {\n if (!job.params['vm_uuid']) {\n cb('No VM UUID provided');\n return;\n }\n\n if (!vmapiUrl) {\n cb('No VMAPI URL provided');\n return;\n }\n\n /*\n * When job.markAsFailedOnError is set, we won't automatically update the\n * VM in VMAPI to state 'failed'. This is because there may be NICs in use.\n * However, for the case where we have failed to create something correctly,\n * we want to ensure VMAPI gets to the correct state. So we do a GET with\n * sync=true here at the end of the onerror chain to ensure VMAPI's\n * up-to-date. But only when the 'failed' state was not set already.\n */\n if (job.markAsFailedOnError !== false) {\n return cb(null, 'markAsFailedOnError set, not doing sync GET');\n }\n\n var vmapi = restify.createJsonClient({\n url: vmapiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n var path = '/vms/' + job.params['vm_uuid'] + '?sync=true';\n\n vmapi.get(path, onVmapi);\n\n function onVmapi(err, req, res, vm) {\n if (err) {\n cb(err);\n } else {\n cb(null, 'VM data refreshed, new VM state is ' + vm.state);\n }\n }\n}"
},
{
"name": "cnapi.release_fabric_nat_ticket",
"timeout": 60,
"retry": 1,
"body": "function releaseFabricTicket(job, cb) {\n if (!job.params.ticket) {\n return cb(null, 'No ticket to release');\n }\n\n var cnapi = new sdcClients.CNAPI({\n url: cnapiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n\n cnapi.waitlistTicketRelease(job.params.ticket, function (err) {\n if (err) {\n if (err.code === 'ResourceNotFound') {\n cb(null, 'Ticket released');\n } else {\n cb(err);\n }\n\n return;\n }\n\n cb(null, 'Released ticket ' + job.params.ticket);\n });\n}",
"modules": {
"sdcClients": "sdc-clients"
}
},
{
"name": "On error",
"body": "function (job, cb) {\n return cb('Error executing job');\n }"
}
],
"oncancel": [
{
"name": "vmapi.refresh_vm",
"modules": {
"restify": "restify"
},
"body": "function refreshVm(job, cb) {\n if (!job.params['vm_uuid']) {\n cb('No VM UUID provided');\n return;\n }\n\n if (!vmapiUrl) {\n cb('No VMAPI URL provided');\n return;\n }\n\n /*\n * When job.markAsFailedOnError is set, we won't automatically update the\n * VM in VMAPI to state 'failed'. This is because there may be NICs in use.\n * However, for the case where we have failed to create something correctly,\n * we want to ensure VMAPI gets to the correct state. So we do a GET with\n * sync=true here at the end of the onerror chain to ensure VMAPI's\n * up-to-date. But only when the 'failed' state was not set already.\n */\n if (job.markAsFailedOnError !== false) {\n return cb(null, 'markAsFailedOnError set, not doing sync GET');\n }\n\n var vmapi = restify.createJsonClient({\n url: vmapiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n var path = '/vms/' + job.params['vm_uuid'] + '?sync=true';\n\n vmapi.get(path, onVmapi);\n\n function onVmapi(err, req, res, vm) {\n if (err) {\n cb(err);\n } else {\n cb(null, 'VM data refreshed, new VM state is ' + vm.state);\n }\n }\n}"
},
{
"name": "cnapi.cleanup_vm_ticket",
"modules": {
"sdcClients": "sdc-clients"
},
"body": "function releaseVMTicket(job, cb) {\n if (!job.ticket) {\n return cb();\n }\n var cnapi = new sdcClients.CNAPI({\n url: cnapiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n cnapi.waitlistTicketRelease(job.ticket.uuid, function (err) {\n if (err) {\n job.log.warn({err: err, ticket: job.ticket},\n 'error releasing CNAPI waitlist VM ticket');\n }\n cb(err);\n });\n}"
},
{
"name": "cnapi.cleanup_allocation_ticket",
"modules": {
"sdcClients": "sdc-clients"
},
"body": "function releaseAllocationTicket(job, cb) {\n if (!job.allocationTicket) {\n return cb();\n }\n var cnapi = new sdcClients.CNAPI({\n url: cnapiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n cnapi.waitlistTicketRelease(job.allocationTicket.uuid, function (err) {\n if (err) {\n job.log.warn({err: err, ticket: job.ticket},\n 'error releasing CNAPI waitlist allocation ticket');\n return;\n }\n cb();\n });\n}"
},
{
"name": "cnapi.release_fabric_nat_ticket",
"timeout": 60,
"retry": 1,
"body": "function releaseFabricTicket(job, cb) {\n if (!job.params.ticket) {\n return cb(null, 'No ticket to release');\n }\n\n var cnapi = new sdcClients.CNAPI({\n url: cnapiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n\n cnapi.waitlistTicketRelease(job.params.ticket, function (err) {\n if (err) {\n if (err.code === 'ResourceNotFound') {\n cb(null, 'Ticket released');\n } else {\n cb(err);\n }\n\n return;\n }\n\n cb(null, 'Released ticket ' + job.params.ticket);\n });\n}",
"modules": {
"sdcClients": "sdc-clients"
}
}
],
"workflow_uuid": "6e15678a-8316-4a07-b712-03248550f24c",
"created_at": "2017-03-07T17:10:08.126Z",
"onerror_results": [
{
"result": "NICs removed",
"error": "",
"name": "napi.cleanup_nics",
"started_at": "2017-03-07T18:10:10.522Z",
"finished_at": "2017-03-07T18:10:10.664Z"
},
{
"result": "Set post back state as failed",
"error": "",
"name": "set_post_back_failed",
"started_at": "2017-03-07T18:10:10.770Z",
"finished_at": "2017-03-07T18:10:10.773Z"
},
{
"result": "Posted job results back to specified URLs",
"error": "",
"name": "common.post_back",
"started_at": "2017-03-07T18:10:10.869Z",
"finished_at": "2017-03-07T18:10:10.903Z"
},
{
"result": "OK",
"error": "",
"name": "cnapi.cleanup_allocation_ticket",
"started_at": "2017-03-07T18:10:10.940Z",
"finished_at": "2017-03-07T18:10:11.157Z"
},
{
"result": "OK",
"error": "",
"name": "cnapi.cleanup_vm_ticket",
"started_at": "2017-03-07T18:10:11.252Z",
"finished_at": "2017-03-07T18:10:11.276Z"
},
{
"result": "markAsFailedOnError set, not doing sync GET",
"error": "",
"name": "vmapi.refresh_vm_on_error",
"started_at": "2017-03-07T18:10:11.388Z",
"finished_at": "2017-03-07T18:10:11.395Z"
},
{
"result": "No ticket to release",
"error": "",
"name": "cnapi.release_fabric_nat_ticket",
"started_at": "2017-03-07T18:10:11.420Z",
"finished_at": "2017-03-07T18:10:11.423Z"
},
{
"result": "",
"error": "Error executing job",
"name": "On error",
"started_at": "2017-03-07T18:10:11.453Z",
"finished_at": "2017-03-07T18:10:11.461Z"
}
],
"oncancel_results": [],
"started": 1488906608214,
"action": "provision",
"nicTags": "[\"sdc_overlay\"]",
"allocationTicket": "{\"uuid\":\"f834d885-2ff3-687b-a0d7-b4d5986a1968\",\"server_uuid\":\"44454c4c-5300-104a-804b-b8c04f524432\",\"scope\":\"vm-allocate\",\"id\":\"global\",\"expires_at\":\"2017-03-07T17:11:08.417Z\",\"created_at\":\"2017-03-07T17:10:08.427Z\",\"updated_at\":\"2017-03-07T17:10:08.427Z\",\"status\":\"active\",\"action\":\"allocate\",\"extra\":{}}",
"ticket": "{\"uuid\":\"231a96f1-6c87-ea1e-9150-ffcfc0086917\",\"server_uuid\":\"44454c4c-5300-1043-804d-b8c04f524432\",\"scope\":\"vm\",\"id\":\"6d11703a-b260-e450-e525-8927879d9bad\",\"expires_at\":\"2017-03-07T17:20:09.111Z\",\"created_at\":\"2017-03-07T17:10:09.131Z\",\"updated_at\":\"2017-03-07T17:10:09.131Z\",\"status\":\"active\",\"action\":\"provision\",\"extra\":{\"workflow_job_uuid\":\"cca1bfec-5b98-4d3f-8b13-41984ac1d5db\",\"owner_uuid\":\"90bce6cb-2cd3-e960-80c3-abcbcb0f3cb3\",\"max_physical_memory\":1024,\"cpu_cap\":84,\"quota\":25,\"brand\":\"lx\"}}",
"taskId": "196cf6c6-b38e-cc1e-85ec-d59e696cfea6",
"postBackState": "failed",
"elapsed": 3603.327,
"uuid": "cca1bfec-5b98-4d3f-8b13-41984ac1d5db"
}
[root@sup-spc-east1a ~]#
[root@sup-spc-east1a ~]# risk 90bce6cb-2cd3-e960-80c3-abcbcb0f3cb3
Account Risk Info
-----------------
Type: ACCOUNT
Login: joyent_acceptance_tester
UUID: 90bce6cb-2cd3-e960-80c3-abcbcb0f3cb3
Email: ops+joyent_acceptance_tester@joyent.com
Name: Joyent Tester Tester
Phone: 555-555-5555
Company: Joyent
Country:
Created: December 7, 2016 06:50:26 PM
Updated: February 13, 2017 08:26:08 PM
Approved: true
AdminUI: https://east1a-adminui.scloud.host/users/90bce6cb-2cd3-e960-80c3-abcbcb0f3cb3
Supmon: https://supmon.joyent.com/m/search-owner.html?owner=90bce6cb-2cd3-e960-80c3-abcbcb0f3cb3
Phone Verification:
Sign Setup:
Account History: null
Risk Score:
Risk Explanation:
Blocking Reason:
[root@sup-spc-east1a ~]#
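
The failure above is in "cnapi.wait_task_ensure_image": the task errored with "socket hang up" one hour after it started, which lines up with that task's 3600-second timeout in the chain. A minimal sketch (not part of the captured session, and assuming the JSON body above has been saved as job.json) of pulling the failing entry out of chain_results with plain Node.js:

// find-failed-task.js -- hypothetical helper, not part of the workflow itself.
// Reads the job JSON shown above and prints the chain_results entries that
// carry a non-empty "error" field.
var fs = require('fs');

var job = JSON.parse(fs.readFileSync('job.json', 'utf8'));

// Successful tasks have error: "", so anything else marks the failure point.
job.chain_results.filter(function (task) {
    return task.error !== '';
}).forEach(function (task) {
    console.log('%s failed between %s and %s: %j',
        task.name, task.started_at, task.finished_at, task.error);
});

// Output for this job (wrapped for readability):
// cnapi.wait_task_ensure_image failed between 2017-03-07T17:10:10.438Z
//   and 2017-03-07T18:10:10.440Z: {"name":"Error","message":"socket hang up"}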