@bhechinger
Created January 20, 2020 16:38
Job Parameters
{
"image_uuid": "e75c9d82-3156-11ea-9220-c7a6bb9f41b6",
"owner_uuid": "930896af-bf8c-48d4-885c-6573a94b1853",
"brand": "joyent",
"alias": "bind1",
"server_uuid": "ba39c4be-601a-48d2-9662-00bfe8071f53",
"billing_id": "f6d8d336-0b94-e9a9-94a2-f1aa15d11611",
"pkgBrand": "",
"quota": 6,
"customer_metadata": {},
"networks": [
{
"primary": true,
"ipv4_uuid": "8deadabb-527e-4f03-9695-3f7c9196a8ac",
"ipv4_count": 1
}
],
"creator_uuid": "930896af-bf8c-48d4-885c-6573a94b1853",
"origin": "adminui",
"cpu_cap": 25,
"max_lwps": 4000,
"max_physical_memory": 256,
"max_swap": 1024,
"vcpus": 1,
"zfs_io_priority": 16,
"ram": 256,
"cpu_shares": 16,
"package": {
"name": "sample-256M",
"version": "1.0.0",
"active": true,
"vcpus": 1,
"cpu_cap": 25,
"max_lwps": 4000,
"max_physical_memory": 256,
"max_swap": 1024,
"quota": 6144,
"zfs_io_priority": 16,
"fss": 16,
"billing_tag": "sample-256M",
"uuid": "f6d8d336-0b94-e9a9-94a2-f1aa15d11611",
"created_at": "2020-01-20T15:37:38.365Z",
"updated_at": "2020-01-20T15:37:38.365Z",
"default": false,
"v": 1
},
"image": {
"v": 2,
"uuid": "e75c9d82-3156-11ea-9220-c7a6bb9f41b6",
"owner": "930896af-bf8c-48d4-885c-6573a94b1853",
"name": "base-64-lts",
"version": "19.4.0",
"state": "active",
"disabled": false,
"public": true,
"published_at": "2020-01-07T14:06:30Z",
"type": "zone-dataset",
"os": "smartos",
"files": [
{
"sha1": "68e9c8b45312c7f2b471ca10580de9fd733c7300",
"size": 192623654,
"compression": "gzip"
}
],
"description": "A 64-bit SmartOS image with just essential packages installed. Ideal for users who are comfortable with setting up their own environment and tools.",
"homepage": "https://docs.joyent.com/images/smartos/base",
"urn": "sdc:sdc:base-64-lts:19.4.0",
"requirements": {
"min_platform": {
"7.0": "20141030T081701Z"
},
"networks": [
{
"name": "net0",
"description": "public"
}
]
},
"tags": {
"role": "os",
"group": "base-64-lts"
}
},
"sdc_nat_pool": "f5b28516-6835-4a67-845a-8e92f05dec98",
"firewall_enabled": false,
"sync": false,
"task": "provision",
"vm_uuid": "f66057f9-c904-4d2b-9ce9-ff2fc3c249a3",
"current_state": "provisioning",
"x-request-id": "6ab1dd52-11da-4fb8-8b26-05b391b198b7",
"filteredNetworks": {
"netInfo": [
{
"family": "ipv4",
"mtu": 8500,
"nic_tag": "sdc_overlay",
"name": "My-Fabric-Network",
"provision_end_ip": "192.168.131.250",
"provision_start_ip": "192.168.128.5",
"subnet": "192.168.128.0/22",
"uuid": "8deadabb-527e-4f03-9695-3f7c9196a8ac",
"vlan_id": 2,
"fabric": true,
"vnet_id": 13760546,
"internet_nat": true,
"gateway_provisioned": false,
"resolvers": [
"8.8.8.8",
"8.8.4.4"
],
"gateway": "192.168.128.1",
"owner_uuids": [
"930896af-bf8c-48d4-885c-6573a94b1853"
],
"netmask": "255.255.252.0"
}
],
"networks": [
{
"primary": true,
"ipv4_uuid": "8deadabb-527e-4f03-9695-3f7c9196a8ac",
"ipv4_count": 1
}
],
"fabrics": [
"8deadabb-527e-4f03-9695-3f7c9196a8ac"
],
"pools": [],
"nics": [
{
"belongs_to_type": "zone",
"belongs_to_uuid": "f66057f9-c904-4d2b-9ce9-ff2fc3c249a3",
"mac": "90:b8:d0:d3:bd:47",
"owner_uuid": "930896af-bf8c-48d4-885c-6573a94b1853",
"primary": true,
"state": "provisioning",
"created_timestamp": "2020-01-20T16:24:37.922Z",
"modified_timestamp": "2020-01-20T16:24:37.922Z",
"ip": "192.168.128.6",
"fabric": true,
"gateway": "192.168.128.1",
"gateway_provisioned": false,
"internet_nat": true,
"mtu": 8500,
"netmask": "255.255.252.0",
"nic_tag": "sdc_overlay/13760546",
"resolvers": [
"8.8.8.8",
"8.8.4.4"
],
"vlan_id": 2,
"network_uuid": "8deadabb-527e-4f03-9695-3f7c9196a8ac",
"cn_uuid": "ba39c4be-601a-48d2-9662-00bfe8071f53"
}
]
},
"nics": [
{
"belongs_to_type": "zone",
"belongs_to_uuid": "f66057f9-c904-4d2b-9ce9-ff2fc3c249a3",
"mac": "90:b8:d0:d3:bd:47",
"owner_uuid": "930896af-bf8c-48d4-885c-6573a94b1853",
"primary": true,
"state": "provisioning",
"created_timestamp": "2020-01-20T16:24:37.922Z",
"modified_timestamp": "2020-01-20T16:24:37.922Z",
"ip": "192.168.128.6",
"fabric": true,
"gateway": "192.168.128.1",
"gateway_provisioned": false,
"internet_nat": true,
"mtu": 8500,
"netmask": "255.255.252.0",
"nic_tag": "sdc_overlay/13760546",
"resolvers": [
"8.8.8.8",
"8.8.4.4"
],
"vlan_id": 2,
"network_uuid": "8deadabb-527e-4f03-9695-3f7c9196a8ac",
"cn_uuid": "ba39c4be-601a-48d2-9662-00bfe8071f53"
}
],
"vmTicket": {
"action": "provision",
"created_at": "2020-01-20T16:24:37.154Z",
"expires_at": "2020-01-20T16:34:37.139Z",
"extra": {
"owner_uuid": "930896af-bf8c-48d4-885c-6573a94b1853",
"max_physical_memory": 256,
"cpu_cap": 25,
"quota": 6,
"brand": "joyent"
},
"id": "f66057f9-c904-4d2b-9ce9-ff2fc3c249a3",
"scope": "vm",
"server_uuid": "ba39c4be-601a-48d2-9662-00bfe8071f53",
"status": "active",
"updated_at": "2020-01-20T16:24:37.301Z",
"uuid": "f14a8e0f-b60e-eddc-8bcc-9d392f1df60c"
},
"fabricNatNics": [
{
"belongs_to_type": "zone",
"belongs_to_uuid": "f66057f9-c904-4d2b-9ce9-ff2fc3c249a3",
"mac": "90:b8:d0:d3:bd:47",
"owner_uuid": "930896af-bf8c-48d4-885c-6573a94b1853",
"primary": true,
"state": "provisioning",
"created_timestamp": "2020-01-20T16:24:37.922Z",
"modified_timestamp": "2020-01-20T16:24:37.922Z",
"ip": "192.168.128.6",
"fabric": true,
"gateway": "192.168.128.1",
"gateway_provisioned": false,
"internet_nat": true,
"mtu": 8500,
"netmask": "255.255.252.0",
"nic_tag": "sdc_overlay/13760546",
"resolvers": [
"8.8.8.8",
"8.8.4.4"
],
"vlan_id": 2,
"network_uuid": "8deadabb-527e-4f03-9695-3f7c9196a8ac",
"cn_uuid": "ba39c4be-601a-48d2-9662-00bfe8071f53"
}
],
"fabricNatTickets": [
{
"nic": {
"belongs_to_type": "zone",
"belongs_to_uuid": "f66057f9-c904-4d2b-9ce9-ff2fc3c249a3",
"mac": "90:b8:d0:d3:bd:47",
"owner_uuid": "930896af-bf8c-48d4-885c-6573a94b1853",
"primary": true,
"state": "provisioning",
"created_timestamp": "2020-01-20T16:24:37.922Z",
"modified_timestamp": "2020-01-20T16:24:37.922Z",
"ip": "192.168.128.6",
"fabric": true,
"gateway": "192.168.128.1",
"gateway_provisioned": false,
"internet_nat": true,
"mtu": 8500,
"netmask": "255.255.252.0",
"nic_tag": "sdc_overlay/13760546",
"resolvers": [
"8.8.8.8",
"8.8.4.4"
],
"vlan_id": 2,
"network_uuid": "8deadabb-527e-4f03-9695-3f7c9196a8ac",
"cn_uuid": "ba39c4be-601a-48d2-9662-00bfe8071f53"
},
"ticket": {
"created_at": "2020-01-20T16:24:40.471Z",
"expires_at": "2020-01-20T16:34:40.406Z",
"extra": {},
"id": "8deadabb-527e-4f03-9695-3f7c9196a8ac",
"scope": "fabric_nat",
"server_uuid": "default",
"status": "queued",
"updated_at": "2020-01-20T16:24:40.471Z",
"uuid": "f35a2d67-49b5-4df4-8308-b0d9df1d451e"
}
}
]
}
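Note the fabric NIC in fabricNatNics above: it sits on network 8deadabb-527e-4f03-9695-3f7c9196a8ac with internet_nat true and gateway_provisioned false, which is exactly the condition that forces the workflow to stand up a NAT zone before the VM itself can be provisioned. A minimal Node.js sketch of that check, assuming the JSON above has been saved as job-params.json (a hypothetical file name), mirroring the tests the chain tasks in the raw output below apply:

'use strict';

// Hypothetical dump of the "Job Parameters" JSON above.
var params = require('./job-params.json');

// A NAT zone is only needed for fabric NICs that want internet NAT
// and whose network has no gateway zone provisioned yet.
var natNics = (params.fabricNatNics || []).filter(function (nic) {
    return nic.fabric && nic.internet_nat && !nic.gateway_provisioned;
});

if (natNics.length === 0) {
    console.log('No fabric NAT provision required');
} else {
    natNics.forEach(function (nic) {
        console.log('NAT zone needed for network %s (gateway %s)',
            nic.network_uuid, nic.gateway);
    });
}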
Info Output
[]
FAILED
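The info output is empty and the job failed. The chain_results in the raw output below show the first four tasks succeeding and napi.provision_fabric_nats returning "task timeout error" after exactly its configured 120-second budget (started 16:24:40.922Z, finished 16:26:40.958Z), i.e. the fabric NAT zone provision never completed in time. A minimal sketch for pulling the failing step out of such a dump, assuming the raw output below is saved as raw-output.json (hypothetical name):

'use strict';

// Hypothetical dump of the "Raw Output" JSON below.
var job = require('./raw-output.json');

// Successful tasks carry an empty "error" string; anything else failed.
job.chain_results.filter(function (r) {
    return r.error !== '';
}).forEach(function (r) {
    var ms = new Date(r.finished_at) - new Date(r.started_at);
    console.log('%s failed after %dms: %s', r.name, ms, r.error);
});
// Prints: napi.provision_fabric_nats failed after 120036ms: task timeout error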
Raw Output
{
"uuid": "1f2d80f0-5b1c-45d6-992c-5fb262d61988",
"execution": "failed",
"chain_results": [
{
"result": "All parameters OK!",
"error": "",
"name": "common.validate_params",
"started_at": "2020-01-20T16:24:39.722Z",
"finished_at": "2020-01-20T16:24:39.765Z"
},
{
"result": "Action set",
"error": "",
"name": "workflow.set_job_action",
"started_at": "2020-01-20T16:24:39.902Z",
"finished_at": "2020-01-20T16:24:39.949Z"
},
{
"result": "Image has generate_passwords=true but no users found",
"error": "",
"name": "imgapi.generate_passwords",
"started_at": "2020-01-20T16:24:40.141Z",
"finished_at": "2020-01-20T16:24:40.176Z"
},
{
"result": "Fabric NAT tickets acquired",
"error": "",
"name": "cnapi.acquire_fabric_nat_tickets",
"started_at": "2020-01-20T16:24:40.361Z",
"finished_at": "2020-01-20T16:24:40.733Z"
},
{
"result": "",
"error": "task timeout error",
"name": "napi.provision_fabric_nats",
"started_at": "2020-01-20T16:24:40.922Z",
"finished_at": "2020-01-20T16:26:40.958Z"
}
],
"params": {
"image_uuid": "e75c9d82-3156-11ea-9220-c7a6bb9f41b6",
"owner_uuid": "930896af-bf8c-48d4-885c-6573a94b1853",
"brand": "joyent",
"alias": "bind1",
"server_uuid": "ba39c4be-601a-48d2-9662-00bfe8071f53",
"billing_id": "f6d8d336-0b94-e9a9-94a2-f1aa15d11611",
"pkgBrand": "",
"quota": 6,
"customer_metadata": {},
"networks": [
{
"primary": true,
"ipv4_uuid": "8deadabb-527e-4f03-9695-3f7c9196a8ac",
"ipv4_count": 1
}
],
"creator_uuid": "930896af-bf8c-48d4-885c-6573a94b1853",
"origin": "adminui",
"cpu_cap": 25,
"max_lwps": 4000,
"max_physical_memory": 256,
"max_swap": 1024,
"vcpus": 1,
"zfs_io_priority": 16,
"ram": 256,
"cpu_shares": 16,
"package": {
"name": "sample-256M",
"version": "1.0.0",
"active": true,
"vcpus": 1,
"cpu_cap": 25,
"max_lwps": 4000,
"max_physical_memory": 256,
"max_swap": 1024,
"quota": 6144,
"zfs_io_priority": 16,
"fss": 16,
"billing_tag": "sample-256M",
"uuid": "f6d8d336-0b94-e9a9-94a2-f1aa15d11611",
"created_at": "2020-01-20T15:37:38.365Z",
"updated_at": "2020-01-20T15:37:38.365Z",
"default": false,
"v": 1
},
"image": {
"v": 2,
"uuid": "e75c9d82-3156-11ea-9220-c7a6bb9f41b6",
"owner": "930896af-bf8c-48d4-885c-6573a94b1853",
"name": "base-64-lts",
"version": "19.4.0",
"state": "active",
"disabled": false,
"public": true,
"published_at": "2020-01-07T14:06:30Z",
"type": "zone-dataset",
"os": "smartos",
"files": [
{
"sha1": "68e9c8b45312c7f2b471ca10580de9fd733c7300",
"size": 192623654,
"compression": "gzip"
}
],
"description": "A 64-bit SmartOS image with just essential packages installed. Ideal for users who are comfortable with setting up their own environment and tools.",
"homepage": "https://docs.joyent.com/images/smartos/base",
"urn": "sdc:sdc:base-64-lts:19.4.0",
"requirements": {
"min_platform": {
"7.0": "20141030T081701Z"
},
"networks": [
{
"name": "net0",
"description": "public"
}
]
},
"tags": {
"role": "os",
"group": "base-64-lts"
}
},
"sdc_nat_pool": "f5b28516-6835-4a67-845a-8e92f05dec98",
"firewall_enabled": false,
"sync": false,
"task": "provision",
"vm_uuid": "f66057f9-c904-4d2b-9ce9-ff2fc3c249a3",
"current_state": "provisioning",
"x-request-id": "6ab1dd52-11da-4fb8-8b26-05b391b198b7",
"filteredNetworks": {
"netInfo": [
{
"family": "ipv4",
"mtu": 8500,
"nic_tag": "sdc_overlay",
"name": "My-Fabric-Network",
"provision_end_ip": "192.168.131.250",
"provision_start_ip": "192.168.128.5",
"subnet": "192.168.128.0/22",
"uuid": "8deadabb-527e-4f03-9695-3f7c9196a8ac",
"vlan_id": 2,
"fabric": true,
"vnet_id": 13760546,
"internet_nat": true,
"gateway_provisioned": false,
"resolvers": [
"8.8.8.8",
"8.8.4.4"
],
"gateway": "192.168.128.1",
"owner_uuids": [
"930896af-bf8c-48d4-885c-6573a94b1853"
],
"netmask": "255.255.252.0"
}
],
"networks": [
{
"primary": true,
"ipv4_uuid": "8deadabb-527e-4f03-9695-3f7c9196a8ac",
"ipv4_count": 1
}
],
"fabrics": [
"8deadabb-527e-4f03-9695-3f7c9196a8ac"
],
"pools": [],
"nics": [
{
"belongs_to_type": "zone",
"belongs_to_uuid": "f66057f9-c904-4d2b-9ce9-ff2fc3c249a3",
"mac": "90:b8:d0:d3:bd:47",
"owner_uuid": "930896af-bf8c-48d4-885c-6573a94b1853",
"primary": true,
"state": "provisioning",
"created_timestamp": "2020-01-20T16:24:37.922Z",
"modified_timestamp": "2020-01-20T16:24:37.922Z",
"ip": "192.168.128.6",
"fabric": true,
"gateway": "192.168.128.1",
"gateway_provisioned": false,
"internet_nat": true,
"mtu": 8500,
"netmask": "255.255.252.0",
"nic_tag": "sdc_overlay/13760546",
"resolvers": [
"8.8.8.8",
"8.8.4.4"
],
"vlan_id": 2,
"network_uuid": "8deadabb-527e-4f03-9695-3f7c9196a8ac",
"cn_uuid": "ba39c4be-601a-48d2-9662-00bfe8071f53"
}
]
},
"nics": [
{
"belongs_to_type": "zone",
"belongs_to_uuid": "f66057f9-c904-4d2b-9ce9-ff2fc3c249a3",
"mac": "90:b8:d0:d3:bd:47",
"owner_uuid": "930896af-bf8c-48d4-885c-6573a94b1853",
"primary": true,
"state": "provisioning",
"created_timestamp": "2020-01-20T16:24:37.922Z",
"modified_timestamp": "2020-01-20T16:24:37.922Z",
"ip": "192.168.128.6",
"fabric": true,
"gateway": "192.168.128.1",
"gateway_provisioned": false,
"internet_nat": true,
"mtu": 8500,
"netmask": "255.255.252.0",
"nic_tag": "sdc_overlay/13760546",
"resolvers": [
"8.8.8.8",
"8.8.4.4"
],
"vlan_id": 2,
"network_uuid": "8deadabb-527e-4f03-9695-3f7c9196a8ac",
"cn_uuid": "ba39c4be-601a-48d2-9662-00bfe8071f53"
}
],
"vmTicket": {
"action": "provision",
"created_at": "2020-01-20T16:24:37.154Z",
"expires_at": "2020-01-20T16:34:37.139Z",
"extra": {
"owner_uuid": "930896af-bf8c-48d4-885c-6573a94b1853",
"max_physical_memory": 256,
"cpu_cap": 25,
"quota": 6,
"brand": "joyent"
},
"id": "f66057f9-c904-4d2b-9ce9-ff2fc3c249a3",
"scope": "vm",
"server_uuid": "ba39c4be-601a-48d2-9662-00bfe8071f53",
"status": "active",
"updated_at": "2020-01-20T16:24:37.301Z",
"uuid": "f14a8e0f-b60e-eddc-8bcc-9d392f1df60c"
},
"fabricNatNics": [
{
"belongs_to_type": "zone",
"belongs_to_uuid": "f66057f9-c904-4d2b-9ce9-ff2fc3c249a3",
"mac": "90:b8:d0:d3:bd:47",
"owner_uuid": "930896af-bf8c-48d4-885c-6573a94b1853",
"primary": true,
"state": "provisioning",
"created_timestamp": "2020-01-20T16:24:37.922Z",
"modified_timestamp": "2020-01-20T16:24:37.922Z",
"ip": "192.168.128.6",
"fabric": true,
"gateway": "192.168.128.1",
"gateway_provisioned": false,
"internet_nat": true,
"mtu": 8500,
"netmask": "255.255.252.0",
"nic_tag": "sdc_overlay/13760546",
"resolvers": [
"8.8.8.8",
"8.8.4.4"
],
"vlan_id": 2,
"network_uuid": "8deadabb-527e-4f03-9695-3f7c9196a8ac",
"cn_uuid": "ba39c4be-601a-48d2-9662-00bfe8071f53"
}
],
"fabricNatTickets": [
{
"nic": {
"belongs_to_type": "zone",
"belongs_to_uuid": "f66057f9-c904-4d2b-9ce9-ff2fc3c249a3",
"mac": "90:b8:d0:d3:bd:47",
"owner_uuid": "930896af-bf8c-48d4-885c-6573a94b1853",
"primary": true,
"state": "provisioning",
"created_timestamp": "2020-01-20T16:24:37.922Z",
"modified_timestamp": "2020-01-20T16:24:37.922Z",
"ip": "192.168.128.6",
"fabric": true,
"gateway": "192.168.128.1",
"gateway_provisioned": false,
"internet_nat": true,
"mtu": 8500,
"netmask": "255.255.252.0",
"nic_tag": "sdc_overlay/13760546",
"resolvers": [
"8.8.8.8",
"8.8.4.4"
],
"vlan_id": 2,
"network_uuid": "8deadabb-527e-4f03-9695-3f7c9196a8ac",
"cn_uuid": "ba39c4be-601a-48d2-9662-00bfe8071f53"
},
"ticket": {
"created_at": "2020-01-20T16:24:40.471Z",
"expires_at": "2020-01-20T16:34:40.406Z",
"extra": {},
"id": "8deadabb-527e-4f03-9695-3f7c9196a8ac",
"scope": "fabric_nat",
"server_uuid": "default",
"status": "queued",
"updated_at": "2020-01-20T16:24:40.471Z",
"uuid": "f35a2d67-49b5-4df4-8308-b0d9df1d451e"
}
}
]
},
"image_uuid": "e75c9d82-3156-11ea-9220-c7a6bb9f41b6",
"server_uuid": "ba39c4be-601a-48d2-9662-00bfe8071f53",
"creator_uuid": "930896af-bf8c-48d4-885c-6573a94b1853",
"origin": "adminui",
"task": "provision",
"target": "/provision-f66057f9-c904-4d2b-9ce9-ff2fc3c249a3",
"vm_uuid": "f66057f9-c904-4d2b-9ce9-ff2fc3c249a3",
"workflow": "b163790a-19d8-49f4-bdc5-bb8855a8c2fa",
"exec_after": null,
"num_attempts": 0,
"name": "provision-8.2.2",
"version": "8.2.2",
"chain": [
{
"name": "common.validate_params",
"timeout": 10,
"retry": 1,
"body": "function validateParams(job, cb) {\n if (napiUrl === undefined) {\n return cb('No NAPI parameters provided');\n }\n\n if (ufdsUrl === undefined || ufdsDn === undefined ||\n ufdsPassword === undefined) {\n return cb('No UFDS parameters provided');\n }\n\n if (cnapiUrl === undefined) {\n return cb('No CNAPI URL provided');\n }\n\n if (fwapiUrl === undefined) {\n return cb('No FWAPI URL provided');\n }\n\n if (imgapiUrl === undefined) {\n return cb('No IMGAPI URL provided');\n }\n\n if (sapiUrl === undefined) {\n return cb('No SAPI URL provided');\n }\n\n if (job.params['owner_uuid'] === undefined) {\n return cb('\\'owner_uuid\\' is required');\n }\n\n if (job.params.brand === undefined) {\n return cb('VM \\'brand\\' is required');\n }\n\n return cb(null, 'All parameters OK!');\n}"
},
{
"name": "workflow.set_job_action",
"timeout": 10,
"retry": 1,
"body": "function setJobAction(job, cb) {\n job.action = 'provision';\n return cb(null, 'Action set');\n}",
"modules": {}
},
{
"name": "imgapi.generate_passwords",
"timeout": 10,
"retry": 1,
"body": "function generatePasswords(job, cb) {\n var log = job.log;\n var execFile = childProcess.execFile;\n var PWD_LENGTH = 12;\n var APG_COMMAND = '/opt/local/bin/apg';\n var APG_ARGS = [\n '-m', PWD_LENGTH,\n '-M', 'SCNL',\n '-n', 1,\n '-E', '\"\\'@$%&*/.:[]\\\\'\n ];\n\n if (job.params.image['generate_passwords'] === false) {\n return cb(null, 'No need to generate passwords for image');\n }\n\n if (job.params.image.users === undefined ||\n !Array.isArray(job.params.image.users)) {\n return cb(null, 'Image has generate_passwords=true but no users found');\n }\n\n if (job.params['internal_metadata'] === undefined) {\n job.params['internal_metadata'] = {};\n }\n\n var users = job.params.image.users;\n var name;\n var password;\n\n async.mapSeries(users, function (user, next) {\n name = user.name + '_pw';\n if (job.params['internal_metadata'][name] === undefined) {\n execFile(APG_COMMAND, APG_ARGS, function (err, stdout, stderr) {\n if (err) {\n log.info({ err: err }, 'Error generating random password');\n return next(err);\n }\n\n password = stdout.toString().replace(/\\n|\\r/g, '');\n job.params['internal_metadata'][name] = password;\n return next();\n });\n } else {\n return next();\n }\n }, function (err) {\n if (err) {\n cb(err, 'Could not generate passwords');\n } else {\n cb(null, 'Passwords generated for Image');\n }\n });\n}",
"modules": {
"childProcess": "child_process",
"async": "async"
}
},
{
"name": "cnapi.acquire_fabric_nat_tickets",
"timeout": 10,
"retry": 1,
"body": "function acquireFabricTickets(job, cb) {\n if (!job.params.fabricNatNics || job.params.fabricNatNics.length === 0) {\n return cb(null, 'No fabric NICs');\n }\n\n var cnapi = new sdcClients.CNAPI({\n url: cnapiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n var nics = [];\n var netuuids = [];\n\n job.params.fabricNatTickets = [];\n\n // Uniquify, just in case\n for (var n in job.params.fabricNatNics) {\n if (netuuids.indexOf(job.params.fabricNatNics[n].network_uuid) === -1) {\n nics.push(job.params.fabricNatNics[n]);\n netuuids.push(job.params.fabricNatNics[n].network_uuid);\n }\n }\n\n async.mapSeries(nics, function (nic, next) {\n var newTicket = {\n scope: 'fabric_nat',\n id: nic.network_uuid,\n expires_at: (new Date(\n Date.now() + 600 * 1000).toISOString())\n };\n\n cnapi.waitlistTicketCreate('default', newTicket, onCreate);\n\n function onCreate(err, ticket) {\n if (err) {\n next(err);\n return;\n }\n\n // look up ticket, ensure it's not expired or invalid\n cnapi.waitlistTicketGet(ticket.uuid,\n function (geterr, getticket) {\n if (geterr) {\n next(geterr);\n return;\n }\n\n job.params.fabricNatTickets.push({\n nic: nic,\n ticket: getticket\n });\n job.log.info(\n { nic: nic, ticket: getticket },\n 'ticket status after create');\n next();\n });\n }\n }, function (sErr) {\n if (sErr) {\n cb(sErr);\n } else {\n cb(null, 'Fabric NAT tickets acquired');\n }\n });\n}",
"modules": {
"sdcClients": "sdc-clients",
"async": "async"
}
},
{
"name": "napi.provision_fabric_nats",
"timeout": 120,
"retry": 1,
"body": "function provisionFabricNats(job, cb) {\n if (!job.params.fabricNatTickets ||\n job.params.fabricNatTickets.length === 0) {\n return cb(null, 'No fabric NATs to provision');\n }\n\n if (!job.params.sdc_nat_pool) {\n return cb(new Error('No fabric NAT pool configured for provisioning'));\n }\n\n var cnapi = new sdcClients.CNAPI({\n url: cnapiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n var napi = new sdcClients.NAPI({\n url: napiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n var natSvc;\n var sapi = new sdcClients.SAPI({\n log: job.log.child({ component: 'sapi' }),\n url: sapiUrl,\n version: '~2',\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n\n function releaseTicket(tErr, ticket, tCb) {\n cnapi.waitlistTicketRelease(ticket.uuid, function (relErr) {\n if (relErr) {\n job.log.error({ ticket: ticket, err: relErr },\n 'Error releasing ticket');\n }\n\n if (tErr) {\n tCb(tErr);\n return;\n }\n\n tCb(relErr);\n return;\n });\n }\n\n /*\n * Provision a new NAT zone through SAPI on two networks:\n * - the configured NAT network pool\n * - the fabric network that needs a NAT zone\n */\n function provisionNatZone(tick, done) {\n var fabricNic = tick.nic;\n\n // If we were waiting on a ticket because another NAT zone was being\n // provisioned and it succeeded, we don't need to provision another.\n napi.getNetwork(fabricNic.network_uuid, function (netErr, fNet) {\n if (netErr) {\n return done(netErr);\n }\n\n if (fNet.gateway_provisioned) {\n job.log.debug({ ticket: tick.ticket.uuid, net: fNet },\n 'Network already has gateway provisioned');\n tick.gateway_provisioned = true;\n return releaseTicket(null, tick.ticket, done);\n }\n\n var instParams = {\n metadata: {\n 'com.joyent:ipnat_subnet': fNet.subnet\n },\n params: {\n alias: 'nat-' + fabricNic.network_uuid,\n internal_metadata: {\n 'com.joyent:ipnat_owner': job.params.owner_uuid\n },\n networks: [\n {\n uuid: job.params.sdc_nat_pool,\n primary: true,\n allow_ip_spoofing: true\n },\n {\n uuid: fabricNic.network_uuid,\n ip: fabricNic.gateway,\n allow_ip_spoofing: true\n }\n ],\n ticket: tick.ticket.uuid\n }\n };\n\n sapi.createInstanceAsync(natSvc, instParams,\n function _afterSapiProv(createErr, inst) {\n if (createErr) {\n return releaseTicket(createErr, tick.ticket, done);\n }\n\n job.log.info({ instance: inst, natSvc: natSvc },\n 'Created NAT instance');\n\n tick.job_uuid = inst.job_uuid;\n tick.vm_uuid = inst.uuid;\n return done();\n });\n });\n }\n\n sapi.listServices({ name: 'nat' }, function (sapiErr, svcs) {\n if (sapiErr) {\n return cb(sapiErr);\n }\n\n if (!svcs || svcs.length === 0) {\n return cb(new Error('No \"nat\" service found in SAPI'));\n }\n\n if (svcs.length > 1) {\n return cb(new Error('More than one \"nat\" service found in SAPI'));\n }\n\n natSvc = svcs[0].uuid;\n job.log.info({ svc: natSvc, svcs: svcs }, 'svcs');\n\n async.forEach(job.params.fabricNatTickets, function (tick, next) {\n if (tick.ticket.status === 'active') {\n return provisionNatZone(tick, next);\n }\n\n cnapi.waitlistTicketWait(tick.ticket.uuid,\n function _afterWait(tErr) {\n if (tErr) {\n next(tErr);\n } else {\n provisionNatZone(tick, next);\n }\n });\n\n }, function (aErr) {\n if (aErr) {\n cb(aErr);\n } else {\n cb(null, 'Provisioned fabric NATs');\n }\n });\n });\n}",
"modules": {
"sdcClients": "sdc-clients",
"async": "async"
}
},
{
"name": "cnapi.ensure_image",
"timeout": 300,
"retry": 1,
"body": "function ensureImage(job, cb) {\n var commonHeaders = { 'x-request-id': job.params['x-request-id'] };\n var cnapi = new sdcClients.CNAPI({ url: cnapiUrl, headers: commonHeaders });\n\n var ensurePayload = {};\n\n if (['bhyve', 'kvm'].indexOf(job.params['brand']) !== -1) {\n ensurePayload.image_uuid = job.params.disks[0].image_uuid;\n } else {\n ensurePayload.image_uuid = job.params.image_uuid;\n }\n\n if (job.params.imgapiPeers !== undefined) {\n ensurePayload.imgapiPeers = job.params.imgapiPeers;\n }\n\n cnapi.ensureImage(job.params['server_uuid'], ensurePayload,\n function (error, task) {\n if (error) {\n return cb(error);\n }\n\n job.taskId = task.id;\n return cb(null, 'Ensure image task queued!');\n });\n}",
"modules": {
"sdcClients": "sdc-clients"
}
},
{
"name": "cnapi.wait_task_ensure_image",
"timeout": 3600,
"retry": 1,
"body": "function waitTask(job, cb) {\n if (job.params['skip_zone_action']) {\n cb(null, 'Skipping waitTask');\n return;\n }\n\n if (!job.taskId) {\n cb('No taskId provided');\n return;\n }\n\n if (!cnapiUrl) {\n cb('No CNAPI URL provided');\n return;\n }\n\n var cnapi = new sdcClients.CNAPI({\n url: cnapiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n\n cnapi.waitTask(job.taskId, {}, onTask);\n\n function onTask(err, task) {\n if (err) {\n if (err.statusCode === 404) {\n // fallback to pollTask\n cnapi.pollTask(job.taskId, {}, function (pollerr, polltask) {\n // Make sure loops cannot happen\n if (pollerr && pollerr.statusCode === 404) {\n cb(pollerr);\n return;\n }\n onTask(pollerr, polltask);\n });\n return;\n }\n cb(err);\n } else if (task && task.status == 'failure') {\n cb(getErrorMesage(task));\n } else if (task && task.status == 'complete') {\n // Tasks that modify VM state should add a .vm to the task\n // with something like \"self.finish({ vm: machine });\"\n if (task.history && task.history.length > 0 &&\n task.history[0].name === 'finish' &&\n task.history[0].event) {\n\n var event = task.history[0].event;\n if (event.vm) {\n job.finished_vm = event.vm;\n job.log.debug({vm_uuid: job.finished_vm.uuid},\n 'finish() returned VM');\n }\n\n job.log.debug({event: event}, 'finish() returned event');\n\n if (job.store_task_finish_event_in_attribute) {\n job[job.store_task_finish_event_in_attribute] = event;\n }\n }\n\n cb(null, 'Job succeeded!');\n } else {\n cb(new Error('unexpected task status, ' + task.status));\n }\n }\n\n function getErrorMesage(task) {\n var message;\n var details = [];\n\n if (task.history !== undefined && task.history.length) {\n for (var i = 0; i < task.history.length; i++) {\n var event = task.history[i];\n if (event.name && event.name === 'error' && event.event &&\n event.event.error) {\n var err = event.event.error;\n if (typeof (err) === 'string') {\n message = err;\n if (event.event.details && event.event.details.error) {\n message += ', ' + event.event.details.error;\n }\n } else {\n message = err.message;\n }\n } else if (event.name && event.name === 'finish' &&\n event.event && event.event.log && event.event.log.length) {\n for (var j = 0; j < event.event.log.length; j++) {\n var logEvent = event.event.log[j];\n if (logEvent.level && logEvent.level === 'error') {\n details.push(logEvent.message);\n }\n }\n }\n }\n }\n\n // Apparently the task doesn't have any message for us...\n if (message === undefined) {\n message = 'Unexpected error occured';\n } else if (details.length) {\n message += ': ' + details.join(', ');\n }\n\n return message;\n }\n}",
"modules": {
"sdcClients": "sdc-clients"
}
},
{
"name": "cnapi.wait_for_fabric_nat_provisions",
"timeout": 600,
"retry": 1,
"body": "function waitForFabricNatProvisions(job, cb) {\n if (!job.params.fabricNatTickets ||\n job.params.fabricNatTickets.length === 0) {\n return cb(null, 'No fabric NATs provisioned');\n }\n\n // Filter out tickets that didn't end up needing a gateway provisioned\n var toWaitFor = job.params.fabricNatTickets.filter(function (t) {\n return !t.gateway_provisioned;\n });\n\n if (toWaitFor.length === 0) {\n return cb(null, 'No fabric NAT provisions left to wait for');\n }\n\n var vmapi = new sdcClients.VMAPI({\n url: vmapiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n\n function checkVm(tick, done) {\n var uuid = tick.vm_uuid;\n vmapi.getVm({ uuid: uuid }, onVmapi);\n\n function onVmapi(err, vm, req, res) {\n if (err) {\n cb(err);\n\n } else if (vm.state === 'running') {\n done();\n\n } else if (vm.state === 'failed') {\n done(new Error(\n 'NAT zone \"' + vm.uuid + '\" failed to provision'));\n\n } else {\n setTimeout(checkVm, 1000, tick, done);\n }\n }\n }\n\n async.forEach(toWaitFor, checkVm, function (aErr) {\n if (aErr) {\n cb(aErr);\n } else {\n cb(null, 'Fabric NATs running');\n }\n });\n}",
"modules": {
"sdcClients": "sdc-clients",
"async": "async"
}
},
{
"name": "volapi.provision_nfs_volumes",
"timeout": 120,
"retry": 1,
"body": "function provisionNfsVolumes(job, cb) {\n var requiredVolumes;\n\n if (typeof (volapiUrl) === 'undefined') {\n cb(null, 'URL for volapi service not present, not provisioning NFS ' +\n 'volumes');\n return;\n }\n\n requiredVolumes = job.params.volumes;\n if (requiredVolumes === undefined || requiredVolumes.length === 0) {\n /*\n * No need to provision any volume, because this VM does not\n * require any.\n */\n cb();\n return;\n }\n\n vasync.forEachParallel({\n func: provisionNfsVolume,\n inputs: requiredVolumes\n }, function onAllRequiredVolumesProvisioned(err, results) {\n job.log.info({err: err, results: results},\n 'provisionNfsVolumes results');\n\n if (!err) {\n if (results === null || typeof (results) !== 'object') {\n cb(new Error('results must be an object'));\n return;\n }\n\n if (!Array.isArray(results.operations)) {\n cb(new Error('results.operations must be an array'));\n return;\n }\n\n var createdVolumes = {};\n var operationResultIndex;\n\n for (operationResultIndex in results.operations) {\n var operationResult =\n results.operations[operationResultIndex].result;\n\n if (operationResult === null ||\n typeof (operationResult) !== 'object') {\n cb(new Error('operationResult must be an object'));\n return;\n }\n\n createdVolumes[operationResult.uuid] = operationResult;\n }\n\n job.createdVolumes = createdVolumes;\n cb(null, 'All volumes provisioned');\n } else {\n cb(new verror(err, 'Could not provision volumes'));\n }\n });\n\n /*\n * This function is responsible for:\n *\n * 1. Reserving the volume with name \"volumeName\" for the owner of the VM\n * that is being provisioned.\n *\n * 2. Create that volume if it does not already exist.\n *\n * When this function call its \"callback\" function, it either:\n *\n * 1. succeeded to reserve the volume and create/load it\n *\n * 2. failed to reserve the volume\n *\n * 3. 
succeeded to reserve the volume, failed to create/load it, and\n * attempted to cancel the reservation.\n *\n * In cases 2 and 3, \"callback\" will be called with an error object as its\n * first argument.\n */\n function provisionNfsVolume(requiredVolume, callback) {\n var vmNics;\n var volumeName;\n\n if (typeof (requiredVolume) !== 'object' || requiredVolume === null) {\n callback(new Error('requiredVolume must be a non-null object'));\n return;\n }\n\n volumeName = requiredVolume.name;\n if (typeof (volumeName) !== 'string') {\n callback(new Error('volumeName must be a string'));\n return;\n }\n\n var volapi = new sdcClients.VOLAPI({\n url: volapiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] },\n userAgent: 'workflow/' + job.name\n });\n\n if (job.params !== undefined) {\n vmNics = job.params.nics;\n }\n\n job.log.info({vmNics: vmNics}, 'VM nics');\n\n var provisionContext = {};\n\n vasync.pipeline({arg: provisionContext, funcs: [\n function loadVolume(ctx, next) {\n job.log.info({\n name: volumeName,\n owner_uuid: job.params.owner_uuid\n }, 'Loading volume');\n\n volapi.listVolumes({\n name: volumeName,\n owner_uuid: job.params.owner_uuid,\n predicate: JSON.stringify({\n eq: ['state', 'ready']\n })\n }, function onListVolumes(listVolumesErr, volumes) {\n var errMsg;\n var err;\n\n if (!listVolumesErr) {\n if (volumes && volumes.length > 1) {\n errMsg = 'more than one volume with name '\n + volumeName + ' and owner_uuid: '\n + job.params.owner_uuid + ' when we '\n + 'expected exactly one';\n job.log.error({volumes: volumes},\n 'Error: ' + errMsg);\n\n err = new Error(errMsg);\n next(err);\n return;\n }\n\n if (volumes && volumes.length === 1) {\n job.log.info({\n volume: volumes[0]\n }, 'Found volume');\n ctx.volume = volumes[0];\n } else {\n job.log.info({\n name: volumeName,\n owner_uuid: job.params.owner_uuid\n }, 'Did not find any volume');\n }\n\n next();\n } else {\n job.log.error({\n error: listVolumesErr\n }, 'Error when listing volumes');\n\n /*\n * Ignoring this error for now, since we'll try to\n * create the volume later, and retry to load it if\n * it already exists. 
If we get an error loading the\n * volume then, we'll make the task fail.\n */\n next();\n }\n });\n },\n function reserve(ctx, next) {\n if (job.params.vm_uuid === undefined) {\n job.params.vm_uuid = uuid.v4();\n }\n\n job.log.info({\n volume_uuid: ctx.volumeUuid,\n job_uuid: job.uuid,\n vm_uuid: job.params.vm_uuid,\n owner_uuid: job.params.owner_uuid\n }, 'Reserving volume');\n\n volapi.createVolumeReservation({\n owner_uuid: job.params.owner_uuid,\n job_uuid: job.uuid,\n vm_uuid: job.params.vm_uuid,\n volume_name: volumeName\n }, function onVolRes(volResErr, volRes) {\n ctx.volumeReservation = volRes;\n next(volResErr);\n });\n },\n function provision(ctx, next) {\n var i;\n var fabricNetworkUuids = [];\n var invalidNics = [];\n var vmNic;\n var volumeCreationParams;\n\n if (ctx.volume) {\n job.log.info({\n volume: ctx.volume\n }, 'Volume already exists, no need to create it');\n next();\n return;\n }\n\n job.log.info('Volume does not exist, creating it');\n\n volumeCreationParams = {\n name: volumeName,\n owner_uuid: job.params.owner_uuid,\n type: 'tritonnfs'\n };\n\n /*\n * If the volume doesn't exist, then we created its uuid\n * beforehand to register a reservation for it, so we must\n * pass that uuid to VOLAPI so that it uses that uuid when\n * creating the volume to match the reservation.\n */\n if (ctx.volumeUuid !== undefined) {\n volumeCreationParams.uuid = ctx.volumeUuid;\n }\n\n /*\n * If the VM being provisioned has nics attached to fabric\n * networks, we'll attach the volume to be provisioned to the\n * same networks. Otherwise, the default fabric network will be\n * picked as a default by VOLAPI.\n */\n for (i = 0; i < vmNics.length; ++i) {\n vmNic = vmNics[i];\n\n if (typeof (vmNic) !== 'object') {\n invalidNics.push(vmNic);\n continue;\n }\n\n if (vmNic.nic_tag &&\n vmNic.nic_tag.indexOf('sdc_overlay/') === 0) {\n fabricNetworkUuids.push(vmNic.network_uuid);\n }\n }\n\n if (invalidNics.length > 0) {\n next('invalid nics: ' + invalidNics);\n return;\n }\n\n volumeCreationParams.networks = fabricNetworkUuids;\n\n volapi.createVolumeAndWait(volumeCreationParams,\n onVolumeCreated);\n\n function onVolumeCreated(volumeCreationErr, createdVolume) {\n if (!volumeCreationErr) {\n job.log.info({createdVolume: createdVolume},\n 'Created new volume');\n ctx.volume = createdVolume;\n next();\n return;\n }\n\n if (volumeCreationErr.restCode === 'VolumeAlreadyExists') {\n job.log.info('Volume with name: ' +\n volumeName + ' already exists, ' +\n 'loading it...');\n\n volapi.listVolumes({\n name: volumeName,\n owner_uuid: job.params.owner_uuid,\n predicate: JSON.stringify({\n or: [\n {eq: ['state', 'ready']},\n {eq: ['state', 'creating']}\n ]\n })\n }, function onListVolumes(listVolumesErr, volumes) {\n var loadedVolume;\n var errMsg;\n var existingVolNumberMismatchErr;\n\n if (listVolumesErr) {\n job.log.error({\n err: listVolumesErr\n }, 'Error when loading existing volume');\n next(listVolumesErr);\n return;\n }\n\n if (!volumes || (volumes && volumes.length !== 1)) {\n errMsg =\n 'Zero or more than one volume with name ' +\n volumeName + ' and ' + 'owner_uuid: ' +\n job.params.owner_uuid + ' when we ' +\n 'expected exactly one';\n\n job.log.error({volumes: volumes}, errMsg);\n\n existingVolNumberMismatchErr =\n new Error(errMsg);\n next(existingVolNumberMismatchErr);\n return;\n }\n\n job.log.info({loadedVolume: loadedVolume},\n 'Loaded existing volume');\n ctx.volume = volumes[0];\n next();\n return;\n });\n } else {\n job.log.error({error: volumeCreationErr},\n 'Failed to 
created volume');\n next(volumeCreationErr);\n return;\n }\n }\n }]}, function onVolumeProvDone(volProvErr) {\n if (provisionContext.volumeReservation === true &&\n volProvErr !== undefined) {\n job.log.info({\n volumeReservation: provisionContext.volumeReservation\n }, 'Cancelling volume reservation');\n\n volapi.deleteVolumeReservation({\n uuid: provisionContext.volumeReservation.uuid,\n owner_uuid: job.params.owner_uuid\n }, function onVolResRemoved(volResRemoveErr) {\n if (volResRemoveErr) {\n job.log.error({err: volResRemoveErr},\n 'Error when removing volume reservation');\n } else {\n job.log.info('Successfully removed volume ' +\n 'reservation');\n }\n\n callback(volProvErr, provisionContext.volume);\n });\n } else {\n callback(volProvErr, provisionContext.volume);\n }\n });\n }\n}"
},
{
"name": "cnapi.wait_for_nfs_volumes_provisions",
"timeout": 120,
"retry": 1,
"body": "function waitForNfsVolumeProvisions(job, callback) {\n if (job.requiredNfsVolumes !== undefined &&\n !Array.isArray(job.requiredNfsVolumes)) {\n callback(new Error('job.requiredNfsVolumes must be an array if '\n + 'present'));\n return;\n }\n\n if (!job.createdVolumes || Object.keys(job.createdVolumes).length === 0) {\n callback(null, 'No required NFS volume to wait for');\n return;\n }\n\n var volapi = new sdcClients.VOLAPI({\n url: volapiUrl,\n headers: {'x-request-id': job.params['x-request-id']},\n userAgent: 'workflow/' + job.name\n });\n\n vasync.forEachParallel({\n func: function checkVolumeCreated(nfsVolumeUuid, done) {\n if (typeof (nfsVolumeUuid) !== 'string') {\n done(new Error('nfsVolumeUuid must be a string'));\n return;\n }\n\n function checkVolumeReady() {\n volapi.getVolume({\n uuid: nfsVolumeUuid,\n owner_uuid: job.params.owner_uuid\n }, function onGetVolume(getVolumeErr, volume) {\n if (getVolumeErr) {\n done(getVolumeErr);\n return;\n }\n\n if (volume && volume.state === 'ready') {\n job.createdVolumes[volume.uuid] = volume;\n done();\n return;\n }\n\n setTimeout(checkVolumeReady, 1000);\n });\n }\n\n checkVolumeReady();\n },\n inputs: Object.keys(job.createdVolumes)\n }, function allVolumesReady(err, results) {\n if (err) {\n callback(new Error('Could not determine if all required volumes '\n + 'are ready'));\n } else {\n callback(null, 'All required volumes ready');\n }\n });\n}"
},
{
"name": "cnapi.build_nfs_volumes_metadata",
"timeout": 10,
"retry": 1,
"body": "function buildNfsVolumesMetadata(job, callback) {\n job.log.info({createdVolumes: job.createdVolumes},\n 'Building docker:nfsvolumes internal metadata');\n\n if (job.createdVolumes === undefined) {\n // No NFS volume was created, so there's no need to update the\n // docker:nfsvolumes metadata for dockerinit to mount any volume.\n callback(null, 'No NFS volume with which to update VM\\'s internal '\n + 'metadata');\n return;\n }\n\n if (job.createdVolumes === null ||\n typeof (job.createdVolumes) !== 'object') {\n callback(new Error('job.createdVolumes must be an object'));\n return;\n }\n\n if (Object.keys(job.createdVolumes).length === 0) {\n callback(null, 'No NFS volume with which to update VM\\'s internal '\n + 'metadata');\n return;\n }\n\n var createdVolume;\n var foundVolume;\n var requiredVolumes = job.params.volumes;\n var volume;\n var volumeIndex;\n var volumeUuid;\n\n if (!Array.isArray(requiredVolumes)) {\n callback(new Error('requiredVolumes must be an array'));\n return;\n }\n\n for (volumeUuid in job.createdVolumes) {\n createdVolume = job.createdVolumes[volumeUuid];\n\n job.log.info('Updating docker:nfsvolumes metadata entry for volume: '\n + createdVolume.name);\n\n foundVolume = false;\n\n for (volumeIndex in requiredVolumes) {\n volume = requiredVolumes[volumeIndex];\n if (volume && volume.name === createdVolume.name) {\n foundVolume = true;\n break;\n }\n }\n\n if (foundVolume) {\n job.log.info('Adding filesystem_path property ['\n + createdVolume.filesystem_path + '] to '\n + 'required volume: ' + createdVolume.name);\n requiredVolumes[volumeIndex].filesystem_path =\n createdVolume.filesystem_path;\n }\n }\n\n job.nfsVolumesInternalMetadata =\n JSON.stringify(requiredVolumes.map(volumeToNfsInternalMetadata));\n\n callback(null, 'Built docker:nfsvolumes internal_metadata: '\n + job.nfsVolumesInternalMetadata);\n\n function volumeToNfsInternalMetadata(vol) {\n return {\n mode: (vol.mode === undefined) ? 'rw' : vol.mode,\n mountpoint: vol.mountpoint,\n name: vol.name,\n nfsvolume: vol.filesystem_path,\n type: (vol.type === undefined) ? 'tritonnfs' : vol.type\n };\n }\n}"
},
{
"name": "prepare_payload",
"timeout": 10,
"retry": 1,
"body": "function preparePayload(job, cb) {\n job.params.jobid = job.uuid;\n\n var params = job.params;\n var i, j, nic;\n var parsedNfsMetadata;\n var payload = { uuid: params['vm_uuid'], image: job.params.image };\n var wantResolvers = true;\n\n if (payload.image.hasOwnProperty('tags') &&\n payload.image.tags.hasOwnProperty('kernel_version') &&\n !params.hasOwnProperty('kernel_version')) {\n\n params['kernel_version'] = payload.image.tags.kernel_version;\n }\n\n if (payload.image.type === 'lx-dataset') {\n params['brand'] = 'lx';\n }\n\n var keys = [ 'alias', 'autoboot', 'billing_id', 'brand',\n 'cpu_cap', 'cpu_shares', 'customer_metadata',\n 'delegate_dataset', 'dns_domain', 'docker', 'do_not_inventory',\n 'firewall_enabled', 'firewall_rules', 'fs_allowed',\n 'hostname', 'indestructible_zoneroot', 'indestructible_delegated',\n 'init_name', 'internal_metadata', 'kernel_version', 'limit_priv',\n 'maintain_resolvers', 'max_locked_memory', 'max_lwps', 'max_msg_ids',\n 'max_physical_memory', 'max_shm_memory', 'max_sem_ids', 'max_shm_ids',\n 'max_swap', 'mdata_exec_timeout', 'nics',\n 'owner_uuid', 'quota', 'ram',\n 'resolvers', 'vcpus', 'zfs_data_compression', 'zfs_io_priority',\n 'zlog_max_size', 'tags', 'tmpfs'\n ];\n\n for (i = 0; i < keys.length; i++) {\n var key = keys[i];\n if (params[key] !== undefined) {\n payload[key] = params[key];\n }\n }\n\n // Per OS-2520 we always want to be setting archive_on_delete in SDC\n payload['archive_on_delete'] = true;\n\n // If internal_metadata.set_resolvers === false, we always want\n // to leave the resolvers as empty\n if (params.internal_metadata !== undefined &&\n typeof (params.internal_metadata) === 'object' &&\n params.internal_metadata.set_resolvers === false) {\n wantResolvers = false;\n }\n\n // Add NIC resolvers and routes in the order of the networks.\n var resolver;\n var nicResolvers = [];\n var routes = {};\n for (i = 0; i < params.nics.length; i++) {\n nic = params.nics[i];\n\n if (nic['resolvers'] !== undefined &&\n Array.isArray(nic['resolvers'])) {\n for (j = 0; j < nic['resolvers'].length; j++) {\n resolver = nic['resolvers'][j];\n if (nicResolvers.indexOf(resolver) === -1) {\n nicResolvers.push(resolver);\n }\n }\n }\n\n if (nic['routes'] !== undefined &&\n typeof (nic['routes']) === 'object') {\n for (var r in nic['routes']) {\n if (!routes.hasOwnProperty(r)) {\n routes[r] = nic['routes'][r];\n }\n }\n }\n }\n\n if (wantResolvers) {\n if (payload.resolvers) {\n // Resolvers were passed in - do not use the NIC resolvers.\n if (!payload.hasOwnProperty('internal_metadata')) {\n payload.internal_metadata = {};\n }\n payload.internal_metadata.no_nic_resolvers = true;\n } else {\n payload['resolvers'] = nicResolvers;\n }\n }\n\n if (Object.keys(routes).length !== 0) {\n payload['routes'] = routes;\n }\n\n if (['bhyve', 'kvm'].indexOf(params['brand']) !== -1) {\n payload.disks = params.disks;\n var otherProps = ['disk_driver', 'nic_driver', 'cpu_type'];\n if (params['brand'] === 'bhyve') {\n otherProps.push('flexible_disk_size');\n }\n\n otherProps.forEach(function addOtherPropsToPayload(field) {\n if (params[field]) {\n payload[field] = params[field];\n } else {\n payload[field] = job.params.image[field];\n }\n });\n\n // Rely into default vmadm values with `disks` and `flexible_disk_size`\n // for KVM/Bhyve VMs:\n delete payload.quota;\n } else {\n payload['image_uuid'] = params['image_uuid'];\n\n if (params['filesystems'] !== undefined) {\n payload['filesystems'] = params['filesystems'];\n }\n }\n\n if (params.imgapiPeers !== 
undefined) {\n payload.imgapiPeers = params.imgapiPeers;\n }\n\n if (job.nfsVolumesInternalMetadata !== undefined) {\n job.log.info({\n docker: Boolean(job.params.docker),\n nfsVolumesInternalMetadata: job.nfsVolumesInternalMetadata\n }, 'Setting nfsvolumes internal metadata');\n\n if (!payload.hasOwnProperty('internal_metadata')) {\n payload.internal_metadata = {};\n }\n\n if (job.params.docker === true) {\n // We create a separate copy of the metadata for docker:nfsvolumes,\n // because that needs a 'readonly' parameter instead of 'mode' for\n // historical reasons.\n try {\n parsedNfsMetadata = JSON.parse(job.nfsVolumesInternalMetadata);\n } catch (nfsMetadataParseErr) {\n cb(new VError(nfsMetadataParseErr,\n 'Could not parse NFS volumes metadata'));\n return;\n }\n\n if (!Array.isArray(parsedNfsMetadata)) {\n cb(new Error('parsed nfsvolumes is not an array'));\n return;\n }\n\n // replace .mode = <string> with .readonly = true|false\n parsedNfsMetadata.forEach(function _eachVol(volObj) {\n volObj.readonly = (volObj.mode === 'ro');\n delete volObj.mode;\n });\n\n payload.internal_metadata['docker:nfsvolumes']\n = JSON.stringify(parsedNfsMetadata);\n }\n\n payload.internal_metadata['sdc:volumes'] =\n job.nfsVolumesInternalMetadata;\n }\n\n job.params.payload = payload;\n cb(null, 'Payload prepared successfully');\n}",
"modules": {
"sdcClients": "sdc-clients",
"VError": "verror"
}
},
{
"name": "cnapi.provision_vm",
"timeout": 10,
"retry": 1,
"body": "function provision(job, cb) {\n delete job.params.skip_zone_action;\n\n var cnapi = new sdcClients.CNAPI({\n url: cnapiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n job.params.jobid = job.uuid;\n\n // autoboot=false means we want the machine to not to boot after provision\n if (job.params.autoboot === false || job.params.autoboot === 'false') {\n job.expects = 'stopped';\n } else {\n job.expects = 'running';\n }\n\n var server = job.params['server_uuid'];\n\n if (job.params.internal_metadata &&\n job.params.internal_metadata['force_provision_failure']) {\n\n cb('force_provision_failure set, failing');\n return;\n }\n\n return cnapi.createVm(server, job.params.payload, function (err, task) {\n if (err) {\n return cb(err);\n } else {\n job.taskId = task.id;\n // As soon was we reach this point, we don't want to clean up NICs\n // when a provision fails\n job.markAsFailedOnError = false;\n return cb(null, 'Provision task: ' + task.id + ' queued!');\n }\n });\n}",
"modules": {
"sdcClients": "sdc-clients"
}
},
{
"name": "cnapi.wait_task",
"timeout": 3600,
"retry": 1,
"body": "function waitTask(job, cb) {\n if (job.params['skip_zone_action']) {\n cb(null, 'Skipping waitTask');\n return;\n }\n\n if (!job.taskId) {\n cb('No taskId provided');\n return;\n }\n\n if (!cnapiUrl) {\n cb('No CNAPI URL provided');\n return;\n }\n\n var cnapi = new sdcClients.CNAPI({\n url: cnapiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n\n cnapi.waitTask(job.taskId, {}, onTask);\n\n function onTask(err, task) {\n if (err) {\n if (err.statusCode === 404) {\n // fallback to pollTask\n cnapi.pollTask(job.taskId, {}, function (pollerr, polltask) {\n // Make sure loops cannot happen\n if (pollerr && pollerr.statusCode === 404) {\n cb(pollerr);\n return;\n }\n onTask(pollerr, polltask);\n });\n return;\n }\n cb(err);\n } else if (task && task.status == 'failure') {\n cb(getErrorMesage(task));\n } else if (task && task.status == 'complete') {\n // Tasks that modify VM state should add a .vm to the task\n // with something like \"self.finish({ vm: machine });\"\n if (task.history && task.history.length > 0 &&\n task.history[0].name === 'finish' &&\n task.history[0].event) {\n\n var event = task.history[0].event;\n if (event.vm) {\n job.finished_vm = event.vm;\n job.log.debug({vm_uuid: job.finished_vm.uuid},\n 'finish() returned VM');\n }\n\n job.log.debug({event: event}, 'finish() returned event');\n\n if (job.store_task_finish_event_in_attribute) {\n job[job.store_task_finish_event_in_attribute] = event;\n }\n }\n\n cb(null, 'Job succeeded!');\n } else {\n cb(new Error('unexpected task status, ' + task.status));\n }\n }\n\n function getErrorMesage(task) {\n var message;\n var details = [];\n\n if (task.history !== undefined && task.history.length) {\n for (var i = 0; i < task.history.length; i++) {\n var event = task.history[i];\n if (event.name && event.name === 'error' && event.event &&\n event.event.error) {\n var err = event.event.error;\n if (typeof (err) === 'string') {\n message = err;\n if (event.event.details && event.event.details.error) {\n message += ', ' + event.event.details.error;\n }\n } else {\n message = err.message;\n }\n } else if (event.name && event.name === 'finish' &&\n event.event && event.event.log && event.event.log.length) {\n for (var j = 0; j < event.event.log.length; j++) {\n var logEvent = event.event.log[j];\n if (logEvent.level && logEvent.level === 'error') {\n details.push(logEvent.message);\n }\n }\n }\n }\n }\n\n // Apparently the task doesn't have any message for us...\n if (message === undefined) {\n message = 'Unexpected error occured';\n } else if (details.length) {\n message += ': ' + details.join(', ');\n }\n\n return message;\n }\n}",
"modules": {
"sdcClients": "sdc-clients"
}
},
{
"name": "volapi.add_volumes_references",
"timeout": 120,
"retry": 1,
"body": "function addVolumesReferences(job, callback) {\n var createdVolumeUuids = [];\n\n if (job.createdVolumes !== undefined &&\n typeof (job.createdVolumes) !== 'object') {\n callback(new Error('job.createdVolumes must be an object if '\n + 'present'));\n return;\n }\n\n if (job.createdVolumes) {\n createdVolumeUuids = Object.keys(job.createdVolumes);\n }\n\n if (createdVolumeUuids.length === 0) {\n callback(null,\n 'No created NFS volume to which a reference needs to be added');\n return;\n }\n\n var volapi = new sdcClients.VOLAPI({\n url: volapiUrl,\n headers: {'x-request-id': job.params['x-request-id']},\n userAgent: 'workflow/' + job.name\n });\n\n var vmUuid = job.params.vm_uuid;\n\n vasync.forEachParallel({\n func: function addVolReference(volUuid, done) {\n volapi.addVolumeReference({\n owner_uuid: job.params.owner_uuid,\n vm_uuid: vmUuid,\n volume_uuid: volUuid\n }, done);\n },\n inputs: createdVolumeUuids\n }, function onRefsAdded(refsAddErr) {\n if (refsAddErr) {\n callback(new Error('Could not add references from VM ' + vmUuid +\n ' to volumes ' + createdVolumeUuids));\n } else {\n callback(null, 'References from VM ' + vmUuid + ' to volumes ' +\n createdVolumeUuids + ' added successfully');\n }\n });\n}",
"modules": {
"sdcClients": "sdc-clients",
"vasync": "vasync"
}
},
{
"name": "vmapi.put_vm",
"timeout": 120,
"retry": 1,
"body": "function putVm(job, cb) {\n var vmapi;\n\n if (job.params.do_not_inventory) {\n cb(null,\n 'VM has do_not_inventory set, no need to putVm to VMAPI');\n return;\n }\n\n /*\n * Checks (polls) the state of a machine in VMAPI. It is used for provisions\n * and VM actions such as reboot and shutdown.\n *\n * IMPORTANT: this function an all uses of job.expects are deprecated and\n * will be removed in a future version after everyone is updated\n * past the old agent tasks that don't pass back the VMs. It is\n * being replaced with the putVm function and is now only called\n * from there.\n */\n function checkState(_job, _cb) {\n if (_job.params['skip_zone_action']) {\n _cb(null, 'Skipping checkState');\n return;\n }\n\n // For now don't fail the job if this parameter is not present\n if (!_job.expects) {\n _cb(null, 'No \\'expects\\' state parameter provided');\n return;\n }\n\n if (!_job.params['vm_uuid']) {\n _cb('No VM UUID provided');\n return;\n }\n\n if (!vmapiUrl) {\n _cb('No VMAPI URL provided');\n return;\n }\n\n var _vmapi = new sdcClients.VMAPI({\n url: vmapiUrl,\n headers: { 'x-request-id': _job.params['x-request-id'] }\n });\n\n // Repeat checkVm until VM data is updated\n checkVm();\n\n function checkVm() {\n _vmapi.getVm({ uuid: _job.params['vm_uuid'] }, onVmapi);\n\n function onVmapi(err, vm, req, res) {\n if (err) {\n _cb(err);\n } else if (vm.state == _job.expects) {\n _cb(null, 'VM is now ' + _job.expects);\n } else {\n if (_job.timeToDie) {\n _job.log.error('checkState.checkVm.onVmapi called after'\n + ' task completion, breaking loop');\n return;\n }\n setTimeout(checkVm, 1000);\n }\n }\n }\n }\n\n if (!job.finished_vm) {\n job.log.warn({req_id: job.params['x-request-id']},\n 'putVM() called but job.finished_vm is missing');\n\n checkState(job, cb);\n //\n // When checkState is removed:\n //\n // cb(null, 'job has no finished_vm, nothing to post to VMAPI');\n return;\n }\n\n if (!vmapiUrl) {\n cb(new Error('No VMAPI URL provided'));\n return;\n }\n\n job.log.debug({vmobj: job.finished_vm}, 'putVM() putting VM to VMAPI');\n\n //\n // Borrowed from vm-agent lib/vmapi-client.js\n //\n // DO NOT TRY THIS AT HOME!\n //\n // afaict the reason sdcClients does not have a putVm function in the first\n // place is that this is not something API clients should generally be\n // doing. WE need to do it, and vm-agent needs to do it, but other clients\n // should not be doing it unless they're absolutely sure that what they're\n // PUTing is the current state.\n //\n // We know that here because cn-agent tasks just did a VM.load for us.\n //\n sdcClients.VMAPI.prototype.putVm = function (vm, callback) {\n var log = job.log;\n var opts = { path: '/vms/' + vm.uuid };\n\n this.client.put(opts, vm, function (err, req, res) {\n if (err) {\n log.error(err, 'Could not update VM %s', vm.uuid);\n return callback(err);\n }\n\n log.info('VM (uuid=%s, state=%s, last_modified=%s) updated @ VMAPI',\n vm.uuid, vm.state, vm.last_modified);\n return callback();\n });\n };\n\n vmapi = new sdcClients.VMAPI({\n log: job.log,\n url: vmapiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n\n vmapi.putVm(job.finished_vm, function (err) {\n if (err) {\n cb(err);\n return;\n }\n\n cb(null, 'put VM ' + job.finished_vm.uuid + ' to VMAPI');\n });\n}",
"modules": {
"sdcClients": "sdc-clients"
}
},
{
"name": "fwapi.update",
"timeout": 10,
"retry": 1,
"body": "function updateFwapi(job, cb) {\n var fwapi = new sdcClients.FWAPI({\n url: fwapiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n var jobParams = job.params.payload || job.params;\n var type;\n var update = {};\n var vmProps = ['add_nics', 'firewall_enabled', 'nics', 'remove_ips',\n 'remove_nics', 'remove_tags', 'set_tags', 'tags'];\n\n if (job.params.task === 'provision') {\n type = 'vm.add';\n } else {\n type = (job.params.task === 'destroy') ? 'vm.delete' : 'vm.update';\n }\n\n vmProps.forEach(function (prop) {\n if (jobParams.hasOwnProperty(prop)) {\n update[prop] = jobParams[prop];\n }\n });\n\n job.log.info({ jobParams: jobParams, update: update }, 'update params');\n\n if (Object.keys(update).length === 0 && job.params.task !== 'destroy') {\n cb(null, 'No properties affecting FWAPI found: not updating');\n return;\n }\n\n update.owner_uuid = jobParams.owner_uuid || job.params.owner_uuid;\n update.server_uuid = jobParams.server_uuid || job.params.server_uuid;\n update.type = type;\n update.uuid = jobParams.uuid || jobParams.vm_uuid || job.params.vm_uuid;\n\n fwapi.createUpdate(update, function (err, obj) {\n if (err) {\n job.log.warn(err, 'Error sending update to FWAPI');\n cb(null, 'Error updating FWAPI');\n return;\n }\n\n cb(null, 'Updated FWAPI with update UUID: ' + obj.update_uuid);\n });\n}",
"modules": {
"sdcClients": "sdc-clients"
}
},
{
"name": "cnapi.release_vm_ticket",
"timeout": 60,
"retry": 1,
"body": "function releaseVMTicket(job, cb) {\n var ticket = job.params.vmTicket;\n\n if (!ticket) {\n cb();\n return;\n }\n var cnapi = new sdcClients.CNAPI({\n url: cnapiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n cnapi.waitlistTicketRelease(ticket.uuid, function (err) {\n if (err) {\n job.log.warn({err: err, ticket: ticket},\n 'error releasing CNAPI waitlist VM ticket');\n }\n cb(err);\n });\n}",
"modules": {
"sdcClients": "sdc-clients"
}
},
{
"name": "cnapi.release_fabric_nat_tickets",
"timeout": 60,
"retry": 1,
"body": "function releaseFabricNatTickets(job, cb) {\n if (!job.params.fabricNatTickets ||\n job.params.fabricNatTickets.length === 0) {\n return cb(null, 'No fabric NAT tickets to release');\n }\n\n var cnapi = new sdcClients.CNAPI({\n url: cnapiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n\n async.forEach(job.params.fabricNatTickets, function (tick, next) {\n cnapi.waitlistTicketRelease(tick.ticket.uuid, function (err) {\n if (err && err.code !== 'ResourceNotFound') {\n job.log.warn({ticket: tick.ticket},\n 'Unable to release CNAPI NAT ticket');\n next(err);\n return;\n }\n next();\n });\n }, cb);\n}",
"modules": {
"sdcClients": "sdc-clients",
"async": "async"
}
}
],
"timeout": 3810,
"onerror": [
{
"name": "napi.cleanup_nics",
"timeout": 10,
"retry": 1,
"body": "function cleanupNics(job, cb) {\n // If this is false it means that cnapi.pollTask succeeded, so the VM exists\n // physically wether its provision failed or not\n if (job.markAsFailedOnError === false) {\n return cb(null, 'markAsFailedOnError was set to false, ' +\n 'won\\'t cleanup VM NICs');\n }\n\n var macs = job.params.macs;\n\n if (!macs) {\n /*\n * filteredNetworks.nics will contain any pre provisioned NICs. If\n * the workflow fails early enough the other job.params fields will\n * not yet have been populated yet, but we still have some NICs that\n * need to be removed.\n */\n var nics = job.params['add_nics'] || job.params['nics'] ||\n job.params.filteredNetworks.nics;\n\n if (!nics) {\n return cb(null, 'No MACs given, and no NICs were provisioned');\n }\n\n macs = nics.map(function (nic) { return nic.mac; });\n }\n\n var napi = new sdcClients.NAPI({\n url: napiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n\n async.mapSeries(macs, function (mac, next) {\n napi.deleteNic(mac, next);\n }, function (err) {\n if (err) {\n cb(err);\n } else {\n cb(null, 'NICs removed');\n }\n });\n}",
"modules": {
"sdcClients": "sdc-clients",
"async": "async"
}
},
{
"name": "set_post_back_failed",
"body": "function setPostBackFailed(job, cb) {\n // If this is false it means that cnapi.waitTask succeeded, so the VM exists\n // physically wether its provision failed or not\n if (job.markAsFailedOnError === false) {\n return cb(null, 'markAsFailedOnError was set to false, ' +\n 'won\\'t set postBackState for VM');\n }\n\n job.postBackState = 'failed';\n return cb(null, 'Set post back state as failed');\n}",
"modules": {}
},
{
"name": "common.post_back",
"body": "function postBack(job, cb) {\n if (job.markAsFailedOnError === false) {\n return cb(null, 'markAsFailedOnError was set to false, ' +\n 'won\\'t postBack provision failure to VMAPI');\n }\n\n var urls = job.params['post_back_urls'];\n var vmapiPath = vmapiUrl + '/job_results';\n\n // By default, post back to VMAPI\n if (urls === undefined || !Array.isArray(urls)) {\n urls = [ vmapiPath ];\n } else {\n urls.push(vmapiPath);\n }\n\n var obj = clone(job.params);\n obj.execution = job.postBackState || 'succeeded';\n\n async.mapSeries(urls, function (url, next) {\n var p = urlModule.parse(url);\n var api = restify.createJsonClient({\n url: p.protocol + '//' + p.host,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n api.post(p.pathname, obj, onResponse);\n\n function onResponse(err, req, res) {\n return next(err);\n }\n\n }, function (err2) {\n if (err2) {\n var errObject = { err: err2, urls: urls };\n job.log.info(errObject, 'Error posting back to URLs');\n cb(null, 'Could not post back job results. See /info object');\n } else {\n cb(null, 'Posted job results back to specified URLs');\n }\n });\n\n // Shallow clone for the job.params object\n function clone(theObj) {\n if (null === theObj || 'object' != typeof (theObj)) {\n return theObj;\n }\n\n var copy = theObj.constructor();\n\n for (var attr in theObj) {\n if (theObj.hasOwnProperty(attr)) {\n copy[attr] = theObj[attr];\n }\n }\n return copy;\n }\n}",
"modules": {
"async": "async",
"restify": "restify",
"urlModule": "url"
}
},
{
"name": "cnapi.cleanup_allocation_ticket",
"modules": {
"sdcClients": "sdc-clients"
},
"body": "function releaseAllocationTicket(job, cb) {\n var ticket = job.allocationTicket;\n\n if (!ticket) {\n return cb();\n }\n var cnapi = new sdcClients.CNAPI({\n url: cnapiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n cnapi.waitlistTicketRelease(ticket.uuid, function (err) {\n if (err) {\n job.log.warn({err: err, ticket: ticket},\n 'error releasing CNAPI waitlist allocation ticket');\n return;\n }\n cb();\n });\n}"
},
{
"name": "cnapi.cleanup_vm_ticket",
"modules": {
"sdcClients": "sdc-clients"
},
"body": "function releaseVMTicket(job, cb) {\n var ticket = job.params.vmTicket;\n\n if (!ticket) {\n cb();\n return;\n }\n var cnapi = new sdcClients.CNAPI({\n url: cnapiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n cnapi.waitlistTicketRelease(ticket.uuid, function (err) {\n if (err) {\n job.log.warn({err: err, ticket: ticket},\n 'error releasing CNAPI waitlist VM ticket');\n }\n cb(err);\n });\n}"
},
{
"name": "vmapi.refresh_vm_on_error",
"modules": {
"restify": "restify"
},
"body": "function refreshVm(job, cb) {\n if (!job.params['vm_uuid']) {\n cb('No VM UUID provided');\n return;\n }\n\n if (!vmapiUrl) {\n cb('No VMAPI URL provided');\n return;\n }\n\n /*\n * When job.markAsFailedOnError is set, we won't automatically update the\n * VM in VMAPI to state 'failed'. This is because there may be NICs in use.\n * However, for the case where we have failed to create something correctly,\n * we want to ensure VMAPI gets to the correct state. So we do a GET with\n * sync=true here at the end of the onerror chain to ensure VMAPI's\n * up-to-date. But only when the 'failed' state was not set already.\n */\n if (job.markAsFailedOnError !== false) {\n return cb(null, 'markAsFailedOnError set, not doing sync GET');\n }\n\n var vmapi = restify.createJsonClient({\n url: vmapiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n var path = '/vms/' + job.params['vm_uuid'] + '?sync=true';\n\n vmapi.get(path, onVmapi);\n\n function onVmapi(err, req, res, vm) {\n if (err) {\n cb(err);\n } else {\n cb(null, 'VM data refreshed, new VM state is ' + vm.state);\n }\n }\n}"
},
{
"name": "cnapi.release_fabric_nat_tickets",
"timeout": 60,
"retry": 1,
"body": "function releaseFabricNatTickets(job, cb) {\n if (!job.params.fabricNatTickets ||\n job.params.fabricNatTickets.length === 0) {\n return cb(null, 'No fabric NAT tickets to release');\n }\n\n var cnapi = new sdcClients.CNAPI({\n url: cnapiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n\n async.forEach(job.params.fabricNatTickets, function (tick, next) {\n cnapi.waitlistTicketRelease(tick.ticket.uuid, function (err) {\n if (err && err.code !== 'ResourceNotFound') {\n job.log.warn({ticket: tick.ticket},\n 'Unable to release CNAPI NAT ticket');\n next(err);\n return;\n }\n next();\n });\n }, cb);\n}",
"modules": {
"sdcClients": "sdc-clients",
"async": "async"
}
},
{
"name": "On error",
"body": "function (job, cb) {\n return cb('Error executing job');\n }"
}
],
"oncancel": [
{
"name": "vmapi.refresh_vm",
"modules": {
"restify": "restify"
},
"body": "function refreshVm(job, cb) {\n if (!job.params['vm_uuid']) {\n cb('No VM UUID provided');\n return;\n }\n\n if (!vmapiUrl) {\n cb('No VMAPI URL provided');\n return;\n }\n\n /*\n * When job.markAsFailedOnError is set, we won't automatically update the\n * VM in VMAPI to state 'failed'. This is because there may be NICs in use.\n * However, for the case where we have failed to create something correctly,\n * we want to ensure VMAPI gets to the correct state. So we do a GET with\n * sync=true here at the end of the onerror chain to ensure VMAPI's\n * up-to-date. But only when the 'failed' state was not set already.\n */\n if (job.markAsFailedOnError !== false) {\n return cb(null, 'markAsFailedOnError set, not doing sync GET');\n }\n\n var vmapi = restify.createJsonClient({\n url: vmapiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n var path = '/vms/' + job.params['vm_uuid'] + '?sync=true';\n\n vmapi.get(path, onVmapi);\n\n function onVmapi(err, req, res, vm) {\n if (err) {\n cb(err);\n } else {\n cb(null, 'VM data refreshed, new VM state is ' + vm.state);\n }\n }\n}"
},
{
"name": "cnapi.cleanup_vm_ticket",
"modules": {
"sdcClients": "sdc-clients"
},
"body": "function releaseVMTicket(job, cb) {\n var ticket = job.params.vmTicket;\n\n if (!ticket) {\n cb();\n return;\n }\n var cnapi = new sdcClients.CNAPI({\n url: cnapiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n cnapi.waitlistTicketRelease(ticket.uuid, function (err) {\n if (err) {\n job.log.warn({err: err, ticket: ticket},\n 'error releasing CNAPI waitlist VM ticket');\n }\n cb(err);\n });\n}"
},
{
"name": "cnapi.cleanup_allocation_ticket",
"modules": {
"sdcClients": "sdc-clients"
},
"body": "function releaseAllocationTicket(job, cb) {\n var ticket = job.allocationTicket;\n\n if (!ticket) {\n return cb();\n }\n var cnapi = new sdcClients.CNAPI({\n url: cnapiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n cnapi.waitlistTicketRelease(ticket.uuid, function (err) {\n if (err) {\n job.log.warn({err: err, ticket: ticket},\n 'error releasing CNAPI waitlist allocation ticket');\n return;\n }\n cb();\n });\n}"
},
{
"name": "cnapi.release_fabric_nat_tickets",
"timeout": 60,
"retry": 1,
"body": "function releaseFabricNatTickets(job, cb) {\n if (!job.params.fabricNatTickets ||\n job.params.fabricNatTickets.length === 0) {\n return cb(null, 'No fabric NAT tickets to release');\n }\n\n var cnapi = new sdcClients.CNAPI({\n url: cnapiUrl,\n headers: { 'x-request-id': job.params['x-request-id'] }\n });\n\n async.forEach(job.params.fabricNatTickets, function (tick, next) {\n cnapi.waitlistTicketRelease(tick.ticket.uuid, function (err) {\n if (err && err.code !== 'ResourceNotFound') {\n job.log.warn({ticket: tick.ticket},\n 'Unable to release CNAPI NAT ticket');\n next(err);\n return;\n }\n next();\n });\n }, cb);\n}",
"modules": {
"sdcClients": "sdc-clients",
"async": "async"
}
}
],
"workflow_uuid": "b163790a-19d8-49f4-bdc5-bb8855a8c2fa",
"created_at": "2020-01-20T16:24:38.750Z",
"onerror_results": [
{
"result": "NICs removed",
"error": "",
"name": "napi.cleanup_nics",
"started_at": "2020-01-20T16:26:41.256Z",
"finished_at": "2020-01-20T16:26:41.758Z"
},
{
"result": "Set post back state as failed",
"error": "",
"name": "set_post_back_failed",
"started_at": "2020-01-20T16:26:41.896Z",
"finished_at": "2020-01-20T16:26:41.936Z"
},
{
"result": "Posted job results back to specified URLs",
"error": "",
"name": "common.post_back",
"started_at": "2020-01-20T16:26:42.076Z",
"finished_at": "2020-01-20T16:26:42.303Z"
},
{
"result": "OK",
"error": "",
"name": "cnapi.cleanup_allocation_ticket",
"started_at": "2020-01-20T16:26:42.435Z",
"finished_at": "2020-01-20T16:26:42.476Z"
},
{
"result": "OK",
"error": "",
"name": "cnapi.cleanup_vm_ticket",
"started_at": "2020-01-20T16:26:42.586Z",
"finished_at": "2020-01-20T16:26:42.789Z"
},
{
"result": "markAsFailedOnError set, not doing sync GET",
"error": "",
"name": "vmapi.refresh_vm_on_error",
"started_at": "2020-01-20T16:26:42.926Z",
"finished_at": "2020-01-20T16:26:42.973Z"
},
{
"result": "OK",
"error": "",
"name": "cnapi.release_fabric_nat_tickets",
"started_at": "2020-01-20T16:26:43.046Z",
"finished_at": "2020-01-20T16:26:43.305Z"
},
{
"result": "",
"error": "Error executing job",
"name": "On error",
"started_at": "2020-01-20T16:26:43.435Z",
"finished_at": "2020-01-20T16:26:43.476Z"
}
],
"oncancel_results": [],
"started": 1579537479722,
"action": "provision",
"postBackState": "failed",
"elapsed": 123.934
}
FAILED
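
When a provision job fails like this one, the onerror_results array above is the quickest signal: each entry names the cleanup task that ran and what it returned. Below is a minimal sketch of pulling those results programmatically, mirroring the restify.createJsonClient pattern used by the task bodies above. The vmapiUrl value and the job UUID are placeholders (this job's own UUID is not shown in the dump), and it assumes VMAPI's documented GET /jobs/:uuid endpoint.

var restify = require('restify');

// Hypothetical values -- substitute your VMAPI endpoint and the job's UUID.
var vmapiUrl = 'http://vmapi.my-dc.example.com';
var jobUuid = '00000000-0000-0000-0000-000000000000';

var client = restify.createJsonClient({ url: vmapiUrl });

client.get('/jobs/' + jobUuid, function (err, req, res, job) {
    if (err) {
        throw err;
    }
    // Print each onerror task with its error (if any) or result,
    // in execution order.
    (job.onerror_results || []).forEach(function (r) {
        console.log('%s: %s', r.name, r.error || r.result);
    });
});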