Skip to content

Instantly share code, notes, and snippets.

Embed
What would you like to do?
from pprint import pprint
import requests
def build_cluster_payload():
    """Build the JSON payload for a Databricks Clusters API 2.0 create call.

    Returns:
        dict: request body for ``POST /api/2.0/clusters/create``.

    NOTE(review): two fixes versus the original gist —
      * ``spark_conf`` must be a map of string to string per the Clusters
        API, so ``spark.speculation`` is sent as ``"true"``, not the
        Python boolean ``True``.
      * The ``ebs_volume_*`` settings are fields of ``aws_attributes`` in
        the Clusters API; at the top level of the payload they are
        ignored, so they are nested here.
    """
    return {
        "cluster_name": "deploy_test_cluster",
        "spark_version": "3.3.x-scala2.11",
        "node_type_id": "r4.xlarge",
        "autoscale": {
            "min_workers": 2,
            "max_workers": 4,
        },
        "spark_conf": {
            # API contract: spark_conf values are strings, never booleans.
            "spark.speculation": "true",
        },
        "aws_attributes": {
            "availability": "SPOT",
            "zone_id": "us-east-1c",
            "instance_profile_arn": "arn:aws:iam::148546933577:instance-profile/userCluster",
            # EBS volume settings belong inside aws_attributes.
            "ebs_volume_type": "THROUGHPUT_OPTIMIZED_HDD",
            "ebs_volume_count": 1,
            "ebs_volume_size": 20,
        },
    }


def main():
    """Create the cluster and pretty-print the API response."""
    # Placeholder credentials — substitute a real user/token (or bearer
    # token) before running; basic auth is what the original gist used.
    create = requests.post(
        "https://dbc-db50c5d5-5ae4.cloud.databricks.com/api/2.0/clusters/create",
        auth=("*******", "*******"),
        json=build_cluster_payload(),
    )
    pprint(create.json())


# Guard so importing this module (e.g. for testing the payload builder)
# does not fire a live API request.
if __name__ == "__main__":
    main()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
You can’t perform that action at this time.