@skonto
Created May 29, 2019 11:48
./bin/spark-submit --master k8s://https://172.31.43.176:8443 \
--deploy-mode cluster \
--name spark-pi \
--class org.apache.spark.examples.SparkPi \
--conf spark.executor.memory=1G \
--conf spark.kubernetes.namespace=spark \
--conf spark.kubernetes.authenticate.driver.serviceAccountName=spark-sa \
--conf spark.driver.memory=1G \
--conf spark.executor.instances=2 \
--conf spark.kubernetes.container.image.pullPolicy=Always \
--conf spark.kubernetes.container.image=lightbend/spark:2.2.0-OpenShift-2.4.3-rh-2.12 \
local:///opt/spark/examples/jars/spark-examples_2.12-2.4.3.jar 1000000
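
The submission above assumes the spark namespace and the spark-sa service account already exist and that the account is allowed to create and watch executor pods in that namespace. A minimal setup sketch (the clusterrolebinding name is hypothetical; binding the built-in edit ClusterRole is one common choice, as in the Spark on Kubernetes docs):

kubectl create namespace spark
kubectl create serviceaccount spark-sa -n spark
kubectl create clusterrolebinding spark-sa-edit \
  --clusterrole=edit \
  --serviceaccount=spark:spark-sa
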
$ kubectl get pods/spark-pi-1559130109598-driver -n spark -o yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: "2019-05-29T11:41:50Z"
  labels:
    spark-app-selector: spark-ab5bb0aa607f4d1ea5d1a5ea650f845c
    spark-role: driver
  name: spark-pi-1559130109598-driver
  namespace: spark
  resourceVersion: "1432417"
  selfLink: /api/v1/namespaces/spark/pods/spark-pi-1559130109598-driver
  uid: bf8b2c1e-8206-11e9-988a-0a5286b46bc8
spec:
  containers:
  - args:
    - driver
    - --properties-file
    - /opt/spark/conf/spark.properties
    - --class
    - org.apache.spark.examples.SparkPi
    - spark-internal
    - "1000000"
    env:
    - name: SPARK_DRIVER_BIND_ADDRESS
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: status.podIP
    - name: SPARK_LOCAL_DIRS
      value: /var/data/spark-7b475cc8-44be-4bbd-a793-279e3d98d4af
    - name: SPARK_CONF_DIR
      value: /opt/spark/conf
    image: lightbend/spark:2.2.0-OpenShift-2.4.3-rh-2.12
    imagePullPolicy: Always
    name: spark-kubernetes-driver
    ports:
    - containerPort: 7078
      name: driver-rpc-port
      protocol: TCP
    - containerPort: 7079
      name: blockmanager
      protocol: TCP
    - containerPort: 4040
      name: spark-ui
      protocol: TCP
    resources:
      limits:
        memory: 1408Mi
      requests:
        cpu: "1"
        memory: 1408Mi
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: File
    volumeMounts:
    - mountPath: /var/data/spark-7b475cc8-44be-4bbd-a793-279e3d98d4af
      name: spark-local-dir-1
    - mountPath: /opt/spark/conf
      name: spark-conf-volume
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: spark-sa-token-ptgh9
      readOnly: true
  dnsPolicy: ClusterFirst
  enableServiceLinks: true
  nodeName: minikube
  priority: 0
  restartPolicy: Never
  schedulerName: default-scheduler
  securityContext: {}
  serviceAccount: spark-sa
  serviceAccountName: spark-sa
  terminationGracePeriodSeconds: 30
  tolerations:
  - effect: NoExecute
    key: node.kubernetes.io/not-ready
    operator: Exists
    tolerationSeconds: 300
  - effect: NoExecute
    key: node.kubernetes.io/unreachable
    operator: Exists
    tolerationSeconds: 300
  volumes:
  - emptyDir: {}
    name: spark-local-dir-1
  - configMap:
      defaultMode: 420
      name: spark-pi-1559130109598-driver-conf-map
    name: spark-conf-volume
  - name: spark-sa-token-ptgh9
    secret:
      defaultMode: 420
      secretName: spark-sa-token-ptgh9
status:
  conditions:
  - lastProbeTime: null
    lastTransitionTime: "2019-05-29T11:41:50Z"
    status: "True"
    type: Initialized
  - lastProbeTime: null
    lastTransitionTime: "2019-05-29T11:41:54Z"
    status: "True"
    type: Ready
  - lastProbeTime: null
    lastTransitionTime: "2019-05-29T11:41:54Z"
    status: "True"
    type: ContainersReady
  - lastProbeTime: null
    lastTransitionTime: "2019-05-29T11:41:50Z"
    status: "True"
    type: PodScheduled
  containerStatuses:
  - containerID: docker://be168e40dde85dabc281ac55a5c823dec76a239860e8465e1fe0b23afc8adfc4
    image: lightbend/spark:2.2.0-OpenShift-2.4.3-rh-2.12
    imageID: docker-pullable://lightbend/spark@sha256:b28886f8bb81fb276fa8b143decb461b4c45ee56bb900ab8b0bdde81b29ac52e
    lastState: {}
    name: spark-kubernetes-driver
    ready: true
    restartCount: 0
    state:
      running:
        startedAt: "2019-05-29T11:41:53Z"
  hostIP: 172.31.43.176
  phase: Running
  podIP: 172.17.0.4
  qosClass: Burstable
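
The 1408Mi limit/request above is spark.driver.memory plus Spark's non-heap memory overhead, which defaults to max(0.10 * heap, 384 MiB) and can be overridden via spark.driver.memoryOverhead (same scheme for executors). A quick sanity check of that arithmetic, assuming the default overhead settings:

# Container memory = heap + max(0.10 * heap, 384 MiB), assuming default overhead config
HEAP_MIB=1024                                              # spark.driver.memory=1G
OVERHEAD_MIB=$(( HEAP_MIB / 10 > 384 ? HEAP_MIB / 10 : 384 ))
echo "$(( HEAP_MIB + OVERHEAD_MIB ))Mi"                    # prints 1408Mi, matching the limit above
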
$ kubectl get pods/spark-pi-1559130109598-exec-1 -n spark -o yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: "2019-05-29T11:41:58Z"
  labels:
    spark-app-selector: spark-ab5bb0aa607f4d1ea5d1a5ea650f845c
    spark-exec-id: "1"
    spark-role: executor
  name: spark-pi-1559130109598-exec-1
  namespace: spark
  ownerReferences:
  - apiVersion: v1
    controller: true
    kind: Pod
    name: spark-pi-1559130109598-driver
    uid: bf8b2c1e-8206-11e9-988a-0a5286b46bc8
  resourceVersion: "1432444"
  selfLink: /api/v1/namespaces/spark/pods/spark-pi-1559130109598-exec-1
  uid: c43b2430-8206-11e9-988a-0a5286b46bc8
spec:
  containers:
  - args:
    - executor
    env:
    - name: SPARK_DRIVER_URL
      value: spark://CoarseGrainedScheduler@spark-pi-1559130109598-driver-svc.spark.svc:7078
    - name: SPARK_EXECUTOR_CORES
      value: "1"
    - name: SPARK_EXECUTOR_MEMORY
      value: 1G
    - name: SPARK_APPLICATION_ID
      value: spark-ab5bb0aa607f4d1ea5d1a5ea650f845c
    - name: SPARK_CONF_DIR
      value: /opt/spark/conf
    - name: SPARK_EXECUTOR_ID
      value: "1"
    - name: SPARK_EXECUTOR_POD_IP
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: status.podIP
    - name: SPARK_LOCAL_DIRS
      value: /var/data/spark-7b475cc8-44be-4bbd-a793-279e3d98d4af
    image: lightbend/spark:2.2.0-OpenShift-2.4.3-rh-2.12
    imagePullPolicy: Always
    name: executor
    ports:
    - containerPort: 7079
      name: blockmanager
      protocol: TCP
    resources:
      limits:
        memory: 1408Mi
      requests:
        cpu: "1"
        memory: 1408Mi
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: File
    volumeMounts:
    - mountPath: /var/data/spark-7b475cc8-44be-4bbd-a793-279e3d98d4af
      name: spark-local-dir-1
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: default-token-ntggk
      readOnly: true
  dnsPolicy: ClusterFirst
  enableServiceLinks: true
  hostname: spark-pi-1559130109598-exec-1
  nodeName: minikube
  priority: 0
  restartPolicy: Never
  schedulerName: default-scheduler
  securityContext: {}
  serviceAccount: default
  serviceAccountName: default
  terminationGracePeriodSeconds: 30
  tolerations:
  - effect: NoExecute
    key: node.kubernetes.io/not-ready
    operator: Exists
    tolerationSeconds: 300
  - effect: NoExecute
    key: node.kubernetes.io/unreachable
    operator: Exists
    tolerationSeconds: 300
  volumes:
  - emptyDir: {}
    name: spark-local-dir-1
  - name: default-token-ntggk
    secret:
      defaultMode: 420
      secretName: default-token-ntggk
status:
  conditions:
  - lastProbeTime: null
    lastTransitionTime: "2019-05-29T11:41:58Z"
    status: "True"
    type: Initialized
  - lastProbeTime: null
    lastTransitionTime: "2019-05-29T11:42:01Z"
    status: "True"
    type: Ready
  - lastProbeTime: null
    lastTransitionTime: "2019-05-29T11:42:01Z"
    status: "True"
    type: ContainersReady
  - lastProbeTime: null
    lastTransitionTime: "2019-05-29T11:41:58Z"
    status: "True"
    type: PodScheduled
  containerStatuses:
  - containerID: docker://3c88bbd21bfe0078cf36a946945beb0787b74ec6ab7da442d97875920c3b023a
    image: lightbend/spark:2.2.0-OpenShift-2.4.3-rh-2.12
    imageID: docker-pullable://lightbend/spark@sha256:b28886f8bb81fb276fa8b143decb461b4c45ee56bb900ab8b0bdde81b29ac52e
    lastState: {}
    name: executor
    ready: true
    restartCount: 0
    state:
      running:
        startedAt: "2019-05-29T11:42:00Z"
  hostIP: 172.31.43.176
  phase: Running
  podIP: 172.17.0.5
  qosClass: Burstable
  startTime: "2019-05-29T11:41:58Z"
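
Both pods carry the spark-app-selector label shown above, so the whole application can be listed with one selector, and the SparkPi result read back from the driver log once the job completes (the grep pattern matches the line SparkPi normally prints):

kubectl get pods -n spark -l spark-app-selector=spark-ab5bb0aa607f4d1ea5d1a5ea650f845c
kubectl logs -n spark spark-pi-1559130109598-driver | grep "Pi is roughly"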