Skip to content

Instantly share code, notes, and snippets.

@sudhanshu456
Last active March 15, 2023 11:41
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save sudhanshu456/10b8cfd09629ae5bbce9900d8055603e to your computer and use it in GitHub Desktop.
demoapp.yaml
---
# Source: demo-app/templates/serviceaccount.yaml
# ServiceAccount for the service1 instance of the demo-app chart.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: service1-demo-app
  labels:
    helm.sh/chart: demo-app-0.1.0
    app.kubernetes.io/name: demo-app
    app.kubernetes.io/instance: service1
    app.kubernetes.io/managed-by: Helm
---
# Source: demo-app/templates/service.yaml
# ClusterIP Service exposing service1's pods on port 80 -> named
# container port "srvhttp".
apiVersion: v1
kind: Service
metadata:
  name: service1-demo-app
  labels:
    helm.sh/chart: demo-app-0.1.0
    app.kubernetes.io/name: demo-app
    app.kubernetes.io/instance: service1
    app.kubernetes.io/managed-by: Helm
spec:
  type: ClusterIP
  ports:
    - port: 80
      targetPort: srvhttp
      protocol: TCP
      name: http
  selector:
    app.kubernetes.io/name: demo-app
    app.kubernetes.io/instance: service1
---
# Source: demo-app/templates/deployment.yaml
# Single-replica Deployment for service1, serving HTTP on 8099.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: service1-demo-app
  labels:
    helm.sh/chart: demo-app-0.1.0
    app.kubernetes.io/name: demo-app
    app.kubernetes.io/instance: service1
    app.kubernetes.io/managed-by: Helm
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: demo-app
      app.kubernetes.io/instance: service1
  template:
    metadata:
      labels:
        app.kubernetes.io/name: demo-app
        app.kubernetes.io/instance: service1
    spec:
      serviceAccountName: service1-demo-app
      # Chart default: no pod-level security context configured.
      securityContext: {}
      affinity:
        podAntiAffinity:
          # Soft preference to spread replicas of this instance
          # across distinct nodes.
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 50
              podAffinityTerm:
                labelSelector:
                  matchLabels:
                    app.kubernetes.io/instance: service1
                topologyKey: kubernetes.io/hostname
      containers:
        - name: simplesrv
          securityContext: {}
          image: "sudhanshu456/demoapp:v0.0.1"
          imagePullPolicy: IfNotPresent
          ports:
            - name: srvhttp
              containerPort: 8099
              protocol: TCP
          env:
            # These values are "hardcoded" in envoy proxy config
            - name: SIMPLE_SERVICE_PORT
              value: "8099"
            # Use when running envoy as a proxy (separate from istio deploy)
            # - name: ENVOY_EGRESS_PORT
            #   value: "9001"
            - name: HOSTNAME
              # TODO: Don't hardcode domain
              value: "service1-demo-app.demoapp.svc.cluster.local"
---
# Source: demo-app/templates/serviceaccount.yaml
# ServiceAccount for the service2 instance of the demo-app chart.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: service2-demo-app
  labels:
    helm.sh/chart: demo-app-0.1.0
    app.kubernetes.io/name: demo-app
    app.kubernetes.io/instance: service2
    app.kubernetes.io/managed-by: Helm
---
# Source: demo-app/templates/service.yaml
# ClusterIP Service exposing service2's pods on port 80 -> named
# container port "srvhttp".
apiVersion: v1
kind: Service
metadata:
  name: service2-demo-app
  labels:
    helm.sh/chart: demo-app-0.1.0
    app.kubernetes.io/name: demo-app
    app.kubernetes.io/instance: service2
    app.kubernetes.io/managed-by: Helm
spec:
  type: ClusterIP
  ports:
    - port: 80
      targetPort: srvhttp
      protocol: TCP
      name: http
  selector:
    app.kubernetes.io/name: demo-app
    app.kubernetes.io/instance: service2
---
# Source: demo-app/templates/deployment.yaml
# Single-replica Deployment for service2, serving HTTP on 8099.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: service2-demo-app
  labels:
    helm.sh/chart: demo-app-0.1.0
    app.kubernetes.io/name: demo-app
    app.kubernetes.io/instance: service2
    app.kubernetes.io/managed-by: Helm
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: demo-app
      app.kubernetes.io/instance: service2
  template:
    metadata:
      labels:
        app.kubernetes.io/name: demo-app
        app.kubernetes.io/instance: service2
    spec:
      serviceAccountName: service2-demo-app
      # Chart default: no pod-level security context configured.
      securityContext: {}
      affinity:
        podAntiAffinity:
          # Soft preference to spread replicas of this instance
          # across distinct nodes.
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 50
              podAffinityTerm:
                labelSelector:
                  matchLabels:
                    app.kubernetes.io/instance: service2
                topologyKey: kubernetes.io/hostname
      containers:
        - name: simplesrv
          securityContext: {}
          image: "sudhanshu456/demoapp:v0.0.1"
          imagePullPolicy: IfNotPresent
          ports:
            - name: srvhttp
              containerPort: 8099
              protocol: TCP
          env:
            # These values are "hardcoded" in envoy proxy config
            - name: SIMPLE_SERVICE_PORT
              value: "8099"
            # Use when running envoy as a proxy (separate from istio deploy)
            # - name: ENVOY_EGRESS_PORT
            #   value: "9001"
            - name: HOSTNAME
              # TODO: Don't hardcode domain
              value: "service2-demo-app.demoapp.svc.cluster.local"
---
# Source: demo-app/templates/serviceaccount.yaml
# ServiceAccount for the service3 instance of the demo-app chart.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: service3-demo-app
  labels:
    helm.sh/chart: demo-app-0.1.0
    app.kubernetes.io/name: demo-app
    app.kubernetes.io/instance: service3
    app.kubernetes.io/managed-by: Helm
---
# Source: demo-app/templates/service.yaml
# ClusterIP Service exposing service3's pods on port 80 -> named
# container port "srvhttp".
apiVersion: v1
kind: Service
metadata:
  name: service3-demo-app
  labels:
    helm.sh/chart: demo-app-0.1.0
    app.kubernetes.io/name: demo-app
    app.kubernetes.io/instance: service3
    app.kubernetes.io/managed-by: Helm
spec:
  type: ClusterIP
  ports:
    - port: 80
      targetPort: srvhttp
      protocol: TCP
      name: http
  selector:
    app.kubernetes.io/name: demo-app
    app.kubernetes.io/instance: service3
---
# Source: demo-app/templates/deployment.yaml
# Single-replica Deployment for service3, serving HTTP on 8099.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: service3-demo-app
  labels:
    helm.sh/chart: demo-app-0.1.0
    app.kubernetes.io/name: demo-app
    app.kubernetes.io/instance: service3
    app.kubernetes.io/managed-by: Helm
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: demo-app
      app.kubernetes.io/instance: service3
  template:
    metadata:
      labels:
        app.kubernetes.io/name: demo-app
        app.kubernetes.io/instance: service3
    spec:
      serviceAccountName: service3-demo-app
      # Chart default: no pod-level security context configured.
      securityContext: {}
      affinity:
        podAntiAffinity:
          # Soft preference to spread replicas of this instance
          # across distinct nodes.
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 50
              podAffinityTerm:
                labelSelector:
                  matchLabels:
                    app.kubernetes.io/instance: service3
                topologyKey: kubernetes.io/hostname
      containers:
        - name: simplesrv
          securityContext: {}
          image: "sudhanshu456/demoapp:v0.0.1"
          imagePullPolicy: IfNotPresent
          ports:
            - name: srvhttp
              containerPort: 8099
              protocol: TCP
          env:
            # These values are "hardcoded" in envoy proxy config
            - name: SIMPLE_SERVICE_PORT
              value: "8099"
            # Use when running envoy as a proxy (separate from istio deploy)
            # - name: ENVOY_EGRESS_PORT
            #   value: "9001"
            - name: HOSTNAME
              # TODO: Don't hardcode domain
              value: "service3-demo-app.demoapp.svc.cluster.local"
---
# k6 load-test script, mounted into the wavepool-generator pod
# as /tmp/test.js. Ramps three user-type scenarios (guest,
# subscriber, bot) through identical VU stages against service1.
apiVersion: v1
kind: ConfigMap
metadata:
  name: wavepool-config
data:
  test.js: |
    import http from "k6/http";
    import { check, sleep } from "k6";
    import { randomIntBetween } from "https://jslib.k6.io/k6-utils/1.2.0/index.js";
    import { vu } from "k6/execution";

    export let vuStages = [
      { duration: "10s", target: 5 },
      { duration: "2m", target: 5 },
      { duration: "1m", target: 30 },
      { duration: "2m", target: 30 },
      { duration: "10s", target: 5 },
      { duration: "2m", target: 5 },
    ];

    export let options = {
      discardResponseBodies: true,
      scenarios: {
        guests: {
          executor: "ramping-vus",
          stages: vuStages,
          env: { USER_TYPE: "guest" },
        },
        subscribers: {
          executor: "ramping-vus",
          stages: vuStages,
          env: { USER_TYPE: "subscriber" },
        },
        bots: {
          executor: "ramping-vus",
          stages: vuStages,
          env: { USER_TYPE: "bot" },
        },
      },
    };

    export default function () {
      let userType = __ENV.USER_TYPE;
      let userId = vu.idInTest;
      const url = "http://service1-demo-app.demoapp.svc.cluster.local/request";
      const headers = {
        "Content-Type": "application/json",
        Cookie:
          "session=eyJ1c2VyIjoia2Vub2JpIn0.YbsY4Q.kTaKRTyOIfVlIbNB48d9YH6Q0wo",
        "User-Type": userType,
        "User-Id": userId,
      };
      const body = {
        request: [
          [
            {
              destination: "service1-demo-app.demoapp.svc.cluster.local",
            },
            {
              destination: "service2-demo-app.demoapp.svc.cluster.local",
            },
            {
              destination: "service3-demo-app.demoapp.svc.cluster.local",
            },
          ],
        ],
      };
      let res = http.request("POST", url, JSON.stringify(body), {
        headers: headers,
      });
      const ret = check(res, {
        "http status was 200": res.status === 200,
      });
      if (!ret) {
        // sleep for 10ms to 25ms
        // (randomIntBetween works on integers, so draw whole
        // milliseconds and convert to seconds for sleep(); the
        // original randomIntBetween(0.01, 0.025) mostly returned
        // 0 or ~1s, not the intended range)
        sleep(randomIntBetween(10, 25) / 1000);
      }
    }
---
# Load-generator Deployment: runs the k6 script from the
# wavepool-config ConfigMap in an endless shell loop.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: wavepool-generator
  namespace: demoapp
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/component: wavepool-generator
  template:
    metadata:
      labels:
        app.kubernetes.io/component: wavepool-generator
        # Keep the Istio sidecar out of the load generator pod.
        sidecar.istio.io/inject: "false"
    spec:
      containers:
        - name: wavepool-generator
          image: loadimpact/k6:latest
          imagePullPolicy: Always
          command:
            - /bin/sh
            - -xc
          # Re-run the mounted test script forever.
          args:
            - while true; do k6 run -v /tmp/test.js; done
          resources:
            limits:
              cpu: "1"
              memory: 2Gi
          volumeMounts:
            - mountPath: /tmp
              name: graphql-js-file
      volumes:
        # NOTE(review): assumes wavepool-config exists in the
        # demoapp namespace (the ConfigMap above declares none) —
        # confirm it is applied with -n demoapp.
        - configMap:
            name: wavepool-config
          name: graphql-js-file
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment