Skip to content

Instantly share code, notes, and snippets.

@john-a-joyce
Last active March 30, 2022 21:00
Show Gist options
  • Save john-a-joyce/89e06c2a5cf7a74faa4f9b4a47676af3 to your computer and use it in GitHub Desktop.
---
# Selectorless Service giving the edge cluster a resolvable in-cluster name
# (istiod-cp-v111x.istio-system.svc) for the remote istiod control plane.
# Backing addresses come from the matching Endpoints object below.
apiVersion: v1
kind: Service
metadata:
  name: istiod-cp-v111x
  namespace: istio-system
spec:
  # Headless: clients resolve directly to the Endpoints addresses.
  clusterIP: None
  ports:
    - name: grpc-xds
      port: 15010
      protocol: TCP
      targetPort: 15010
    - name: https-dns
      port: 15012
      protocol: TCP
      targetPort: 15012
    - name: https-webhook
      port: 443
      protocol: TCP
      targetPort: 15017
    - name: http-monitoring
      port: 15014
      protocol: TCP
      targetPort: 15014
  sessionAffinity: None
  type: ClusterIP
---
# Manually-managed Endpoints for the selectorless istiod-cp-v111x Service.
# 172.18.251.1 is the mesh-expansion gateway address on the central cluster
# (the ingress point for istiod traffic, per the setup notes in this gist).
apiVersion: v1
kind: Endpoints
metadata:
  # Name must match the Service exactly for the endpoints to be associated.
  name: istiod-cp-v111x
  namespace: istio-system
subsets:
  - addresses:
      - ip: 172.18.251.1
    ports:
      - name: https-dns
        port: 15012
        protocol: TCP
      - name: grpc-xds
        port: 15010
        protocol: TCP
      - name: https-webhook
        port: 15017
        protocol: TCP
      - name: http-monitoring
        port: 15014
        protocol: TCP
---
# LoadBalancer Service exposing the edge-gateway Deployment defined below.
# NOTE: the original document was missing the leading "---" separator, which
# would have merged it into the preceding Endpoints document.
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    gateway-name: edge-gateway
    istio.io/rev: cp-v111x.istio-system
  name: edge-gateway
  # namespace: "istio-system"
spec:
  externalTrafficPolicy: Cluster
  ports:
    - name: tcp-status-port
      port: 15021
      protocol: TCP
      targetPort: 15021
    - name: tls-istiod
      port: 15012
      protocol: TCP
      targetPort: 15012
    # protocol added for consistency with the other entries (TCP is the default).
    - name: http2
      port: 80
      protocol: TCP
      targetPort: 80
    - name: https
      port: 443
      protocol: TCP
      targetPort: 443
    - name: tls
      port: 15443
      protocol: TCP
      targetPort: 15443
  selector:
    app: edge-gateway
    gateway-name: edge-gateway
    istio.io/rev: cp-v111x.istio-system
  sessionAffinity: None
  type: LoadBalancer
status:
  loadBalancer: {}
---
# Gateway Deployment for the edge cluster. The pod template relies on the
# (remote) sidecar injector webhook: the "gateway" injection template rewrites
# this pod into a full gateway proxy at admission time.
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    gateway-name: edge-gateway
    istio.io/rev: cp-v111x.istio-system
  name: edge-gateway
  # namespace: "istio-system"
spec:
  replicas: 1
  selector:
    matchLabels:
      app: edge-gateway
      gateway-name: edge-gateway
      istio.io/rev: cp-v111x.istio-system
  strategy: {}
  template:
    metadata:
      annotations:
        # Select the gateway injection template (rather than the default sidecar template)
        inject.istio.io/templates: gateway
      labels:
        app: edge-gateway
        gateway-name: edge-gateway
        istio.io/rev: cp-v111x.istio-system
        sidecar.istio.io/inject: "true"
    spec:
      containers:
        - env:
            - name: ISTIO_META_LOCAL_ENDPOINTS_ONLY
              value: "false"
            - name: JWT_POLICY
              value: first-party-jwt
            # adding custom provider setting
            # This might be required to force the agent to use the file at /etc/certs like the VM case
            # but it doesn't look like that is required.
            # - name: PILOT_CERT_PROVIDER
            #   value: custom
          imagePullPolicy: IfNotPresent
          # image: auto
          # image: 033498657557.dkr.ecr.us-east-2.amazonaws.com/banzaicloud/istio-proxyv2:v1.11.4-bzc.1-dbg
          # change to debug image
          image: docker.io/library/proxy-agent:debug
          name: istio-proxy
          volumeMounts:
            # Replace the auto-mounted SA token with the ConfigMap-provided
            # central-cluster token (automountServiceAccountToken is false).
            - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
              name: workload-token
            # - mountPath: /etc/certs
            #   name: istiod-ca-root-cert
      serviceAccountName: default
      automountServiceAccountToken: false
      volumes:
        - configMap:
            defaultMode: 420
            name: workload-token
          name: workload-token
        # - configMap:
        #     defaultMode: 420
        #     name: istiod-ca-root-cert
        #   name: istiod-ca-root-cert
# This is the script that performs the step by step procedure to bring up an edge GW.
# We use KinD as our cluster environment; the first section sets up the environment.
#
# The prerequisites and version info are as follows.
# The two clusters must have network reachability to each other. We use metallb to help achieve that.
# Pod level reachability is not required.
# The kube apiservers in each cluster must be able to reach each other. Appropriate Kind settings allow that.
#
# kind version
# kind v0.11.1 go1.17.2 linux/amd64
# kubectl version
# Client Version: version.Info{Major:"1", Minor:"22", GitVersion:"v1.22.0", GitCommit:"c2b5237ccd9c0f1d600d3072634ca66cefdf272f", GitTreeState:"clean", BuildDate:"2021-08-04T18:03:20Z", GoVersion:"go1.16.6", Compiler:"gc", Platform:"linux/amd64"}
# Server Version: version.Info{Major:"1", Minor:"21", GitVersion:"v1.21.1", GitCommit:"5e58841cce77d4bc13713ad2b91fa0d961e69192", GitTreeState:"clean", BuildDate:"2021-05-21T23:01:33Z", GoVersion:"go1.16.4", Compiler:"gc", Platform:"linux/amd64"}
#
# ------------------------ Section 1 Cluster Env ----------------------------------
rm /root/.banzai/backyards/kind-central*
. kind_utils.sh
kind delete cluster --name=central
kind delete cluster --name=edge
# Create 2 clusters
kind_create_cluster central kubeconfigs/central.yaml 10.87.49.211 0
kind_create_cluster edge kubeconfigs/edge.yaml 10.87.49.211 1
#
# To minimize edge components we want to manually load the proxy image for now.
# The proxy image to load will depend on which smm version you are using and whether you are using
# the default configuration or overwriting with a debug image.
# If using upstream istio you should be able to directly load images from docker.
#kind load docker-image 033498657557.dkr.ecr.us-east-2.amazonaws.com/banzaicloud/istio-proxyv2:v1.11.4-bzc.1 --name=edge
#kind load docker-image 033498657557.dkr.ecr.us-east-2.amazonaws.com/banzaicloud/istio-proxyv2:v1.11.4-bzc.1-dbg --name=edge
kind load docker-image proxy-agent:debug --name=edge
cluster1=kubeconfigs/central.yaml
cluster2=kubeconfigs/edge.yaml
# Setup metallb
cluster1=kubeconfigs/central.yaml cluster2=kubeconfigs/edge.yaml ./multicluster-metallb.sh
#
# ------------------------ Section 2 Istiod Control Plane -------------------------
# We need the Istiod control plane running in the central cluster. We use Service Mesh Manager (SMM) cli
# commands to install what we need. We used version 1.8.2 for this work.
#
./smm-cli-1.8.2 install -c $cluster1 -a
#
# ------------------------ Section 3 Webhook Setup --------------------------------
# The webhook configuration is required and applied to the edge cluster.
# The first thing to do is make sure the ISTIOD_CUSTOM_HOST values include the host name that will be used in the URL.
# For this work we used an IP address to avoid DNS complexities.
# In an SMM installation the easy way to change this is to edit the istiocontrolplanes CR.
# We also change the pull policy to IfNotPresent so we don't need additional secrets to pull from private ECRs.
# add to env: section
# - name: ISTIOD_CUSTOM_HOST
#   value: istiod-cp-v111x.istio-system.svc,172.18.251.1
# 172.18.251.1 is the address of the meshexpansion gateway (the ingress point for istiod traffic) on the central cluster
kubectl edit istiocontrolplanes cp-v111x -n istio-system --kubeconfig="${cluster1}"
# sleep to let the above change propagate
sleep 10
#
# Note all steps below are edge cluster related and should be repeated for each edge cluster
#
# We use the central cluster's webhook as a model for the remote cluster's webhook configuration.
# The main thing required is a correct CAbundle value - using the central cluster as a model ensures this.
# Add the url for the location of the webhook. url: https://$IP:15017/inject/:ENV:cluster=${NAME}:ENV:net=${network1}
# $IP is the external IP for the ingress in the central cluster that allows istiod to be reached.
# $NAME is the name used for the cluster within istiod. We use an istioctl command below to create the entry.
# $network1 is the name of the network you want to use for the edge clusters.
# NOTE(review): $CEN is used below but never set in this script - presumably a
# --kubeconfig flag for the central cluster; confirm it is exported by the caller.
kubectl get mutatingwebhookconfiguration istio-sidecar-injector-cp-v111x-istio-system $CEN -o yaml > webhook_edge.yaml
#cat webhook_edge.yaml | sed '/path: \/inject/d' | sed '/name: istiod-cp-v111x/d' | sed '/namespace: istio-system/d' | sed '/port: 443/d' | sed 's/service:/url: https:\/\/172.18.251.1:15017\/inject\/:ENV:cluster=edge:ENV:net=k3d-demo3/g' | kubectl apply -f - --kubeconfig="${cluster2}"
#remove the edge cluster
cat webhook_edge.yaml | sed '/path: \/inject/d' | sed '/name: istiod-cp-v111x/d' | sed '/namespace: istio-system/d' | sed '/port: 443/d' | sed 's/service:/url: https:\/\/172.18.251.1:15017\/inject/g' | kubectl apply -f - --kubeconfig="${cluster2}"
#
# ------------------------ Section 4 Kube Apiserver Access --------------------------------
#
# These steps should still be good for when remote API access is available, but commenting out since that isn't permitted at current time.
#
# We need to create a cluster entry in istiod running in the central cluster. We do this with an istioctl command that
# creates a secret with the access credentials for the edge cluster.
# Note this was failing with a 1.11 based istioctl; we used version 1.8.6.
#echo "create remote secret details"
#$HOME/istioctl/istio-1.8.6/bin/istioctl x create-remote-secret --kubeconfig="${cluster2}" --type=remote --namespace=istio-system --service-account=istiod-service-account --create-service-account=true --name=edge
#$HOME/istioctl/istio-1.8.6/bin/istioctl x create-remote-secret --kubeconfig="${cluster2}" --type=remote --namespace=istio-system --service-account=istiod-service-account --create-service-account=true --name=edge | kubectl apply -f - --kubeconfig="${cluster1}"
#
# ------------------- Section 5 xDS mTLS Credentials with remote API access --------------------------------
#
# To properly connect using mTLS for the xDS exchange we need to add a root cert to the namespace the gateway will be in.
# Create root cert for mounting. Note namespace should match GW deployment.
kubectl get configmap istio-ca-root-cert-cp-v111x -n istio-system $CEN -o yaml | sed '/namespace/d' | kubectl apply -f - --kubeconfig="${cluster2}"
# ------------------- Section 5 xDS & CSR Credential creation --------------------------------
#
# For Envoy to properly connect to the istio control plane we need to provide a Cert and a token.
# The cert creation is the same for both cases and above.
# This is done partially manually at current time.
# BUGFIX: jq prints the literal string "null" (not an empty string) when the
# path is missing, which would defeat the -z check below; '// empty' makes a
# missing secret name produce "" so the guard actually fires.
SA_SECRET_NAME=$(kubectl get serviceaccount default -o json --kubeconfig="${cluster1}" | jq -r '.secrets[0].name // empty')
if [ -z "$SA_SECRET_NAME" ]; then
  echo "Cannot find secret named default"
  exit 1
fi
kubectl get secret $SA_SECRET_NAME $CEN -o json --kubeconfig="${cluster1}" | jq -r .data.token | base64 -d > workload-tokenT.yaml
# NOTE(review): workload-tokenA.yaml and workload-tokenB.yaml are not produced by
# this script - they appear to be pre-made header/footer fragments of the
# workload-token ConfigMap manifest; confirm they exist alongside this script.
cat workload-tokenA.yaml > tmp_tok; cat workload-tokenT.yaml >> tmp_tok; cat workload-tokenB.yaml >> tmp_tok
kubectl apply -f tmp_tok --kubeconfig="${cluster2}"
# we should explore creating and mounting this as a secret instead of a configmap
#kubectl get secret istio-ca-secret -n istio-system -o json --kubeconfig="${cluster1}" | jq -r '.data["ca-cert.pem"]' | base64 -d > tmp_cert_cert
#cat tmp_cert_cert
#echo "Edit the file cert_cfgmap_template and add in the cert from file tmp_cert_cert"
#sleep 45
#kubectl apply -f cert_cfgmap_template --kubeconfig="${cluster2}"
#
# ------------------------ Section 6 Service and Endpoint --------------------------------
#
# We need to ensure the discovery address used by the proxy is both resolvable and reachable from the edge cluster.
# We do that by creating a selectorless service and endpoint. We use the standard naming for the address
# and the endpoint points to the ingress address on the central cluster that provides istiod reachability.
#
# Create a local svc pointing to remote ingress GW for pilot and CA access.
kubectl create namespace istio-system --kubeconfig="${cluster2}"
kubectl create -f discovery_svc.yaml --kubeconfig="${cluster2}"
#
# ------------------------ Section 7 Deploy Gateway --------------------------------
#
# We deploy a gateway deployment resource and the service resource. The webhook injection
# will add additional environment variables, startup arguments and flags and xDS settings.
#kubectl apply -f gateway_deploy_svc.yaml --kubeconfig="${cluster2}"
# change the deployment to
# kubectl apply -f gateway_deploy_svc.yaml --kubeconfig="${cluster2}"
kubectl apply -f gateway_deploy_token.yaml --kubeconfig="${cluster2}"
# NOTE: the script deliberately stops here; the app-deployment steps below are
# kept for reference but are not executed.
exit
#
# ------------------------ Section 8 Deploy Any Apps --------------------------------
#
# First ensure the namespace that is being used is properly labeled for sidecar injection.
# For our work we deployed two helloworld pods (one in each cluster) and a sleep pod in the edge cluster.
# We used the manifest provided by istio and referenced here:
# https://istio.io/latest/docs/setup/install/external-controlplane/
# point samples to an istio repo and CEN
#sleep 10
#kubectl edit namespace default
export samples=../../istio.io/istio/samples
kubectl apply -f $samples/helloworld/helloworld.yaml -l service=helloworld --kubeconfig="${cluster2}"
kubectl apply -f $samples/helloworld/helloworld.yaml -l version=v1 --kubeconfig="${cluster2}"
kubectl apply -f $samples/sleep/sleep.yaml --kubeconfig="${cluster2}"
kubectl apply -f $samples/helloworld/helloworld.yaml -l version=v1 --kubeconfig="${cluster1}"
kubectl apply -f $samples/helloworld/helloworld.yaml -l service=helloworld --kubeconfig="${cluster1}"
# Then we curled to helloworld and saw the request was loadbalanced to the 2 helloworld pods.
#
#kubectl exec -c sleep sleep-557747455f-x6j4m --kubeconfig="${cluster1}" -- curl -sS helloworld:5000/hello
Here is the command I used to build the proxy
------------------------------------------------------------------------------------
docker build -t proxy-agent:debug -f /root/go/src/eti/istio/pilot/docker/Dockerfile.bzc-proxyv2 --build-arg AWS_ACCESS_KEY_ID --build-arg AWS_SECRET_ACCESS_KEY .
Below is the diff I used. I don't think the diff to the make build command in the Dockerfile is needed, but
I did not confirm. There is only one code change; everything else is additional debug info. Search for "Hack".
------------------------------------------------------------------------------------
diff --git a/pilot/cmd/pilot-agent/options/security.go b/pilot/cmd/pilot-agent/options/security.go
index 2e7e2a0..1b06f52 100644
--- a/pilot/cmd/pilot-agent/options/security.go
+++ b/pilot/cmd/pilot-agent/options/security.go
@@ -84,10 +84,15 @@ func SetupSecurityOptions(proxyConfig *meshconfig.ProxyConfig, secOpt *security.
switch jwtPolicy {
case jwt.PolicyThirdParty:
log.Info("JWT policy is third-party-jwt")
+ log.Info("JAJ TrustworthyJWTPath")
jwtPath = constants.TrustworthyJWTPath
case jwt.PolicyFirstParty:
log.Info("JWT policy is first-party-jwt")
+ log.Info("JAJ K8sSAJwtFileName")
jwtPath = securityModel.K8sSAJwtFileName
+ // JAJ debug notes. Add some point added the below line. I am not sure why I did that and appear maybe
+ // it is now causing issues.
+ //jwtPath = constants.TrustworthyJWTPath
default:
log.Info("Using existing certs")
}
diff --git a/pilot/docker/Dockerfile.bzc-proxyv2 b/pilot/docker/Dockerfile.bzc-proxyv2
index e84b0ce..5c2b712 100644
--- a/pilot/docker/Dockerfile.bzc-proxyv2
+++ b/pilot/docker/Dockerfile.bzc-proxyv2
@@ -45,7 +45,8 @@ ENV PROXY_REPO_SHA ${PROXY_REPO_SHA}
RUN ISTIO_ENVOY_LINUX_DEBUG_URL=${ISTIO_ENVOY_BASE_URL}/envoy-alpha-${PROXY_REPO_SHA}.tar.gz \
ISTIO_ENVOY_LINUX_RELEASE_URL=${ISTIO_ENVOY_BASE_URL}/envoy-alpha-${PROXY_REPO_SHA}.tar.gz \
- IGNORE_DIRTY_TREE=1 BUILD_WITH_CONTAINER=0 make build deb rpm-7/fpm
+ #IGNORE_DIRTY_TREE=1 BUILD_WITH_CONTAINER=0 make build deb rpm-7/fpm
+ IGNORE_DIRTY_TREE=1 BUILD_WITH_CONTAINER=0 make build
# The following section is used as base image if BASE_DISTRIBUTION=default
FROM containers.cisco.com/banzaicloud/istio-base:ubuntu-focal-20210401 as default
diff --git a/pilot/pkg/xds/ads.go b/pilot/pkg/xds/ads.go
index 9332107..430249f 100644
--- a/pilot/pkg/xds/ads.go
+++ b/pilot/pkg/xds/ads.go
@@ -281,6 +281,7 @@ func (s *DiscoveryServer) Stream(stream DiscoveryStream) error {
return status.Errorf(codes.ResourceExhausted, "request rate limit exceeded: %v", err)
}
+ log.Warnf("ADS: JAJ")
ids, err := s.authenticate(ctx)
if err != nil {
return status.Error(codes.Unauthenticated, err.Error())
diff --git a/pilot/pkg/xds/authentication.go b/pilot/pkg/xds/authentication.go
index 03948cc..cf6e180 100644
--- a/pilot/pkg/xds/authentication.go
+++ b/pilot/pkg/xds/authentication.go
@@ -54,7 +54,9 @@ func (s *DiscoveryServer) authenticate(ctx context.Context) ([]string, error) {
}
authFailMsgs := []string{}
for _, authn := range s.Authenticators {
+ log.Errorf("Failed to authenticate client from %s: %s", peerInfo.Addr.String(), strings.Join(authFailMsgs, "; "))
u, err := authn.Authenticate(ctx)
+ log.Infof("JAJ xds authentire")
// If one authenticator passes, return
if u != nil && u.Identities != nil && err == nil {
return u.Identities, nil
diff --git a/pilot/pkg/xds/debug.go b/pilot/pkg/xds/debug.go
index 8475d5d..88150f6 100644
--- a/pilot/pkg/xds/debug.go
+++ b/pilot/pkg/xds/debug.go
@@ -232,6 +232,7 @@ func (s *DiscoveryServer) allowAuthenticatedOrLocalhost(next http.Handler) http.
var ids []string
for _, authn := range s.Authenticators {
u, err := authn.AuthenticateRequest(req)
+ istiolog.Warnf("JAJ authenticating: %v", err)
// If one authenticator passes, return
if u != nil && u.Identities != nil && err == nil {
ids = u.Identities
diff --git a/pkg/config/constants/constants.go b/pkg/config/constants/constants.go
index 2e3bfc8..e237fbf 100644
--- a/pkg/config/constants/constants.go
+++ b/pkg/config/constants/constants.go
@@ -128,6 +128,9 @@ const (
// TrustworthyJWTPath is the default 3P token to authenticate with third party services
TrustworthyJWTPath = "./var/run/secrets/tokens/istio-token"
+ // TrustworthyJWTPath is the default 3P token to authenticate with third party services
+ //JAJTrustworthyJWTPath = "./var/run/secrets/tokens/jaj-token"
+
// CertProviderIstiod uses istiod self signed DNS certificates for the control plane
CertProviderIstiod = "istiod"
// CertProviderKubernetes uses the Kubernetes CSR API to generate a DNS certificate for the control plane
diff --git a/pkg/istio-agent/agent.go b/pkg/istio-agent/agent.go
index 9d3408e..ce18267 100644
--- a/pkg/istio-agent/agent.go
+++ b/pkg/istio-agent/agent.go
@@ -572,6 +572,7 @@ func (a *Agent) FindRootCAForXDS() (string, error) {
// Old style - mounted cert. This is used for XDS auth only,
// not connecting to CA_ADDR because this mode uses external
// agent (Secret refresh, etc)
+ log.Info("JAJ using old style CA")
return security.DefaultRootCertFilePath, nil
} else if a.secOpts.PilotCertProvider == constants.CertProviderKubernetes {
// Using K8S - this is likely incorrect, may work by accident (https://github.com/istio/istio/issues/22161)
@@ -621,6 +622,7 @@ func (a *Agent) FindRootCAForCA() (string, error) {
// API is GA.
rootCAPath = k8sCAPath // ./var/run/secrets/kubernetes.io/serviceaccount/ca.crt
} else if a.secOpts.PilotCertProvider == constants.CertProviderCustom {
+ log.Info("JAJ using DefaultRootCertFilePath ")
rootCAPath = security.DefaultRootCertFilePath // ./etc/certs/root-cert.pem
} else if a.secOpts.ProvCert != "" {
// This was never completely correct - PROV_CERT are only intended for auth with CA_ADDR,
diff --git a/pkg/istio-agent/xds_proxy.go b/pkg/istio-agent/xds_proxy.go
index fb347c5..8f0c18e 100644
--- a/pkg/istio-agent/xds_proxy.go
+++ b/pkg/istio-agent/xds_proxy.go
@@ -342,12 +342,14 @@ func (p *XdsProxy) handleStream(downstream adsStream) error {
if !initialRequestsSent && req.TypeUrl == v3.ListenerType {
// fire off an initial NDS request
if _, f := p.handlers[v3.NameTableType]; f {
+ proxyLog.Infof("JAJ NDS request")
con.sendRequest(&discovery.DiscoveryRequest{
TypeUrl: v3.NameTableType,
})
}
// fire off an initial PCDS request
if _, f := p.handlers[v3.ProxyConfigType]; f {
+ proxyLog.Infof("JAJ PCDS request")
con.sendRequest(&discovery.DiscoveryRequest{
TypeUrl: v3.ProxyConfigType,
})
@@ -356,6 +358,7 @@ func (p *XdsProxy) handleStream(downstream adsStream) error {
p.connectedMutex.RLock()
initialRequest := p.initialRequest
if initialRequest != nil {
+ proxyLog.Infof("JAJ connect request")
con.sendRequest(initialRequest)
}
p.connectedMutex.RUnlock()
diff --git a/security/pkg/k8s/tokenreview/k8sauthn.go b/security/pkg/k8s/tokenreview/k8sauthn.go
index 9a3b37b..911eebf 100644
--- a/security/pkg/k8s/tokenreview/k8sauthn.go
+++ b/security/pkg/k8s/tokenreview/k8sauthn.go
@@ -50,6 +50,27 @@ func getTokenReviewResult(tokenReview *k8sauth.TokenReview) ([]string, error) {
if tokenReview.Status.Error != "" {
return nil, fmt.Errorf("the service account authentication returns an error: %v",
tokenReview.Status.Error)
+/*
+ jajinServiceAccountGroup := false
+ for _, group := range tokenReview.Status.User.Groups {
+ if group == "system:serviceaccounts" {
+ jajinServiceAccountGroup = true
+ break
+ }
+ }
+ if !jajinServiceAccountGroup {
+ return nil, fmt.Errorf("the token is not a service account")
+ }
+ // "username" is in the form of system:serviceaccount:{namespace}:{service account name}",
+ // e.g., "username":"system:serviceaccount:default:example-pod-sa"
+ jajsubStrings := strings.Split(tokenReview.Status.User.Username, ":")
+ if len(jajsubStrings) != 4 {
+ return nil, fmt.Errorf("invalid username field in the token review result")
+ }
+ jajsaName := jajsubStrings[3]
+ return nil, fmt.Errorf("the returns an error: saName= %s error = %v",
+ jajsaName, tokenReview.Status.Error)
+*/
}
// An example SA token:
// {"alg":"RS256","typ":"JWT"}
diff --git a/security/pkg/k8s/x509sdattributes/x509sdattributes.go b/security/pkg/k8s/x509sdattributes/x509sdattributes.go
index c4ecbb4..c76d7c1 100644
--- a/security/pkg/k8s/x509sdattributes/x509sdattributes.go
+++ b/security/pkg/k8s/x509sdattributes/x509sdattributes.go
@@ -59,7 +59,7 @@ func NewSDAttributesExtractor(extractors ...PodAttributeExtractor) Extractor {
func (e *sdAttributesExtractor) Extract(ctx context.Context, podNamespacedName NamespacedName, caller CallerID, kubeClient kubernetes.Interface) (SDAttributes, error) {
pod, err := kubeClient.CoreV1().Pods(podNamespacedName.Namespace).Get(ctx, podNamespacedName.Name, metav1.GetOptions{})
if err != nil {
- return nil, fmt.Errorf("failed to get pod: %v", err)
+ return nil, fmt.Errorf("JAJ failed to get pod: %v", err)
}
if pod.Namespace != caller.Namespace {
diff --git a/security/pkg/nodeagent/cache/secretcache.go b/security/pkg/nodeagent/cache/secretcache.go
index c0b3c26..581fadb 100644
--- a/security/pkg/nodeagent/cache/secretcache.go
+++ b/security/pkg/nodeagent/cache/secretcache.go
@@ -225,6 +225,7 @@ func (sc *SecretManagerClient) CallUpdateCallback(resourceName string) {
func (sc *SecretManagerClient) getCachedSecret(resourceName string) (secret *security.SecretItem) {
var rootCertBundle []byte
var ns *security.SecretItem
+ cacheLog.Infof("JAJ getCachedSecret %q", resourceName)
if c := sc.cache.GetWorkload(); c != nil {
if resourceName == security.RootCertReqResourceName {
@@ -244,6 +245,7 @@ func (sc *SecretManagerClient) getCachedSecret(resourceName string) (secret *sec
CreatedTime: c.CreatedTime,
}
cacheLog.WithLabels("ttl", time.Until(c.ExpireTime)).Info("returned workload certificate from cache")
+ cacheLog.Infof("JAJ expire time ")
}
return ns
@@ -317,6 +319,7 @@ func (sc *SecretManagerClient) GenerateSecret(resourceName string) (secret *secu
// send request to CA to get new workload certificate
ns, err = sc.generateNewSecret(resourceName)
+ cacheLog.Info("JAJ generateNewSecret")
if err != nil {
return nil, fmt.Errorf("failed to generate workload certificate: %v", err)
}
@@ -762,6 +765,7 @@ func (sc *SecretManagerClient) watchCustomIDSecret() {
fileWatcher := filewatcher.NewWatcher()
err := fileWatcher.Add(file)
if err != nil {
+ cacheLog.Infof("custom identifier file: %s", file)
cacheLog.Errorf("failed to watch custom identifier file: %v", err)
return
}
diff --git a/security/pkg/nodeagent/caclient/providers/citadel/client.go b/security/pkg/nodeagent/caclient/providers/citadel/client.go
index c299a6f..168ff03 100644
--- a/security/pkg/nodeagent/caclient/providers/citadel/client.go
+++ b/security/pkg/nodeagent/caclient/providers/citadel/client.go
@@ -73,6 +73,7 @@ func NewCitadelClient(opts *security.Options, tls bool, rootCert []byte) (*Citad
return nil, fmt.Errorf("failed to connect to endpoint %s", opts.CAEndpoint)
}
c.conn = conn
+ //JAJ Extreme debuggin
c.client = pb.NewIstioCertificateServiceClient(conn)
return c, nil
}
@@ -103,12 +104,16 @@ func (c *CitadelClient) CSRSign(csrPEM []byte, certValidTTLInSec int64) ([]strin
pairs := []string{
clusterIDMeta, c.opts.ClusterID,
}
+ /* JAJ extreme debugging This is the Hack that allows the Pod to come up.
podNamespace := os.Getenv("POD_NAMESPACE")
podName := os.Getenv("POD_NAME")
if podName != "" && podNamespace != "" {
pairs = append(pairs, podNamespacedNameMeta, fmt.Sprintf("%s/%s", podNamespace, podName))
+ citadelClientLog.Info("JAJ adding podName and namespace ")
}
+ */
ctx := metadata.NewOutgoingContext(context.Background(), metadata.Pairs(pairs...))
+ // JAJ how is this done for VM.
resp, err := c.client.CreateCertificate(ctx, req)
if err != nil {
return nil, fmt.Errorf("create certificate: %v", err)
@@ -124,6 +129,7 @@ func (c *CitadelClient) CSRSign(csrPEM []byte, certValidTTLInSec int64) ([]strin
func (c *CitadelClient) getTLSDialOption() (grpc.DialOption, error) {
// Load the TLS root certificate from the specified file.
// Create a certificate pool
+ // JAJ this is the key to how the client send the token vs. the cert.
var certPool *x509.CertPool
var err error
if c.caTLSRootCert == nil {
@@ -145,8 +151,10 @@ func (c *CitadelClient) getTLSDialOption() (grpc.DialOption, error) {
config := tls.Config{
Certificates: []tls.Certificate{certificate},
GetClientCertificate: func(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
+ citadelClientLog.Info("c.opts.ProvCert :", c.opts.ProvCert)
if c.opts.ProvCert != "" {
// Load the certificate from disk
+ citadelClientLog.Info("JAJ loading cert from disk : ")
certificate, err = tls.LoadX509KeyPair(
filepath.Join(c.opts.ProvCert, "cert-chain.pem"),
filepath.Join(c.opts.ProvCert, "key.pem"))
diff --git a/security/pkg/server/ca/authenticate/cert_authenticator.go b/security/pkg/server/ca/authenticate/cert_authenticator.go
index 1f72163..d102400 100644
--- a/security/pkg/server/ca/authenticate/cert_authenticator.go
+++ b/security/pkg/server/ca/authenticate/cert_authenticator.go
@@ -56,7 +56,7 @@ func (cca *ClientCertAuthenticator) Authenticate(ctx context.Context) (*security
tlsInfo := peer.AuthInfo.(credentials.TLSInfo)
chains := tlsInfo.State.VerifiedChains
if len(chains) == 0 || len(chains[0]) == 0 {
- return nil, fmt.Errorf("no verified chain is found")
+ return nil, fmt.Errorf("no verified chain is found JAJ")
}
ids, err := util.ExtractIDs(chains[0][0].Extensions)
@@ -79,7 +79,7 @@ func (cca *ClientCertAuthenticator) AuthenticateRequest(req *http.Request) (*sec
chains := req.TLS.VerifiedChains
if len(chains) == 0 || len(chains[0]) == 0 {
- return nil, fmt.Errorf("no verified chain is found")
+ return nil, fmt.Errorf("no verified chain is found JAJ1")
}
ids, err := util.ExtractIDs(chains[0][0].Extensions)
diff --git a/security/pkg/server/ca/authenticate/kubeauth/kube_jwt.go b/security/pkg/server/ca/authenticate/kubeauth/kube_jwt.go
index ec0e684..2f90c7e 100644
--- a/security/pkg/server/ca/authenticate/kubeauth/kube_jwt.go
+++ b/security/pkg/server/ca/authenticate/kubeauth/kube_jwt.go
@@ -92,6 +92,7 @@ func isAllowedKubernetesAudience(a string) bool {
func (a *KubeJWTAuthenticator) AuthenticateRequest(req *http.Request) (*security.Caller, error) {
targetJWT, err := security.ExtractRequestToken(req)
+ log.Warnf("JAJ AuthenticateRequest ")
if err != nil {
return nil, fmt.Errorf("target JWT extraction error: %v", err)
}
@@ -107,6 +108,7 @@ func (a *KubeJWTAuthenticator) AuthenticateRequest(req *http.Request) (*security
// The returned Caller.Identities is in SPIFFE format.
func (a *KubeJWTAuthenticator) Authenticate(ctx context.Context) (*security.Caller, error) {
targetJWT, err := security.ExtractBearerToken(ctx)
+ log.Warnf("JAJ Authenticate ")
if err != nil {
return nil, fmt.Errorf("target JWT extraction error: %v", err)
}
@@ -132,6 +134,7 @@ func (a *KubeJWTAuthenticator) authenticate(targetJWT string, clusterID cluster.
// If 'Require3PToken' is set - we will also set the audiences field, forcing the check.
// If Require3P is not set - and token does not have audience - we will
// tolerate the unbound tokens.
+ log.Warnf("JAJ targetJWT %s clusterid = %q podName = %v", targetJWT, clusterID, podNamespacedName)
if !util.IsK8SUnbound(targetJWT) || security.Require3PToken.Get() {
aud = security.TokenAudiences
if tokenAud, _ := util.ExtractJwtAud(targetJWT); len(tokenAud) == 1 && isAllowedKubernetesAudience(tokenAud[0]) {
@@ -153,6 +156,7 @@ func (a *KubeJWTAuthenticator) authenticate(targetJWT string, clusterID cluster.
} else {
// No audience will be passed to the check if the token
// is unbound and the setting to require bound tokens is off
+ log.Warnf("JAJ No audience set")
aud = nil
}
id, err := tokenreview.ValidateK8sJwt(kubeClient, targetJWT, aud)
diff --git a/security/pkg/server/ca/authenticate/oidc.go b/security/pkg/server/ca/authenticate/oidc.go
index 042e5e9..7089ff2 100644
--- a/security/pkg/server/ca/authenticate/oidc.go
+++ b/security/pkg/server/ca/authenticate/oidc.go
@@ -79,6 +79,7 @@ func (j *JwtAuthenticator) AuthenticateRequest(req *http.Request) (*security.Cal
// Authenticate - based on the old OIDC authenticator for mesh expansion.
func (j *JwtAuthenticator) Authenticate(ctx context.Context) (*security.Caller, error) {
bearerToken, err := security.ExtractBearerToken(ctx)
+ // JAJ this might be the path for VM expansion authentication
if err != nil {
return nil, fmt.Errorf("ID token extraction error: %v", err)
}
diff --git a/security/pkg/server/ca/server.go b/security/pkg/server/ca/server.go
index 1244565..30bb257 100644
--- a/security/pkg/server/ca/server.go
+++ b/security/pkg/server/ca/server.go
@@ -75,6 +75,7 @@ func (s *Server) CreateCertificate(ctx context.Context, request *pb.IstioCertifi
var err error
s.monitoring.CSR.Increment()
+ log.Infof("JAJ Create Certificate")
caller := Authenticate(ctx, s.Authenticators)
if caller == nil {
s.monitoring.AuthnError.Increment()
@@ -192,6 +193,7 @@ func New(ca CertificateAuthority, ttl time.Duration,
func Authenticate(ctx context.Context, auth []security.Authenticator) *security.Caller {
// TODO: apply different authenticators in specific order / according to configuration.
var errMsg string
+ log.Infof("JAJ apply different authenticators in specific order")
for id, authn := range auth {
u, err := authn.Authenticate(ctx)
if err != nil {
---
apiVersion: v1
kind: ConfigMap
metadata:
  # Empty `annotations:` / `labels:` keys parse as null and add nothing;
  # removed. Re-add with real entries if needed.
  name: workload-token
  namespace: default
data:
  # SECURITY NOTE(review): this value is a Kubernetes service-account JWT —
  # a live credential. ConfigMaps are not access-restricted the way Secrets
  # are; consider `kind: Secret, type: Opaque` instead. Left as a ConfigMap
  # here because consumers may mount it by kind — confirm before changing.
  token: |
    eyJhbGciOiJSUzI1NiIsImtpZCI6ImlrVUFqdXFzNlpXOG5TNkhDS2hCZFNkRmc0aFZidGNHUGt5djY5YUFpM2MifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJkZWZhdWx0Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZWNyZXQubmFtZSI6ImRlZmF1bHQtdG9rZW4tajV0bTciLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiZGVmYXVsdCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6ImJkYTMzMzNmLTFiZDUtNDM4Mi04ZmNiLWZkMGMxNTE4OTNlOCIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDpkZWZhdWx0OmRlZmF1bHQifQ.skFzw0QAbze7jx5S7dD3rlCG5BIUdMzQILxaOapg9gq63hNbx0KWdPwH4GqiSLUi1ebjdA2VKPCoAeO-FgECZ4gnttp-lzray_CZguHWkV2HyzIdo_1hlDdlMAyq7cUG_CCpIt8ZcrQHwbCApG9tzFDI9akVkPp8CEA7wTol20BrHTTB1U7HUr3QsDS2pQ37EAnHh3FYTsJYJgbU-6YYv-H8xbdzcJ2Lwk5lrm6tIKxcPrCJvTUaALwRCoraZssVIC4zcyzA1lu_d-6RyBgPpOj1fFSLUkrGFKlPNy4p5vpqa7yM_lgZ60SmWwaFEq4j9bE9MsRlgF2xown0vej-yg
---
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
  annotations:
    banzaicloud.com/last-applied: UEsDBBQACAAIAAAAAAAAAAAAAAAAAAAAAAAIAAAAb3JpZ2luYWzUU0GP0zwQ/S9z/OSkir49oNzQUhAHVmhBcEA9TJ1pa+p4LHuS3VLlvyPXSZutVmJPiL3Fk/G8N+89HwG9+UYhGnZQAzatiek70NZECSiGXbl/E0vDi74CBXvjGqjhUycoxm2/03rHvL9ltzHbLveDgpYEGxSE+gjoHMvpR0zHNbpfaLTlrklDW3S4pabQ3Hp25ARq0L7oq6p6LEwUw0U8RKF2PDQmau4pHIpIoTeaWoq7Uqdqqbld9BVav8Mqt2t2Eth6i45AXWMHsijUFMJQwxxrMTGAQYHFNdkTdfQeaoimIY2hMO4naeEAKt/NE/sZ/3I+ExQEsoSRzmDsKaBwuCyV8By2l5ZrsOJZbdK1h+xEhPrH8eLjPfWGHkaD0z/oqzUJJiv7ClYKtDXkJBuYlhxlTZ9zJs0ZGTLF6FHTlW6gwKPsoIZFJpwKHATqm5v/h0HBBo3tAn1ma/QBaniPxsJ55UB9eR5dXq9eTirPCXwhm02oj9Ci6N3y0QeK07ZH2NNhIjkZpGASHmr4mOLao+3opM7z1q0G9WTSyClnfTbsHVO8Y1k+migwrAYFvE6NL2Q5bnxe9CLiDOOO5YrzBm0kWJ3wQmdpjIA3HwJ3/tSSjL489DEHqZgHT7Xb++Xbr8tUDxS5CzoDeG5iKkbNPvn0HwzpZBpabjakJZ5oOYIk0+uNXvbqL+XuaVReQ7zUn5d64WP6B4K6Gn4HAAD//1BLBwibmygVIQIAAPoGAABQSwECFAAUAAgACAAAAAAAm5soFSECAAD6BgAACAAAAAAAAAAAAAAAAAAAAAAAb3JpZ2luYWxQSwUGAAAAAAEAAQA2AAAAVwIAAAAA
    banzaicloud.io/managed-component: cp-v111x-istio-system-istio-discovery-servicemesh.cisco.com/v1alpha1-istiocontrolplane
    banzaicloud.io/related-to: istio-system/cp-v111x
  # NOTE(review): the four fields below are server-populated (this manifest
  # looks like a `kubectl get -o yaml` dump). Re-applying them can cause
  # conflict errors; strip creationTimestamp/generation/resourceVersion/uid
  # before committing this as a declarative manifest — confirm with owners.
  creationTimestamp: "2022-03-30T13:03:26Z"
  generation: 2
  labels:
    app: sidecar-injector
    istio.io/rev: cp-v111x.istio-system
    release: istio-operator-discovery
  name: istio-sidecar-injector-cp-v111x-istio-system
  resourceVersion: "2022"
  uid: 7350e949-36b3-4ccc-b204-26d55b3f5d81
webhooks:
# Webhook 1: namespace-driven injection — fires for pods in namespaces
# labeled istio.io/rev=cp-v111x.istio-system (and not istio-injection),
# unless the pod opts out with sidecar.istio.io/inject="false".
- admissionReviewVersions:
  - v1beta1
  - v1
  clientConfig:
    caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMvVENDQWVXZ0F3SUJBZ0lSQVBibjVRRndsY00rclFuVGYrYjZPV2t3RFFZSktvWklodmNOQVFFTEJRQXcKR0RFV01CUUdBMVVFQ2hNTlkyeDFjM1JsY2k1c2IyTmhiREFlRncweU1qQXpNekF4TXpBek16VmFGdzB6TWpBegpNamN4TXpBek16VmFNQmd4RmpBVUJnTlZCQW9URFdOc2RYTjBaWEl1Ykc5allXd3dnZ0VpTUEwR0NTcUdTSWIzCkRRRUJBUVVBQTRJQkR3QXdnZ0VLQW9JQkFRRFZHM3U1b3dzYnpkWVgxU0I1OVplRHpmZlJhZHVzYVBqM01tN1EKS2x0MXl4bm1IMFZ0WjMrR1BCRFF0TGd1Y2JIRmpzeW83aFAwOHJSVExNQ2tXd1VMTWFoNzV2TFpEclBwQ3R5UgpjNVdmd3pHSXkxb1FHbXJFbnZNQmszVlpwNDhpVmxaUmN3MC9kelFONTJvNWNibDFmbnUrK3Y1SWRRKzBnWjNSCmR6dnQ2T0VvQmc2VnRpTWk2ZnNuVHdXSFNxUFk1TmhyakZjQldSUlJKK1Q4NmpFaVY5aHl1YTlnRkp0dzBhZ1kKTTgySXczdHFrZFFlWTkxNzF4Nk54Y0k2V1BHTE51VEtPbFZzWXU0RGkvQ3JaUjFDSFUxRE1qUndiL2pxcXJLaAoxV0tUUlBmMkRxM2NUZUVrUmM0MXdIUnNPZ29rMXNqMjlMei8vUmRSZyswUTRyVFpBZ01CQUFHalFqQkFNQTRHCkExVWREd0VCL3dRRUF3SUNCREFQQmdOVkhSTUJBZjhFQlRBREFRSC9NQjBHQTFVZERnUVdCQlRFVGVpMUprL3kKb0xwdWxlZ0toc3NSb3d6MVRUQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFFaWpaZWhMUkpodWxHbDdiWFRGYwpGNnRhNVMycnlEdmdHMCtYOWZxQ2d2RUZnMXRiT0FNRGZ2MkNQSENMejNlTUFOUUt0TUdUVEo2dFRFZzJmR0g2CmxzczVNOXBQMVVuVmRrME9vVGdub1N4UWlGWWFQU0YwZ2xpUUw4R0pSeUwzSE1MQmFlSThYMzk2UUQwQUc2WW4KNUlNMVJENk45NlFpU1pYTlpWR1VzWVdadzdxNkF6RWJMclE1OVZrMGs5RWdDUGtpUDJWTGd2clFTZHlvQ0hwbQpaNU1EWGsvVkRSV3V2aVZKRGExL2dPTlZIZzlhSlZNcmRaVVJHb1lzVStqMk93aW5ON3BrTVlya0tpczM2QWlvCkltc1cxUForN2hXK3dwV1FSZU10dWpEZkdLVU8xaFk2cGZwNWxZZCs1MTczM3VHTUR5K2FJL044WmhDWHpSbzQKVUE9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
    service:
      name: istiod-cp-v111x
      namespace: istio-system
      path: /inject
      port: 443
  failurePolicy: Fail
  matchPolicy: Equivalent
  name: rev.namespace.sidecar-injector.istio.io
  namespaceSelector:
    matchExpressions:
    - key: istio.io/rev
      operator: In
      values:
      - cp-v111x.istio-system
    - key: istio-injection
      operator: DoesNotExist
  objectSelector:
    matchExpressions:
    - key: sidecar.istio.io/inject
      operator: NotIn
      values:
      - "false"
  reinvocationPolicy: Never
  rules:
  - apiGroups:
    - ""
    apiVersions:
    - v1
    operations:
    - CREATE
    resources:
    - pods
    scope: '*'
  sideEffects: None
  timeoutSeconds: 10
# Webhook 2: pod-label-driven injection — fires for pods labeled
# istio.io/rev=cp-v111x.istio-system in namespaces with neither
# istio.io/rev nor istio-injection labels.
- admissionReviewVersions:
  - v1beta1
  - v1
  clientConfig:
    caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMvVENDQWVXZ0F3SUJBZ0lSQVBibjVRRndsY00rclFuVGYrYjZPV2t3RFFZSktvWklodmNOQVFFTEJRQXcKR0RFV01CUUdBMVVFQ2hNTlkyeDFjM1JsY2k1c2IyTmhiREFlRncweU1qQXpNekF4TXpBek16VmFGdzB6TWpBegpNamN4TXpBek16VmFNQmd4RmpBVUJnTlZCQW9URFdOc2RYTjBaWEl1Ykc5allXd3dnZ0VpTUEwR0NTcUdTSWIzCkRRRUJBUVVBQTRJQkR3QXdnZ0VLQW9JQkFRRFZHM3U1b3dzYnpkWVgxU0I1OVplRHpmZlJhZHVzYVBqM01tN1EKS2x0MXl4bm1IMFZ0WjMrR1BCRFF0TGd1Y2JIRmpzeW83aFAwOHJSVExNQ2tXd1VMTWFoNzV2TFpEclBwQ3R5UgpjNVdmd3pHSXkxb1FHbXJFbnZNQmszVlpwNDhpVmxaUmN3MC9kelFONTJvNWNibDFmbnUrK3Y1SWRRKzBnWjNSCmR6dnQ2T0VvQmc2VnRpTWk2ZnNuVHdXSFNxUFk1TmhyakZjQldSUlJKK1Q4NmpFaVY5aHl1YTlnRkp0dzBhZ1kKTTgySXczdHFrZFFlWTkxNzF4Nk54Y0k2V1BHTE51VEtPbFZzWXU0RGkvQ3JaUjFDSFUxRE1qUndiL2pxcXJLaAoxV0tUUlBmMkRxM2NUZUVrUmM0MXdIUnNPZ29rMXNqMjlMei8vUmRSZyswUTRyVFpBZ01CQUFHalFqQkFNQTRHCkExVWREd0VCL3dRRUF3SUNCREFQQmdOVkhSTUJBZjhFQlRBREFRSC9NQjBHQTFVZERnUVdCQlRFVGVpMUprL3kKb0xwdWxlZ0toc3NSb3d6MVRUQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFFaWpaZWhMUkpodWxHbDdiWFRGYwpGNnRhNVMycnlEdmdHMCtYOWZxQ2d2RUZnMXRiT0FNRGZ2MkNQSENMejNlTUFOUUt0TUdUVEo2dFRFZzJmR0g2CmxzczVNOXBQMVVuVmRrME9vVGdub1N4UWlGWWFQU0YwZ2xpUUw4R0pSeUwzSE1MQmFlSThYMzk2UUQwQUc2WW4KNUlNMVJENk45NlFpU1pYTlpWR1VzWVdadzdxNkF6RWJMclE1OVZrMGs5RWdDUGtpUDJWTGd2clFTZHlvQ0hwbQpaNU1EWGsvVkRSV3V2aVZKRGExL2dPTlZIZzlhSlZNcmRaVVJHb1lzVStqMk93aW5ON3BrTVlya0tpczM2QWlvCkltc1cxUForN2hXK3dwV1FSZU10dWpEZkdLVU8xaFk2cGZwNWxZZCs1MTczM3VHTUR5K2FJL044WmhDWHpSbzQKVUE9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
    service:
      name: istiod-cp-v111x
      namespace: istio-system
      path: /inject
      port: 443
  failurePolicy: Fail
  matchPolicy: Equivalent
  name: rev.object.sidecar-injector.istio.io
  namespaceSelector:
    matchExpressions:
    - key: istio.io/rev
      operator: DoesNotExist
    - key: istio-injection
      operator: DoesNotExist
  objectSelector:
    matchExpressions:
    - key: sidecar.istio.io/inject
      operator: NotIn
      values:
      - "false"
    - key: istio.io/rev
      operator: In
      values:
      - cp-v111x.istio-system
  reinvocationPolicy: Never
  rules:
  - apiGroups:
    - ""
    apiVersions:
    - v1
    operations:
    - CREATE
    resources:
    - pods
    scope: '*'
  sideEffects: None
  timeoutSeconds: 10
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment