-
-
Save dragonsinth/aea365732b60da3adc928dc18fff56ed to your computer and use it in GitHub Desktop.
package main | |
import ( | |
"context" | |
"encoding/base64" | |
"flag" | |
"fmt" | |
"log" | |
"google.golang.org/api/container/v1" | |
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | |
"k8s.io/client-go/kubernetes" | |
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" // register GCP auth provider | |
"k8s.io/client-go/tools/clientcmd" | |
"k8s.io/client-go/tools/clientcmd/api" | |
) | |
// fProjectId selects which GCP project to scan for GKE clusters.
var fProjectId = flag.String("projectId", "", "specify a project id to examine")
func main() { | |
flag.Parse() | |
if *fProjectId == "" { | |
log.Fatal("must specific -projectId") | |
} | |
if err := run(context.Background(), *fProjectId); err != nil { | |
log.Fatal(err) | |
} | |
} | |
func run(ctx context.Context, projectId string) error { | |
kubeConfig, err := getK8sClusterConfigs(ctx, projectId) | |
if err != nil { | |
return err | |
} | |
// Just list all the namespaces found in the project to test the API. | |
for clusterName := range kubeConfig.Clusters { | |
cfg, err := clientcmd.NewNonInteractiveClientConfig(*kubeConfig, clusterName, &clientcmd.ConfigOverrides{CurrentContext: clusterName}, nil).ClientConfig() | |
if err != nil { | |
return fmt.Errorf("failed to create Kubernetes configuration cluster=%s: %w", clusterName, err) | |
} | |
k8s, err := kubernetes.NewForConfig(cfg) | |
if err != nil { | |
return fmt.Errorf("failed to create Kubernetes client cluster=%s: %w", clusterName, err) | |
} | |
ns, err := k8s.CoreV1().Namespaces().List(metav1.ListOptions{}) | |
if err != nil { | |
return fmt.Errorf("failed to list namespaces cluster=%s: %w", clusterName, err) | |
} | |
log.Printf("Namespaces found in cluster=%s", clusterName) | |
for _, item := range ns.Items { | |
log.Println(item.Name) | |
} | |
} | |
return nil | |
} | |
func getK8sClusterConfigs(ctx context.Context, projectId string) (*api.Config, error) { | |
svc, err := container.NewService(ctx) | |
if err != nil { | |
return nil, fmt.Errorf("container.NewService: %w", err) | |
} | |
// Basic config structure | |
ret := api.Config{ | |
APIVersion: "v1", | |
Kind: "Config", | |
Clusters: map[string]*api.Cluster{}, // Clusters is a map of referencable names to cluster configs | |
AuthInfos: map[string]*api.AuthInfo{}, // AuthInfos is a map of referencable names to user configs | |
Contexts: map[string]*api.Context{}, // Contexts is a map of referencable names to context configs | |
} | |
// Ask Google for a list of all kube clusters in the given project. | |
resp, err := svc.Projects.Zones.Clusters.List(projectId, "-").Context(ctx).Do() | |
if err != nil { | |
return nil, fmt.Errorf("clusters list project=%s: %w", projectId, err) | |
} | |
for _, f := range resp.Clusters { | |
name := fmt.Sprintf("gke_%s_%s_%s", projectId, f.Zone, f.Name) | |
cert, err := base64.StdEncoding.DecodeString(f.MasterAuth.ClusterCaCertificate) | |
if err != nil { | |
return nil, fmt.Errorf("invalid certificate cluster=%s cert=%s: %w", name, f.MasterAuth.ClusterCaCertificate, err) | |
} | |
// example: gke_my-project_us-central1-b_cluster-1 => https://XX.XX.XX.XX | |
ret.Clusters[name] = &api.Cluster{ | |
CertificateAuthorityData: cert, | |
Server: "https://" + f.Endpoint, | |
} | |
// Just reuse the context name as an auth name. | |
ret.Contexts[name] = &api.Context{ | |
Cluster: name, | |
AuthInfo: name, | |
} | |
// GCP specific configation; use cloud platform scope. | |
ret.AuthInfos[name] = &api.AuthInfo{ | |
AuthProvider: &api.AuthProviderConfig{ | |
Name: "gcp", | |
Config: map[string]string{ | |
"scopes": "https://www.googleapis.com/auth/cloud-platform", | |
}, | |
}, | |
} | |
} | |
return &ret, nil | |
} |
@nstogner thanks a lot. I used this method for the token:
creds, err := google.CredentialsFromJSON(ctx, b, container.CloudPlatformScope)
I am getting following error while executing
2023/01/23 11:47:49 failed to create Kubernetes client cluster=<cluster_name>: The gcp auth plugin has been removed.
Please use the "gke-gcloud-auth-plugin" kubectl/client-go credential plugin instead.
See https://cloud.google.com/blog/products/containers-kubernetes/kubectl-auth-changes-in-gke for further details
exit status 1
I'll have to re-test — there's a new release, and this works in 1.2.26 with respect to the CLI. I'm not sure what impact this has on pure Go. I'll dig around and see what I find.
Okay, I have an updated solution that should work with newer libs.
-
In your go.mod,
replace k8s.io/cloud-provider-gcp/providers => k8s.io/cloud-provider-gcp/providers v0.25.5
-
go get k8s.io/cloud-provider-gcp/pkg/clientauthplugin/gcp@bb1acae5826dc877953d4854faf414e860db2efa
-
Change the import:
_ "k8s.io/cloud-provider-gcp/pkg/clientauthplugin/gcp" // register GCP auth provider plugin.
The code moved here (but note the library is unsupported): https://github.com/kubernetes/cloud-provider-gcp/tree/bb1acae5826dc877953d4854faf414e860db2efa/pkg/clientauthplugin
Hey @dragonsinth, it's working for me. But when I try to import the same alongside the Helm packages, it upgrades the Kubernetes dependency.
I am trying to import the Helm dependency:
"helm.sh/helm/v3/pkg/action"
"helm.sh/helm/v3/pkg/cli"
I want to fetch all the installed helm releases from running cluster.
But when i do
go get helm.sh/helm/v3/pkg/action
go get helm.sh/helm/v3/pkg/cli
in the go.mod file it upgrades the k8s version:
k8s.io/client-go v0.25.5 -> v0.26.0
Can you please help me with this issue?
@eben-rockx see this comment, this worked for me at least
https://gist.github.com/dragonsinth/aea365732b60da3adc928dc18fff56ed?permalink_comment_id=4446169#gistcomment-4446169
Client dependencies being upgraded forcefully after installing the helm modules
go get helm.sh/helm/v3/pkg/action
go get helm.sh/helm/v3/pkg/cli
k8s.io/client-go v0.25.5 - > v0.26.0
To simulate the issue :
Can you please try to import the helm module, after that you will see it is not working.
@ucguy4u I'm on k8s.io/client-go v0.26.0 and it works fine for me; v0.26.1 also works fine
@zreigz