@chrislovecnm
Created October 1, 2017 04:32
IAM PR backup - just in case
From f54369807e6702dcfc5a19ccb7d979180f4770ff Mon Sep 17 00:00:00 2001
From: bjuncosa <borja.juncosa@socialpoint.es>
Date: Thu, 16 Mar 2017 12:39:40 +0100
Subject: [PATCH] Add feature: Custom IAM Instance Profile
This way cluster IAM roles can be managed externally, whether manually,
with CloudFormation, or with any other tool.
---
cmd/kops/create_cluster_integration_test.go | 12 +-
cmd/kops/integration_test.go | 77 +++--
docs/cluster_spec.md | 17 +
docs/iam_roles.md | 31 +-
docs/security.md | 1 -
pkg/apis/kops/cluster.go | 17 +
pkg/apis/kops/v1alpha1/cluster.go | 21 +-
pkg/apis/kops/v1alpha1/zz_generated.conversion.go | 42 +++
pkg/apis/kops/v1alpha2/cluster.go | 15 +
pkg/apis/kops/v1alpha2/zz_generated.conversion.go | 42 +++
pkg/apis/kops/validation/validation.go | 30 ++
pkg/apis/kops/validation/validation_test.go | 62 +++-
pkg/featureflag/featureflag.go | 3 +
pkg/model/awsmodel/autoscalinggroup.go | 9 +-
pkg/model/iam.go | 127 +++++---
pkg/model/names.go | 54 +++-
tests/integration/custom_iam_role/id_rsa.pub | 1 +
tests/integration/custom_iam_role/in-v1alpha2.yaml | 82 +++++
tests/integration/custom_iam_role/kubernetes.tf | 348 +++++++++++++++++++++
upup/pkg/fi/cloudup/awstasks/iaminstanceprofile.go | 16 +-
upup/pkg/fi/cloudup/awstasks/iamrole.go | 12 +-
.../pkg/fi/cloudup/awstasks/launchconfiguration.go | 1 +
22 files changed, 920 insertions(+), 100 deletions(-)
create mode 100755 tests/integration/custom_iam_role/id_rsa.pub
create mode 100644 tests/integration/custom_iam_role/in-v1alpha2.yaml
create mode 100644 tests/integration/custom_iam_role/kubernetes.tf
diff --git a/cmd/kops/create_cluster_integration_test.go b/cmd/kops/create_cluster_integration_test.go
index 093ff87c6e..d69f3e4cb4 100644
--- a/cmd/kops/create_cluster_integration_test.go
+++ b/cmd/kops/create_cluster_integration_test.go
@@ -18,17 +18,19 @@ package main
import (
"bytes"
- "github.com/golang/glog"
"io/ioutil"
+ "path"
+ "strings"
+ "testing"
+ "time"
+
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kops/cmd/kops/util"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/diff"
"k8s.io/kops/pkg/testutils"
- "path"
- "strings"
- "testing"
- "time"
+
+ "github.com/golang/glog"
)
var MagicTimestamp = metav1.Time{Time: time.Date(2017, 1, 1, 0, 0, 0, 0, time.UTC)}
diff --git a/cmd/kops/integration_test.go b/cmd/kops/integration_test.go
index 29958623c0..b0f235748a 100644
--- a/cmd/kops/integration_test.go
+++ b/cmd/kops/integration_test.go
@@ -22,11 +22,7 @@ import (
"crypto/rsa"
"crypto/x509"
"encoding/pem"
- "golang.org/x/crypto/ssh"
"io/ioutil"
- "k8s.io/kops/cmd/kops/util"
- "k8s.io/kops/pkg/diff"
- "k8s.io/kops/pkg/testutils"
"os"
"path"
"reflect"
@@ -34,24 +30,31 @@ import (
"strings"
"testing"
"time"
+
+ "golang.org/x/crypto/ssh"
+
+ "k8s.io/kops/cmd/kops/util"
+ "k8s.io/kops/pkg/diff"
+ "k8s.io/kops/pkg/featureflag"
+ "k8s.io/kops/pkg/testutils"
)
// TestMinimal runs the test on a minimum configuration, similar to kops create cluster minimal.example.com --zones us-west-1a
func TestMinimal(t *testing.T) {
- runTest(t, "minimal.example.com", "../../tests/integration/minimal", "v1alpha0", false, 1)
- runTest(t, "minimal.example.com", "../../tests/integration/minimal", "v1alpha1", false, 1)
- runTest(t, "minimal.example.com", "../../tests/integration/minimal", "v1alpha2", false, 1)
+ runTest(t, "minimal.example.com", "../../tests/integration/minimal", "v1alpha0", false, 1, true)
+ runTest(t, "minimal.example.com", "../../tests/integration/minimal", "v1alpha1", false, 1, true)
+ runTest(t, "minimal.example.com", "../../tests/integration/minimal", "v1alpha2", false, 1, true)
}
// TestHA runs the test on a simple HA configuration, similar to kops create cluster minimal.example.com --zones us-west-1a,us-west-1b,us-west-1c --master-count=3
func TestHA(t *testing.T) {
- runTest(t, "ha.example.com", "../../tests/integration/ha", "v1alpha1", false, 3)
- runTest(t, "ha.example.com", "../../tests/integration/ha", "v1alpha2", false, 3)
+ runTest(t, "ha.example.com", "../../tests/integration/ha", "v1alpha1", false, 3, true)
+ runTest(t, "ha.example.com", "../../tests/integration/ha", "v1alpha2", false, 3, true)
}
// TestComplex runs the test on a more complex configuration, intended to hit more of the edge cases
func TestComplex(t *testing.T) {
- runTest(t, "complex.example.com", "../../tests/integration/complex", "v1alpha2", false, 1)
+ runTest(t, "complex.example.com", "../../tests/integration/complex", "v1alpha2", false, 1, true)
}
// TestMinimalCloudformation runs the test on a minimum configuration, similar to kops create cluster minimal.example.com --zones us-west-1a
@@ -63,49 +66,55 @@ func TestMinimalCloudformation(t *testing.T) {
// TestMinimal_141 runs the test on a configuration from 1.4.1 release
func TestMinimal_141(t *testing.T) {
- runTest(t, "minimal-141.example.com", "../../tests/integration/minimal-141", "v1alpha0", false, 1)
+ runTest(t, "minimal-141.example.com", "../../tests/integration/minimal-141", "v1alpha0", false, 1, true)
}
// TestPrivateWeave runs the test on a configuration with private topology, weave networking
func TestPrivateWeave(t *testing.T) {
- runTest(t, "privateweave.example.com", "../../tests/integration/privateweave", "v1alpha1", true, 1)
- runTest(t, "privateweave.example.com", "../../tests/integration/privateweave", "v1alpha2", true, 1)
+ runTest(t, "privateweave.example.com", "../../tests/integration/privateweave", "v1alpha1", true, 1, true)
+ runTest(t, "privateweave.example.com", "../../tests/integration/privateweave", "v1alpha2", true, 1, true)
}
// TestPrivateFlannel runs the test on a configuration with private topology, flannel networking
func TestPrivateFlannel(t *testing.T) {
- runTest(t, "privateflannel.example.com", "../../tests/integration/privateflannel", "v1alpha1", true, 1)
- runTest(t, "privateflannel.example.com", "../../tests/integration/privateflannel", "v1alpha2", true, 1)
+ runTest(t, "privateflannel.example.com", "../../tests/integration/privateflannel", "v1alpha1", true, 1, true)
+ runTest(t, "privateflannel.example.com", "../../tests/integration/privateflannel", "v1alpha2", true, 1, true)
}
// TestPrivateCalico runs the test on a configuration with private topology, calico networking
func TestPrivateCalico(t *testing.T) {
- runTest(t, "privatecalico.example.com", "../../tests/integration/privatecalico", "v1alpha1", true, 1)
- runTest(t, "privatecalico.example.com", "../../tests/integration/privatecalico", "v1alpha2", true, 1)
+ runTest(t, "privatecalico.example.com", "../../tests/integration/privatecalico", "v1alpha1", true, 1, true)
+ runTest(t, "privatecalico.example.com", "../../tests/integration/privatecalico", "v1alpha2", true, 1, true)
}
// TestPrivateCanal runs the test on a configuration with private topology, canal networking
func TestPrivateCanal(t *testing.T) {
- runTest(t, "privatecanal.example.com", "../../tests/integration/privatecanal", "v1alpha1", true, 1)
- runTest(t, "privatecanal.example.com", "../../tests/integration/privatecanal", "v1alpha2", true, 1)
+ runTest(t, "privatecanal.example.com", "../../tests/integration/privatecanal", "v1alpha1", true, 1, true)
+ runTest(t, "privatecanal.example.com", "../../tests/integration/privatecanal", "v1alpha2", true, 1, true)
}
// TestPrivateKopeio runs the test on a configuration with private topology, kopeio networking
func TestPrivateKopeio(t *testing.T) {
- runTest(t, "privatekopeio.example.com", "../../tests/integration/privatekopeio", "v1alpha2", true, 1)
+ runTest(t, "privatekopeio.example.com", "../../tests/integration/privatekopeio", "v1alpha2", true, 1, true)
}
// TestPrivateDns runs the test on a configuration with private topology, private dns
func TestPrivateDns1(t *testing.T) {
- runTest(t, "privatedns1.example.com", "../../tests/integration/privatedns1", "v1alpha2", true, 1)
+ runTest(t, "privatedns1.example.com", "../../tests/integration/privatedns1", "v1alpha2", true, 1, true)
}
// TestPrivateDns runs the test on a configuration with private topology, private dns, extant vpc
func TestPrivateDns2(t *testing.T) {
- runTest(t, "privatedns2.example.com", "../../tests/integration/privatedns2", "v1alpha2", true, 1)
+ runTest(t, "privatedns2.example.com", "../../tests/integration/privatedns2", "v1alpha2", true, 1, true)
+}
+
+// TestCreateClusterCustomAuthProfile runs the test with a custom IAM instance profile, similar to kops create cluster custom-iam-role.example.com --zones us-test-1a
+func TestCreateClusterCustomAuthProfile(t *testing.T) {
+ featureflag.ParseFlags("+CustomAuthProfileSupport")
+ runTest(t, "custom-iam-role.example.com", "../../tests/integration/custom_iam_role", "v1alpha2", false, 1, false)
}
-func runTest(t *testing.T, clusterName string, srcDir string, version string, private bool, zones int) {
+func runTest(t *testing.T, clusterName string, srcDir string, version string, private bool, zones int, expectPolicies bool) {
var stdout bytes.Buffer
inputYAML := "in-" + version + ".yaml"
@@ -207,13 +216,21 @@ func runTest(t *testing.T, clusterName string, srcDir string, version string, pr
actualFilenames = append(actualFilenames, f.Name())
}
- expectedFilenames := []string{
- "aws_iam_role_masters." + clusterName + "_policy",
- "aws_iam_role_nodes." + clusterName + "_policy",
- "aws_iam_role_policy_masters." + clusterName + "_policy",
- "aws_iam_role_policy_nodes." + clusterName + "_policy",
- "aws_key_pair_kubernetes." + clusterName + "-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key",
- "aws_launch_configuration_nodes." + clusterName + "_user_data",
+ var expectedFilenames []string
+ if expectPolicies {
+ expectedFilenames = []string{
+ "aws_iam_role_masters." + clusterName + "_policy",
+ "aws_iam_role_nodes." + clusterName + "_policy",
+ "aws_iam_role_policy_masters." + clusterName + "_policy",
+ "aws_iam_role_policy_nodes." + clusterName + "_policy",
+ "aws_key_pair_kubernetes." + clusterName + "-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key",
+ "aws_launch_configuration_nodes." + clusterName + "_user_data",
+ }
+ } else {
+ expectedFilenames = []string{
+ "aws_key_pair_kubernetes." + clusterName + "-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key",
+ "aws_launch_configuration_nodes." + clusterName + "_user_data",
+ }
}
for i := 0; i < zones; i++ {
diff --git a/docs/cluster_spec.md b/docs/cluster_spec.md
index 7521b35002..4f1d32c256 100644
--- a/docs/cluster_spec.md
+++ b/docs/cluster_spec.md
@@ -19,6 +19,23 @@ spec:
dns: {}
```
+### authProfile ALPHA SUPPORT
+
+This configuration allows a cluster to use existing IAM instance profiles. Currently this configuration only supports AWS.
+In order to use this feature you must have the instance profile ARN of a pre-existing role, and enable the kops feature flag by setting
+`export KOPS_FEATURE_FLAGS=+CustomAuthProfileSupport`. This feature is ALPHA only, and can cause very unusual behavior
+with Kubernetes if used incorrectly.
+
+AuthProfile example:
+
+```yaml
+spec:
+ authProfile:
+ master: arn:aws:iam::123417490108:instance-profile/kops-custom-master-role
+ node: arn:aws:iam::123417490108:instance-profile/kops-custom-node-role
+```
+
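+One way to sanity-check the profiles before enabling the flag (the profile names here
+are illustrative, not created by kops) is to look them up with the AWS CLI:
+
+```console
+$ aws iam get-instance-profile --instance-profile-name kops-custom-master-role
+$ aws iam get-instance-profile --instance-profile-name kops-custom-node-role
+```
+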
+### api
When configuring a LoadBalancer, you can also choose to have a public ELB or an internal (VPC only) ELB. The `type`
field should be `Public` or `Internal`.
diff --git a/docs/iam_roles.md b/docs/iam_roles.md
index bb2a0c4842..ce584dea5d 100644
--- a/docs/iam_roles.md
+++ b/docs/iam_roles.md
@@ -2,9 +2,9 @@
Two IAM roles are created for the cluster: one for the masters, and one for the nodes.
-> Work is being done on scoping permissions to the minimum required to setup and maintain cluster.
+> Work is being done on scoping permissions to the minimum required to setup and maintain cluster.
> Please note that currently all Pods running on your cluster have access to instance IAM role.
-> Consider using projects such as [kube2iam](https://github.com/jtblin/kube2iam) to prevent that.
+> Consider using projects such as [kube2iam](https://github.com/jtblin/kube2iam) to prevent that.
Master permissions:
@@ -136,3 +136,30 @@ You can have an additional policy for each kops role (node, master, bastion). Fo
}
]
```
+
+## Reusing Existing Instance Profile
+
+Sometimes you may need to reuse existing IAM Instance Profiles. You can do this
+through the `authProfile` cluster spec API field. This setting is highly advanced
+and is only enabled via the `CustomAuthProfileSupport` feature flag. Setting the wrong role
+permissions can impact various components inside of Kubernetes, and cause
+unexpected issues. This feature is in place to support the initial documentation and
+testing of custom role creation. Otherwise, use the existing kops functionality, or reach out
+if you want to help!
+
+At this point, we do not have a full definition of the fine-grained roles. Please refer
+to [kubernetes/kops#1873](https://github.com/kubernetes/kops/issues/1873) for more information.
+
+Please use this feature wisely! Enable the feature flag by:
+
+```console
+$ export KOPS_FEATURE_FLAGS="+CustomAuthProfileSupport"
+```
+Inside the cluster spec, define one or two instance profiles: one for the masters and
+one for the nodes.
+
+```yaml
+spec:
+ authProfile:
+ master: arn:aws:iam::123417490108:instance-profile/kops-custom-master-role
+ node: arn:aws:iam::123417490108:instance-profile/kops-custom-node-role
+```
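+
+kops does not create or modify these profiles; they must already exist. If you still
+need to create them, a minimal sketch with the AWS CLI (names are illustrative, and
+`trust-policy.json` is assumed to contain an EC2 assume-role trust policy):
+
+```console
+$ aws iam create-role --role-name kops-custom-master-role \
+    --assume-role-policy-document file://trust-policy.json
+$ aws iam create-instance-profile --instance-profile-name kops-custom-master-role
+$ aws iam add-role-to-instance-profile --instance-profile-name kops-custom-master-role \
+    --role-name kops-custom-master-role
+```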
diff --git a/docs/security.md b/docs/security.md
index abba721576..5453db4b70 100644
--- a/docs/security.md
+++ b/docs/security.md
@@ -34,7 +34,6 @@ This stores the [config.json](https://docs.docker.com/engine/reference/commandli
All Pods running on your cluster have access to underlying instance IAM role.
Currently permission scope is quite broad. See [iam_roles.md](iam_roles.md) for details and ways to mitigate that.
-
## Kubernetes API
(this section is a work in progress)
diff --git a/pkg/apis/kops/cluster.go b/pkg/apis/kops/cluster.go
index 9e5b9ff5d4..5bd6e0b326 100644
--- a/pkg/apis/kops/cluster.go
+++ b/pkg/apis/kops/cluster.go
@@ -111,7 +111,13 @@ type ClusterSpec struct {
// 'external' do not apply updates automatically - they are applied manually or by an external system
// missing: default policy (currently OS security upgrades that do not require a reboot)
UpdatePolicy *string `json:"updatePolicy,omitempty"`
+
+ // AuthProfile references existing cloud IAM instance profiles to use for the instances.
+ // Only supported for AWS
+ AuthProfile *AuthProfile `json:"authProfile,omitempty"`
+
// Additional policies to add for roles
+ // Map is keyed by: master, node
AdditionalPolicies *map[string]string `json:"additionalPolicies,omitempty"`
// EtcdClusters stores the configuration for each cluster
EtcdClusters []*EtcdClusterSpec `json:"etcdClusters,omitempty"`
@@ -224,6 +230,17 @@ type KubeDNSConfig struct {
ServerIP string `json:"serverIP,omitempty"`
}
+type AuthProfile struct {
+
+ // Master is the name of the instance profile to use for the master
+ // Format expected is arn:aws:iam::123456789012:instance-profile/ExampleMasterRole
+ Master *string `json:"master,omitempty"`
+
+ // Node is the name of the instance profile to use for the node
+ // Format expected is arn:aws:iam::123456789012:instance-profile/ExampleNodeRole
+ Node *string `json:"node,omitempty"`
+}
+
// EtcdStorageType defined the etcd storage backend
type EtcdStorageType string
diff --git a/pkg/apis/kops/v1alpha1/cluster.go b/pkg/apis/kops/v1alpha1/cluster.go
index 044867dae9..a4cdb17cce 100644
--- a/pkg/apis/kops/v1alpha1/cluster.go
+++ b/pkg/apis/kops/v1alpha1/cluster.go
@@ -131,6 +131,10 @@ type ClusterSpec struct {
// missing: default policy (currently OS security upgrades that do not require a reboot)
UpdatePolicy *string `json:"updatePolicy,omitempty"`
+ // AuthProfile references existing cloud IAM instance profiles to use for the instances. One example is to
+ // specify the ARN of an AWS instance profile for the master and another for the nodes.
+ AuthProfile *AuthProfile `json:"authProfile,omitempty"`
+
// Additional policies to add for roles
AdditionalPolicies *map[string]string `json:"additionalPolicies,omitempty"`
@@ -327,13 +331,16 @@ type KubeDNSConfig struct {
ServerIP string `json:"serverIP,omitempty"`
}
-//type MasterConfig struct {
-// Name string `json:",omitempty"`
-//
-// Image string `json:",omitempty"`
-// Zone string `json:",omitempty"`
-// MachineType string `json:",omitempty"`
-//}
+type AuthProfile struct {
+
+ // Master is the name of the instance profile to use for the master
+ // Format expected is arn:aws:iam::123456789012:instance-profile/ExampleMasterRole
+ Master *string `json:"master,omitempty"`
+
+ // Node is the name of the instance profile to use for the node
+ // Format expected is arn:aws:iam::123456789012:instance-profile/ExampleNodeRole
+ Node *string `json:"node,omitempty"`
+}
type EtcdClusterSpec struct {
// Name is the name of the etcd cluster (main, events etc)
diff --git a/pkg/apis/kops/v1alpha1/zz_generated.conversion.go b/pkg/apis/kops/v1alpha1/zz_generated.conversion.go
index 77abadf894..c423a71453 100644
--- a/pkg/apis/kops/v1alpha1/zz_generated.conversion.go
+++ b/pkg/apis/kops/v1alpha1/zz_generated.conversion.go
@@ -41,6 +41,8 @@ func RegisterConversions(scheme *runtime.Scheme) error {
Convert_kops_AlwaysAllowAuthorizationSpec_To_v1alpha1_AlwaysAllowAuthorizationSpec,
Convert_v1alpha1_Assets_To_kops_Assets,
Convert_kops_Assets_To_v1alpha1_Assets,
+ Convert_v1alpha1_AuthProfile_To_kops_AuthProfile,
+ Convert_kops_AuthProfile_To_v1alpha1_AuthProfile,
Convert_v1alpha1_AuthenticationSpec_To_kops_AuthenticationSpec,
Convert_kops_AuthenticationSpec_To_v1alpha1_AuthenticationSpec,
Convert_v1alpha1_AuthorizationSpec_To_kops_AuthorizationSpec,
@@ -222,6 +224,28 @@ func Convert_kops_Assets_To_v1alpha1_Assets(in *kops.Assets, out *Assets, s conv
return autoConvert_kops_Assets_To_v1alpha1_Assets(in, out, s)
}
+func autoConvert_v1alpha1_AuthProfile_To_kops_AuthProfile(in *AuthProfile, out *kops.AuthProfile, s conversion.Scope) error {
+ out.Master = in.Master
+ out.Node = in.Node
+ return nil
+}
+
+// Convert_v1alpha1_AuthProfile_To_kops_AuthProfile is an autogenerated conversion function.
+func Convert_v1alpha1_AuthProfile_To_kops_AuthProfile(in *AuthProfile, out *kops.AuthProfile, s conversion.Scope) error {
+ return autoConvert_v1alpha1_AuthProfile_To_kops_AuthProfile(in, out, s)
+}
+
+func autoConvert_kops_AuthProfile_To_v1alpha1_AuthProfile(in *kops.AuthProfile, out *AuthProfile, s conversion.Scope) error {
+ out.Master = in.Master
+ out.Node = in.Node
+ return nil
+}
+
+// Convert_kops_AuthProfile_To_v1alpha1_AuthProfile is an autogenerated conversion function.
+func Convert_kops_AuthProfile_To_v1alpha1_AuthProfile(in *kops.AuthProfile, out *AuthProfile, s conversion.Scope) error {
+ return autoConvert_kops_AuthProfile_To_v1alpha1_AuthProfile(in, out, s)
+}
+
func autoConvert_v1alpha1_AuthenticationSpec_To_kops_AuthenticationSpec(in *AuthenticationSpec, out *kops.AuthenticationSpec, s conversion.Scope) error {
if in.Kopeio != nil {
in, out := &in.Kopeio, &out.Kopeio
@@ -527,6 +551,15 @@ func autoConvert_v1alpha1_ClusterSpec_To_kops_ClusterSpec(in *ClusterSpec, out *
// WARNING: in.AdminAccess requires manual conversion: does not exist in peer-type
out.IsolateMasters = in.IsolateMasters
out.UpdatePolicy = in.UpdatePolicy
+ if in.AuthProfile != nil {
+ in, out := &in.AuthProfile, &out.AuthProfile
+ *out = new(kops.AuthProfile)
+ if err := Convert_v1alpha1_AuthProfile_To_kops_AuthProfile(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.AuthProfile = nil
+ }
out.AdditionalPolicies = in.AdditionalPolicies
if in.EgressProxy != nil {
in, out := &in.EgressProxy, &out.EgressProxy
@@ -730,6 +763,15 @@ func autoConvert_kops_ClusterSpec_To_v1alpha1_ClusterSpec(in *kops.ClusterSpec,
// WARNING: in.KubernetesAPIAccess requires manual conversion: does not exist in peer-type
out.IsolateMasters = in.IsolateMasters
out.UpdatePolicy = in.UpdatePolicy
+ if in.AuthProfile != nil {
+ in, out := &in.AuthProfile, &out.AuthProfile
+ *out = new(AuthProfile)
+ if err := Convert_kops_AuthProfile_To_v1alpha1_AuthProfile(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.AuthProfile = nil
+ }
out.AdditionalPolicies = in.AdditionalPolicies
if in.EtcdClusters != nil {
in, out := &in.EtcdClusters, &out.EtcdClusters
diff --git a/pkg/apis/kops/v1alpha2/cluster.go b/pkg/apis/kops/v1alpha2/cluster.go
index eb05a00003..309583942a 100644
--- a/pkg/apis/kops/v1alpha2/cluster.go
+++ b/pkg/apis/kops/v1alpha2/cluster.go
@@ -138,6 +138,10 @@ type ClusterSpec struct {
// missing: default policy (currently OS security upgrades that do not require a reboot)
UpdatePolicy *string `json:"updatePolicy,omitempty"`
+ // AuthProfile references existing cloud IAM instance profiles to use for the instances. One example is to
+ // specify the ARN of an AWS instance profile for the master and another for the nodes.
+ AuthProfile *AuthProfile `json:"authProfile,omitempty"`
+
// Additional policies to add for roles
AdditionalPolicies *map[string]string `json:"additionalPolicies,omitempty"`
@@ -310,3 +314,14 @@ type HTTPProxy struct {
// User string `json:"user,omitempty"`
// Password string `json:"password,omitempty"`
}
+
+type AuthProfile struct {
+
+ // Master is the name of the instance profile to use for the master
+ // Format expected is arn:aws:iam::123456789012:instance-profile/ExampleMasterRole
+ Master *string `json:"master,omitempty"`
+
+ // Node is the name of the instance profile to use for the node
+ // Format expected is arn:aws:iam::123456789012:instance-profile/ExampleNodeRole
+ Node *string `json:"node,omitempty"`
+}
diff --git a/pkg/apis/kops/v1alpha2/zz_generated.conversion.go b/pkg/apis/kops/v1alpha2/zz_generated.conversion.go
index 83516df44f..8a68a4b63e 100644
--- a/pkg/apis/kops/v1alpha2/zz_generated.conversion.go
+++ b/pkg/apis/kops/v1alpha2/zz_generated.conversion.go
@@ -41,6 +41,8 @@ func RegisterConversions(scheme *runtime.Scheme) error {
Convert_kops_AlwaysAllowAuthorizationSpec_To_v1alpha2_AlwaysAllowAuthorizationSpec,
Convert_v1alpha2_Assets_To_kops_Assets,
Convert_kops_Assets_To_v1alpha2_Assets,
+ Convert_v1alpha2_AuthProfile_To_kops_AuthProfile,
+ Convert_kops_AuthProfile_To_v1alpha2_AuthProfile,
Convert_v1alpha2_AuthenticationSpec_To_kops_AuthenticationSpec,
Convert_kops_AuthenticationSpec_To_v1alpha2_AuthenticationSpec,
Convert_v1alpha2_AuthorizationSpec_To_kops_AuthorizationSpec,
@@ -228,6 +230,28 @@ func Convert_kops_Assets_To_v1alpha2_Assets(in *kops.Assets, out *Assets, s conv
return autoConvert_kops_Assets_To_v1alpha2_Assets(in, out, s)
}
+func autoConvert_v1alpha2_AuthProfile_To_kops_AuthProfile(in *AuthProfile, out *kops.AuthProfile, s conversion.Scope) error {
+ out.Master = in.Master
+ out.Node = in.Node
+ return nil
+}
+
+// Convert_v1alpha2_AuthProfile_To_kops_AuthProfile is an autogenerated conversion function.
+func Convert_v1alpha2_AuthProfile_To_kops_AuthProfile(in *AuthProfile, out *kops.AuthProfile, s conversion.Scope) error {
+ return autoConvert_v1alpha2_AuthProfile_To_kops_AuthProfile(in, out, s)
+}
+
+func autoConvert_kops_AuthProfile_To_v1alpha2_AuthProfile(in *kops.AuthProfile, out *AuthProfile, s conversion.Scope) error {
+ out.Master = in.Master
+ out.Node = in.Node
+ return nil
+}
+
+// Convert_kops_AuthProfile_To_v1alpha2_AuthProfile is an autogenerated conversion function.
+func Convert_kops_AuthProfile_To_v1alpha2_AuthProfile(in *kops.AuthProfile, out *AuthProfile, s conversion.Scope) error {
+ return autoConvert_kops_AuthProfile_To_v1alpha2_AuthProfile(in, out, s)
+}
+
func autoConvert_v1alpha2_AuthenticationSpec_To_kops_AuthenticationSpec(in *AuthenticationSpec, out *kops.AuthenticationSpec, s conversion.Scope) error {
if in.Kopeio != nil {
in, out := &in.Kopeio, &out.Kopeio
@@ -574,6 +598,15 @@ func autoConvert_v1alpha2_ClusterSpec_To_kops_ClusterSpec(in *ClusterSpec, out *
out.KubernetesAPIAccess = in.KubernetesAPIAccess
out.IsolateMasters = in.IsolateMasters
out.UpdatePolicy = in.UpdatePolicy
+ if in.AuthProfile != nil {
+ in, out := &in.AuthProfile, &out.AuthProfile
+ *out = new(kops.AuthProfile)
+ if err := Convert_v1alpha2_AuthProfile_To_kops_AuthProfile(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.AuthProfile = nil
+ }
out.AdditionalPolicies = in.AdditionalPolicies
if in.EtcdClusters != nil {
in, out := &in.EtcdClusters, &out.EtcdClusters
@@ -783,6 +816,15 @@ func autoConvert_kops_ClusterSpec_To_v1alpha2_ClusterSpec(in *kops.ClusterSpec,
out.KubernetesAPIAccess = in.KubernetesAPIAccess
out.IsolateMasters = in.IsolateMasters
out.UpdatePolicy = in.UpdatePolicy
+ if in.AuthProfile != nil {
+ in, out := &in.AuthProfile, &out.AuthProfile
+ *out = new(AuthProfile)
+ if err := Convert_kops_AuthProfile_To_v1alpha2_AuthProfile(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.AuthProfile = nil
+ }
out.AdditionalPolicies = in.AdditionalPolicies
if in.EtcdClusters != nil {
in, out := &in.EtcdClusters, &out.EtcdClusters
diff --git a/pkg/apis/kops/validation/validation.go b/pkg/apis/kops/validation/validation.go
index 69710eba8f..e58e22ea71 100644
--- a/pkg/apis/kops/validation/validation.go
+++ b/pkg/apis/kops/validation/validation.go
@@ -19,6 +19,7 @@ package validation
import (
"fmt"
"net"
+ "regexp"
"strings"
"k8s.io/apimachinery/pkg/api/validation"
@@ -60,6 +61,10 @@ func validateClusterSpec(spec *kops.ClusterSpec, fieldPath *field.Path) field.Er
allErrs = append(allErrs, validateHook(&spec.Hooks[i], fieldPath.Child("hooks").Index(i))...)
}
+ if spec.AuthProfile != nil {
+ allErrs = append(allErrs, validateAuthProfile(spec.AuthProfile, fieldPath.Child("authProfile"))...)
+ }
+
return allErrs
}
@@ -154,3 +159,28 @@ func validateExecContainerAction(v *kops.ExecContainerAction, fldPath *field.Pat
return allErrs
}
+
+// format is arn:aws:iam::123456789012:instance-profile/S3Access
+var validARN = regexp.MustCompile(`^arn:aws:iam::\d+:instance-profile\/\S+$`)
+
+// validateAuthProfile checks the String values for the AuthProfile
+func validateAuthProfile(v *kops.AuthProfile, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ if v.Node != nil {
+ arn := *v.Node
+ if !validARN.MatchString(arn) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("Node"), arn,
+ "Node AuthProfile must be a valid aws arn such as arn:aws:iam::123456789012:instance-profile/KopsNodeExampleRole"))
+ }
+ }
+ if v.Master != nil {
+ arn := *v.Master
+ if !validARN.MatchString(arn) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("Master"), arn,
+ "Node AuthProfile must be a valid aws arn such as arn:aws:iam::123456789012:instance-profile/KopsMasterExampleRole"))
+ }
+ }
+
+ return allErrs
+}
diff --git a/pkg/apis/kops/validation/validation_test.go b/pkg/apis/kops/validation/validation_test.go
index abdd6f3330..24628f1f34 100644
--- a/pkg/apis/kops/validation/validation_test.go
+++ b/pkg/apis/kops/validation/validation_test.go
@@ -21,6 +21,7 @@ import (
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kops/pkg/apis/kops"
+ "k8s.io/kops/upup/pkg/fi"
"testing"
)
@@ -80,10 +81,69 @@ func TestValidateCIDR(t *testing.T) {
}
}
+func s(v string) *string {
+ return fi.String(v)
+}
+
+func TestValidateAuth(t *testing.T) {
+ grid := []struct {
+ Input *kops.AuthProfile
+ ExpectedErrors []string
+ ExpectedDetail string
+ }{
+ {
+ Input: &kops.AuthProfile{
+ Master: s("arn:aws:iam::123456789012:instance-profile/S3Access"),
+ },
+ },
+ {
+ Input: &kops.AuthProfile{
+ Master: s("arn:aws:iam::123456789012:instance-profile/has/path/S3Access"),
+ },
+ },
+ {
+ Input: &kops.AuthProfile{
+ Master: s("42"),
+ },
+ ExpectedErrors: []string{"Invalid value::AuthProfile.Master"},
+ ExpectedDetail: "Node AuthProfile must be a valid aws arn such as arn:aws:iam::123456789012:instance-profile/KopsMasterExampleRole",
+ },
+ {
+ Input: &kops.AuthProfile{
+ Node: s("arn:aws:iam::123456789012:group/division_abc/subdivision_xyz/product_A/Developers"),
+ },
+ ExpectedErrors: []string{"Invalid value::AuthProfile.Node"},
+ ExpectedDetail: "Node AuthProfile must be a valid aws arn such as arn:aws:iam::123456789012:instance-profile/KopsNodeExampleRole",
+ },
+ }
+
+ for _, g := range grid {
+ errs := validateAuthProfile(g.Input, field.NewPath("AuthProfile"))
+
+ testErrors(t, g.Input, errs, g.ExpectedErrors)
+
+ if g.ExpectedDetail != "" {
+ found := false
+ for _, err := range errs {
+ if err.Detail == g.ExpectedDetail {
+ found = true
+ }
+ }
+ if !found {
+ for _, err := range errs {
+ t.Logf("found detail: %q", err.Detail)
+ }
+
+ t.Errorf("did not find expected error %q", g.ExpectedDetail)
+ }
+ }
+ }
+}
+
func testErrors(t *testing.T, context interface{}, actual field.ErrorList, expectedErrors []string) {
if len(expectedErrors) == 0 {
if len(actual) != 0 {
- t.Errorf("unexpected errors from %q: %v", context, actual)
+ t.Errorf("unexpected errors from %v: %+v", context, actual)
}
} else {
errStrings := sets.NewString()
diff --git a/pkg/featureflag/featureflag.go b/pkg/featureflag/featureflag.go
index 81b8af0f95..d924662967 100644
--- a/pkg/featureflag/featureflag.go
+++ b/pkg/featureflag/featureflag.go
@@ -52,6 +52,9 @@ var VSphereCloudProvider = New("VSphereCloudProvider", Bool(false))
var EnableExternalDNS = New("EnableExternalDNS", Bool(false))
+// CustomAuthProfileSupport if set will allow the reuse of existing IAM instance profiles
+var CustomAuthProfileSupport = New("CustomAuthProfileSupport", Bool(false))
+
var flags = make(map[string]*FeatureFlag)
var flagsMutex sync.Mutex
diff --git a/pkg/model/awsmodel/autoscalinggroup.go b/pkg/model/awsmodel/autoscalinggroup.go
index cd69ddd4dd..04097cba74 100644
--- a/pkg/model/awsmodel/autoscalinggroup.go
+++ b/pkg/model/awsmodel/autoscalinggroup.go
@@ -75,6 +75,11 @@ func (b *AutoscalingGroupModelBuilder) Build(c *fi.ModelBuilderContext) error {
volumeType = DefaultVolumeType
}
+ iamProfileLink, err := b.LinkToIAMInstanceProfile(ig)
+ if err != nil {
+ return fmt.Errorf("unable to find iam profile task link: %v", err)
+ }
+
t := &awstasks.LaunchConfiguration{
Name: s(name),
Lifecycle: b.Lifecycle,
@@ -82,7 +87,7 @@ func (b *AutoscalingGroupModelBuilder) Build(c *fi.ModelBuilderContext) error {
SecurityGroups: []*awstasks.SecurityGroup{
b.LinkToSecurityGroup(ig.Spec.Role),
},
- IAMInstanceProfile: b.LinkToIAMInstanceProfile(ig),
+ IAMInstanceProfile: iamProfileLink,
ImageID: s(ig.Spec.Image),
InstanceType: s(ig.Spec.MachineType),
@@ -111,8 +116,6 @@ func (b *AutoscalingGroupModelBuilder) Build(c *fi.ModelBuilderContext) error {
t.SecurityGroups = append(t.SecurityGroups, sgTask)
}
- var err error
-
if t.SSHKey, err = b.LinkToSSHKey(); err != nil {
return err
}
diff --git a/pkg/model/iam.go b/pkg/model/iam.go
index 3428870f84..5d7920206e 100644
--- a/pkg/model/iam.go
+++ b/pkg/model/iam.go
@@ -19,14 +19,17 @@ package model
import (
"encoding/json"
"fmt"
+ "reflect"
+ "strings"
+ "text/template"
+
"github.com/golang/glog"
+
"k8s.io/kops/pkg/apis/kops"
+ "k8s.io/kops/pkg/featureflag"
"k8s.io/kops/pkg/model/iam"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup/awstasks"
- "reflect"
- "strings"
- "text/template"
)
// IAMModelBuilder configures IAM objects
@@ -50,6 +53,7 @@ const RolePolicyTemplate = `{
}`
func (b *IAMModelBuilder) Build(c *fi.ModelBuilderContext) error {
+
// Collect the roles in use
var roles []kops.InstanceGroupRole
for _, ig := range b.InstanceGroups {
@@ -66,54 +70,100 @@ func (b *IAMModelBuilder) Build(c *fi.ModelBuilderContext) error {
// Generate IAM objects etc for each role
for _, role := range roles {
- name := b.IAMName(role)
+ name, err := b.IAMName(role)
+ if err != nil {
+ return fmt.Errorf("unable to set role name: %s", role)
+ }
var iamRole *awstasks.IAMRole
- {
- rolePolicy, err := b.buildAWSIAMRolePolicy()
- if err != nil {
- return err
- }
+ var arn string
- iamRole = &awstasks.IAMRole{
- Name: s(name),
- Lifecycle: b.Lifecycle,
+ // This is gated behind a FeatureFlag to allow the validation to harden over time
+ if b.Cluster.Spec.AuthProfile != nil && featureflag.CustomAuthProfileSupport.Enabled() {
+
+ roleAsString := string(role)
- RolePolicyDocument: fi.WrapResource(rolePolicy),
- ExportWithID: s(strings.ToLower(string(role)) + "s"),
+ if role == kops.InstanceGroupRoleMaster && b.Cluster.Spec.AuthProfile.Master != nil {
+ arn = *b.Cluster.Spec.AuthProfile.Master
+ glog.Warningf("Custom Instance Profile Support is enabled, kops will use %s, for %s role, this is an advanced feature please use with great care", arn, roleAsString)
+ } else if role == kops.InstanceGroupRoleNode && b.Cluster.Spec.AuthProfile.Node != nil {
+ arn = *b.Cluster.Spec.AuthProfile.Node
+ glog.Warningf("Custom Instance Profile Support is enabled, kops will use %s, for %s role, this is an advanced feature please use with great care", arn, roleAsString)
}
- c.AddTask(iamRole)
}
- {
- iamPolicy := &iam.IAMPolicyResource{
- Builder: &iam.IAMPolicyBuilder{
- Cluster: b.Cluster,
- Role: role,
- Region: b.Region,
- },
+ // If we've specified a custom instance profile for this cluster role,
+ // do not create a new one
+ // TODO Validate the instance profile role against the role that kops generates
+ // TODO Where is our entry point to validate the role?
+ // TODO We need to run the aws finder, but we do not have access to the cloud context here,
+ // TODO so we need to validate somewhere else.
+ // TODO We also need to validate that we do not have additional policies as well
+
+ // Steps to validate
+ // 1. get the role
+ // 2. generate the role out of iam_builder
+ // 3. diff the two
+ if arn != "" {
+ glog.V(8).Infof("re-using instance profile %s", name)
+ iamInstanceProfile := &awstasks.IAMInstanceProfile{
+ Name: s(name),
+ ID: s(arn),
+ Lifecycle: b.Lifecycle,
+ Shared: fi.Bool(true),
+ // We set Policy Document to nil as this role will be managed externally
}
+ c.AddTask(iamInstanceProfile)
+ // we do not add any other IAM tasks
+ continue
- // This is slightly tricky; we need to know the hosted zone id,
- // but we might be creating the hosted zone dynamically.
+ } else {
+
+ {
+ rolePolicy, err := b.buildAWSIAMRolePolicy()
+ if err != nil {
+ return err
+ }
+
+ iamRole = &awstasks.IAMRole{
+ Name: s(name),
+ RolePolicyDocument: fi.WrapResource(rolePolicy),
+ ExportWithID: s(strings.ToLower(string(role)) + "s"),
+ Lifecycle: b.Lifecycle,
+ }
+ c.AddTask(iamRole)
- // TODO: I don't love this technique for finding the task by name & modifying it
- dnsZoneTask, found := c.Tasks["DNSZone/"+b.NameForDNSZone()]
- if found {
- iamPolicy.DNSZone = dnsZoneTask.(*awstasks.DNSZone)
- } else {
- glog.V(2).Infof("Task %q not found; won't set route53 permissions in IAM", "DNSZone/"+b.NameForDNSZone())
}
- t := &awstasks.IAMRolePolicy{
- Name: s(name),
- Lifecycle: b.Lifecycle,
+ {
+ iamPolicy := &iam.IAMPolicyResource{
+ Builder: &iam.IAMPolicyBuilder{
+ Cluster: b.Cluster,
+ Role: role,
+ Region: b.Region,
+ },
+ }
+
+ // This is slightly tricky; we need to know the hosted zone id,
+ // but we might be creating the hosted zone dynamically.
- Role: iamRole,
- PolicyDocument: iamPolicy,
+ // TODO: I don't love this technique for finding the task by name & modifying it
+ dnsZoneTask, found := c.Tasks["DNSZone/"+b.NameForDNSZone()]
+ if found {
+ iamPolicy.DNSZone = dnsZoneTask.(*awstasks.DNSZone)
+ } else {
+ glog.V(2).Infof("Task %q not found; won't set route53 permissions in IAM", "DNSZone/"+b.NameForDNSZone())
+ }
+
+ t := &awstasks.IAMRolePolicy{
+ Name: s(name),
+ Role: iamRole,
+ PolicyDocument: iamPolicy,
+ Lifecycle: b.Lifecycle,
+ }
+ c.AddTask(t)
}
- c.AddTask(t)
}
var iamInstanceProfile *awstasks.IAMInstanceProfile
@@ -127,9 +177,8 @@ func (b *IAMModelBuilder) Build(c *fi.ModelBuilderContext) error {
{
iamInstanceProfileRole := &awstasks.IAMInstanceProfileRole{
- Name: s(name),
- Lifecycle: b.Lifecycle,
-
+ Name: s(name),
+ Lifecycle: b.Lifecycle,
InstanceProfile: iamInstanceProfile,
Role: iamRole,
}
diff --git a/pkg/model/names.go b/pkg/model/names.go
index 4825987659..e3c1aaabad 100644
--- a/pkg/model/names.go
+++ b/pkg/model/names.go
@@ -21,6 +21,7 @@ import (
"github.com/golang/glog"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/upup/pkg/fi/cloudup/awstasks"
+ "regexp"
)
func (b *KopsModelContext) SecurityGroupName(role kops.InstanceGroupRole) string {
@@ -96,24 +97,59 @@ func (b *KopsModelContext) NameForDNSZone() string {
return name
}
-func (b *KopsModelContext) IAMName(role kops.InstanceGroupRole) string {
+func (b *KopsModelContext) IAMName(role kops.InstanceGroupRole) (string, error) {
+ glog.V(2).Infof("finding iam name for role: %s", string(role))
+ authRole := b.Cluster.Spec.AuthProfile
switch role {
case kops.InstanceGroupRoleMaster:
- return "masters." + b.ClusterName()
- case kops.InstanceGroupRoleBastion:
- return "bastions." + b.ClusterName()
+ if authRole != nil && authRole.Master != nil {
+ name, err := findCustomAuthNameFromArn(authRole.Master)
+ if err != nil {
+ return "", err
+ }
+ glog.V(2).Infof("using custom name for master iam: %s", name)
+ return name, nil
+ }
+ return "masters." + b.ClusterName(), nil
case kops.InstanceGroupRoleNode:
- return "nodes." + b.ClusterName()
+ if authRole != nil && authRole.Node != nil {
+ name, err := findCustomAuthNameFromArn(authRole.Node)
+ if err != nil {
+ return "", err
+ }
+ glog.V(2).Infof("using custom name for node iam: %s", name)
+ return name, nil
+ }
+ return "nodes." + b.ClusterName(), nil
+ case kops.InstanceGroupRoleBastion:
+ return "bastions." + b.ClusterName(), nil
default:
glog.Fatalf("unknown InstanceGroup Role: %q", role)
- return ""
+ return "", fmt.Errorf("unknown InstanceGroup Role: %q", role)
+ }
+}
+
+// RoleNameRegExp matches the final path component of an ARN.
+var RoleNameRegExp = regexp.MustCompile(`([^/]+$)`)
+
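+// findCustomAuthNameFromArn extracts the resource name (the segment after the
+// final "/") from an instance-profile ARN, e.g. "S3Access" from
+// arn:aws:iam::123456789012:instance-profile/S3Access.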
+func findCustomAuthNameFromArn(arn *string) (string, error) {
+ if arn == nil || *arn == "" {
+ return "", fmt.Errorf("unable to parse role arn as it is not set")
+ }
+ rs := RoleNameRegExp.FindStringSubmatch(*arn)
+ if len(rs) >= 2 {
+ return rs[1], nil
}
+
+ return "", fmt.Errorf("unable to parse role arn %q", arn)
}
-func (b *KopsModelContext) LinkToIAMInstanceProfile(ig *kops.InstanceGroup) *awstasks.IAMInstanceProfile {
- name := b.IAMName(ig.Spec.Role)
- return &awstasks.IAMInstanceProfile{Name: &name}
+func (b *KopsModelContext) LinkToIAMInstanceProfile(ig *kops.InstanceGroup) (*awstasks.IAMInstanceProfile, error) {
+ name, err := b.IAMName(ig.Spec.Role)
+ if err != nil {
+ return nil, err
+ }
+ return &awstasks.IAMInstanceProfile{Name: &name}, nil
}
// SSHKeyName computes a unique SSH key name, combining the cluster name and the SSH public key fingerprint
diff --git a/tests/integration/custom_iam_role/id_rsa.pub b/tests/integration/custom_iam_role/id_rsa.pub
new file mode 100755
index 0000000000..81cb012783
--- /dev/null
+++ b/tests/integration/custom_iam_role/id_rsa.pub
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ==
diff --git a/tests/integration/custom_iam_role/in-v1alpha2.yaml b/tests/integration/custom_iam_role/in-v1alpha2.yaml
new file mode 100644
index 0000000000..32e97e1796
--- /dev/null
+++ b/tests/integration/custom_iam_role/in-v1alpha2.yaml
@@ -0,0 +1,82 @@
+apiVersion: kops/v1alpha2
+kind: Cluster
+metadata:
+ creationTimestamp: 2017-01-01T00:00:00Z
+ name: custom-iam-role.example.com
+spec:
+ api:
+ dns: {}
+ authProfile:
+ master: "arn:aws:iam::4222917490108:instance-profile/kops-custom-master-role"
+ node: "arn:aws:iam::422917490108:instance-profile/kops-custom-node-role"
+ channel: stable
+ cloudProvider: aws
+ configBase: memfs://tests/custom-iam-role.example.com
+ etcdClusters:
+ - etcdMembers:
+ - instanceGroup: master-us-test-1a
+ name: a
+ name: main
+ - etcdMembers:
+ - instanceGroup: master-us-test-1a
+ name: a
+ name: events
+ kubernetesApiAccess:
+ - 0.0.0.0/0
+ kubernetesVersion: v1.6.4
+ masterPublicName: api.custom-iam-role.example.com
+ networkCIDR: 172.20.0.0/16
+ networking:
+ kubenet: {}
+ nonMasqueradeCIDR: 100.64.0.0/10
+ roleCustomIamRoles:
+ Master: foo
+ Node: bar
+ sshAccess:
+ - 0.0.0.0/0
+ subnets:
+ - cidr: 172.20.32.0/19
+ name: us-test-1a
+ type: Public
+ zone: us-test-1a
+ topology:
+ dns:
+ type: Public
+ masters: public
+ nodes: public
+
+---
+
+apiVersion: kops/v1alpha2
+kind: InstanceGroup
+metadata:
+ creationTimestamp: 2017-01-01T00:00:00Z
+ labels:
+ kops.k8s.io/cluster: custom-iam-role.example.com
+ name: master-us-test-1a
+spec:
+ image: kope.io/k8s-1.5-debian-jessie-amd64-hvm-ebs-2017-01-09
+ machineType: m3.medium
+ maxSize: 1
+ minSize: 1
+ role: Master
+ subnets:
+ - us-test-1a
+
+---
+
+apiVersion: kops/v1alpha2
+kind: InstanceGroup
+metadata:
+ creationTimestamp: 2017-01-01T00:00:00Z
+ labels:
+ kops.k8s.io/cluster: custom-iam-role.example.com
+ name: nodes
+spec:
+ image: kope.io/k8s-1.5-debian-jessie-amd64-hvm-ebs-2017-01-09
+ machineType: t2.medium
+ maxSize: 2
+ minSize: 2
+ role: Node
+ subnets:
+ - us-test-1a
diff --git a/tests/integration/custom_iam_role/kubernetes.tf b/tests/integration/custom_iam_role/kubernetes.tf
new file mode 100644
index 0000000000..857c82668d
--- /dev/null
+++ b/tests/integration/custom_iam_role/kubernetes.tf
@@ -0,0 +1,348 @@
+output "cluster_name" {
+ value = "custom-iam-role.example.com"
+}
+
+output "master_security_group_ids" {
+ value = ["${aws_security_group.masters-custom-iam-role-example-com.id}"]
+}
+
+output "node_security_group_ids" {
+ value = ["${aws_security_group.nodes-custom-iam-role-example-com.id}"]
+}
+
+output "node_subnet_ids" {
+ value = ["${aws_subnet.us-test-1a-custom-iam-role-example-com.id}"]
+}
+
+output "region" {
+ value = "us-test-1"
+}
+
+output "vpc_id" {
+ value = "${aws_vpc.custom-iam-role-example-com.id}"
+}
+
+resource "aws_autoscaling_group" "master-us-test-1a-masters-custom-iam-role-example-com" {
+ name = "master-us-test-1a.masters.custom-iam-role.example.com"
+ launch_configuration = "${aws_launch_configuration.master-us-test-1a-masters-custom-iam-role-example-com.id}"
+ max_size = 1
+ min_size = 1
+ vpc_zone_identifier = ["${aws_subnet.us-test-1a-custom-iam-role-example-com.id}"]
+
+ tag = {
+ key = "KubernetesCluster"
+ value = "custom-iam-role.example.com"
+ propagate_at_launch = true
+ }
+
+ tag = {
+ key = "Name"
+ value = "master-us-test-1a.masters.custom-iam-role.example.com"
+ propagate_at_launch = true
+ }
+
+ tag = {
+ key = "k8s.io/role/master"
+ value = "1"
+ propagate_at_launch = true
+ }
+}
+
+resource "aws_autoscaling_group" "nodes-custom-iam-role-example-com" {
+ name = "nodes.custom-iam-role.example.com"
+ launch_configuration = "${aws_launch_configuration.nodes-custom-iam-role-example-com.id}"
+ max_size = 2
+ min_size = 2
+ vpc_zone_identifier = ["${aws_subnet.us-test-1a-custom-iam-role-example-com.id}"]
+
+ tag = {
+ key = "KubernetesCluster"
+ value = "custom-iam-role.example.com"
+ propagate_at_launch = true
+ }
+
+ tag = {
+ key = "Name"
+ value = "nodes.custom-iam-role.example.com"
+ propagate_at_launch = true
+ }
+
+ tag = {
+ key = "k8s.io/role/node"
+ value = "1"
+ propagate_at_launch = true
+ }
+}
+
+resource "aws_ebs_volume" "a-etcd-events-custom-iam-role-example-com" {
+ availability_zone = "us-test-1a"
+ size = 20
+ type = "gp2"
+ encrypted = false
+
+ tags = {
+ KubernetesCluster = "custom-iam-role.example.com"
+ Name = "a.etcd-events.custom-iam-role.example.com"
+ "k8s.io/etcd/events" = "a/a"
+ "k8s.io/role/master" = "1"
+ }
+}
+
+resource "aws_ebs_volume" "a-etcd-main-custom-iam-role-example-com" {
+ availability_zone = "us-test-1a"
+ size = 20
+ type = "gp2"
+ encrypted = false
+
+ tags = {
+ KubernetesCluster = "custom-iam-role.example.com"
+ Name = "a.etcd-main.custom-iam-role.example.com"
+ "k8s.io/etcd/main" = "a/a"
+ "k8s.io/role/master" = "1"
+ }
+}
+
+resource "aws_internet_gateway" "custom-iam-role-example-com" {
+ vpc_id = "${aws_vpc.custom-iam-role-example-com.id}"
+
+ tags = {
+ KubernetesCluster = "custom-iam-role.example.com"
+ Name = "custom-iam-role.example.com"
+ }
+}
+
+resource "aws_key_pair" "kubernetes-custom-iam-role-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157" {
+ key_name = "kubernetes.custom-iam-role.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57"
+ public_key = "${file("${path.module}/data/aws_key_pair_kubernetes.custom-iam-role.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key")}"
+}
+
+resource "aws_launch_configuration" "master-us-test-1a-masters-custom-iam-role-example-com" {
+ name_prefix = "master-us-test-1a.masters.custom-iam-role.example.com-"
+ image_id = "ami-15000000"
+ instance_type = "m3.medium"
+ key_name = "${aws_key_pair.kubernetes-custom-iam-role-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id}"
+ iam_instance_profile = "arn:aws:iam::4222917490108:instance-profile/kops-custom-master-role"
+ security_groups = ["${aws_security_group.masters-custom-iam-role-example-com.id}"]
+ associate_public_ip_address = true
+ user_data = "${file("${path.module}/data/aws_launch_configuration_master-us-test-1a.masters.custom-iam-role.example.com_user_data")}"
+
+ root_block_device = {
+ volume_type = "gp2"
+ volume_size = 64
+ delete_on_termination = true
+ }
+
+ ephemeral_block_device = {
+ device_name = "/dev/sdc"
+ virtual_name = "ephemeral0"
+ }
+
+ lifecycle = {
+ create_before_destroy = true
+ }
+}
+
+resource "aws_launch_configuration" "nodes-custom-iam-role-example-com" {
+ name_prefix = "nodes.custom-iam-role.example.com-"
+ image_id = "ami-15000000"
+ instance_type = "t2.medium"
+ key_name = "${aws_key_pair.kubernetes-custom-iam-role-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id}"
+ iam_instance_profile = "arn:aws:iam::422917490108:instance-profile/kops-custom-node-role"
+ security_groups = ["${aws_security_group.nodes-custom-iam-role-example-com.id}"]
+ associate_public_ip_address = true
+ user_data = "${file("${path.module}/data/aws_launch_configuration_nodes.custom-iam-role.example.com_user_data")}"
+
+ root_block_device = {
+ volume_type = "gp2"
+ volume_size = 128
+ delete_on_termination = true
+ }
+
+ lifecycle = {
+ create_before_destroy = true
+ }
+}
+
+resource "aws_route" "0-0-0-0--0" {
+ route_table_id = "${aws_route_table.custom-iam-role-example-com.id}"
+ destination_cidr_block = "0.0.0.0/0"
+ gateway_id = "${aws_internet_gateway.custom-iam-role-example-com.id}"
+}
+
+resource "aws_route_table" "custom-iam-role-example-com" {
+ vpc_id = "${aws_vpc.custom-iam-role-example-com.id}"
+
+ tags = {
+ KubernetesCluster = "custom-iam-role.example.com"
+ Name = "custom-iam-role.example.com"
+ }
+}
+
+resource "aws_route_table_association" "us-test-1a-custom-iam-role-example-com" {
+ subnet_id = "${aws_subnet.us-test-1a-custom-iam-role-example-com.id}"
+ route_table_id = "${aws_route_table.custom-iam-role-example-com.id}"
+}
+
+resource "aws_security_group" "masters-custom-iam-role-example-com" {
+ name = "masters.custom-iam-role.example.com"
+ vpc_id = "${aws_vpc.custom-iam-role-example-com.id}"
+ description = "Security group for masters"
+
+ tags = {
+ KubernetesCluster = "custom-iam-role.example.com"
+ Name = "masters.custom-iam-role.example.com"
+ }
+}
+
+resource "aws_security_group" "nodes-custom-iam-role-example-com" {
+ name = "nodes.custom-iam-role.example.com"
+ vpc_id = "${aws_vpc.custom-iam-role-example-com.id}"
+ description = "Security group for nodes"
+
+ tags = {
+ KubernetesCluster = "custom-iam-role.example.com"
+ Name = "nodes.custom-iam-role.example.com"
+ }
+}
+
+resource "aws_security_group_rule" "all-master-to-master" {
+ type = "ingress"
+ security_group_id = "${aws_security_group.masters-custom-iam-role-example-com.id}"
+ source_security_group_id = "${aws_security_group.masters-custom-iam-role-example-com.id}"
+ from_port = 0
+ to_port = 0
+ protocol = "-1"
+}
+
+resource "aws_security_group_rule" "all-master-to-node" {
+ type = "ingress"
+ security_group_id = "${aws_security_group.nodes-custom-iam-role-example-com.id}"
+ source_security_group_id = "${aws_security_group.masters-custom-iam-role-example-com.id}"
+ from_port = 0
+ to_port = 0
+ protocol = "-1"
+}
+
+resource "aws_security_group_rule" "all-node-to-node" {
+ type = "ingress"
+ security_group_id = "${aws_security_group.nodes-custom-iam-role-example-com.id}"
+ source_security_group_id = "${aws_security_group.nodes-custom-iam-role-example-com.id}"
+ from_port = 0
+ to_port = 0
+ protocol = "-1"
+}
+
+resource "aws_security_group_rule" "https-external-to-master-0-0-0-0--0" {
+ type = "ingress"
+ security_group_id = "${aws_security_group.masters-custom-iam-role-example-com.id}"
+ from_port = 443
+ to_port = 443
+ protocol = "tcp"
+ cidr_blocks = ["0.0.0.0/0"]
+}
+
+resource "aws_security_group_rule" "master-egress" {
+ type = "egress"
+ security_group_id = "${aws_security_group.masters-custom-iam-role-example-com.id}"
+ from_port = 0
+ to_port = 0
+ protocol = "-1"
+ cidr_blocks = ["0.0.0.0/0"]
+}
+
+resource "aws_security_group_rule" "node-egress" {
+ type = "egress"
+ security_group_id = "${aws_security_group.nodes-custom-iam-role-example-com.id}"
+ from_port = 0
+ to_port = 0
+ protocol = "-1"
+ cidr_blocks = ["0.0.0.0/0"]
+}
+
+resource "aws_security_group_rule" "node-to-master-tcp-1-4000" {
+ type = "ingress"
+ security_group_id = "${aws_security_group.masters-custom-iam-role-example-com.id}"
+ source_security_group_id = "${aws_security_group.nodes-custom-iam-role-example-com.id}"
+ from_port = 1
+ to_port = 4000
+ protocol = "tcp"
+}
+
+resource "aws_security_group_rule" "node-to-master-tcp-4003-65535" {
+ type = "ingress"
+ security_group_id = "${aws_security_group.masters-custom-iam-role-example-com.id}"
+ source_security_group_id = "${aws_security_group.nodes-custom-iam-role-example-com.id}"
+ from_port = 4003
+ to_port = 65535
+ protocol = "tcp"
+}
+
+resource "aws_security_group_rule" "node-to-master-udp-1-65535" {
+ type = "ingress"
+ security_group_id = "${aws_security_group.masters-custom-iam-role-example-com.id}"
+ source_security_group_id = "${aws_security_group.nodes-custom-iam-role-example-com.id}"
+ from_port = 1
+ to_port = 65535
+ protocol = "udp"
+}
+
+resource "aws_security_group_rule" "ssh-external-to-master-0-0-0-0--0" {
+ type = "ingress"
+ security_group_id = "${aws_security_group.masters-custom-iam-role-example-com.id}"
+ from_port = 22
+ to_port = 22
+ protocol = "tcp"
+ cidr_blocks = ["0.0.0.0/0"]
+}
+
+resource "aws_security_group_rule" "ssh-external-to-node-0-0-0-0--0" {
+ type = "ingress"
+ security_group_id = "${aws_security_group.nodes-custom-iam-role-example-com.id}"
+ from_port = 22
+ to_port = 22
+ protocol = "tcp"
+ cidr_blocks = ["0.0.0.0/0"]
+}
+
+resource "aws_subnet" "us-test-1a-custom-iam-role-example-com" {
+ vpc_id = "${aws_vpc.custom-iam-role-example-com.id}"
+ cidr_block = "172.20.32.0/19"
+ availability_zone = "us-test-1a"
+
+ tags = {
+ KubernetesCluster = "custom-iam-role.example.com"
+ Name = "us-test-1a.custom-iam-role.example.com"
+ "kubernetes.io/cluster/custom-iam-role.example.com" = "owned"
+ }
+}
+
+resource "aws_vpc" "custom-iam-role-example-com" {
+ cidr_block = "172.20.0.0/16"
+ enable_dns_hostnames = true
+ enable_dns_support = true
+
+ tags = {
+ KubernetesCluster = "custom-iam-role.example.com"
+ Name = "custom-iam-role.example.com"
+ "kubernetes.io/cluster/custom-iam-role.example.com" = "owned"
+ }
+}
+
+resource "aws_vpc_dhcp_options" "custom-iam-role-example-com" {
+ domain_name = "us-test-1.compute.internal"
+ domain_name_servers = ["AmazonProvidedDNS"]
+
+ tags = {
+ KubernetesCluster = "custom-iam-role.example.com"
+ Name = "custom-iam-role.example.com"
+ }
+}
+
+resource "aws_vpc_dhcp_options_association" "custom-iam-role-example-com" {
+ vpc_id = "${aws_vpc.custom-iam-role-example-com.id}"
+ dhcp_options_id = "${aws_vpc_dhcp_options.custom-iam-role-example-com.id}"
+}
+
+terraform = {
+ required_version = ">= 0.9.3"
+}
diff --git a/upup/pkg/fi/cloudup/awstasks/iaminstanceprofile.go b/upup/pkg/fi/cloudup/awstasks/iaminstanceprofile.go
index e7e995cdb6..fa2d644ff5 100644
--- a/upup/pkg/fi/cloudup/awstasks/iaminstanceprofile.go
+++ b/upup/pkg/fi/cloudup/awstasks/iaminstanceprofile.go
@@ -34,8 +34,9 @@ import (
type IAMInstanceProfile struct {
Name *string
Lifecycle *fi.Lifecycle
-
- ID *string
+ // Shared is set if this is a shared instance profile
+ Shared *bool
+ ID *string
}
var _ fi.CompareWithID = &IAMInstanceProfile{}
@@ -103,6 +104,14 @@ func (s *IAMInstanceProfile) CheckChanges(a, e, changes *IAMInstanceProfile) err
}
func (_ *IAMInstanceProfile) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *IAMInstanceProfile) error {
+ shared := fi.BoolValue(e.Shared)
+ if shared {
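+ // Shared instance profiles are managed externally; kops only verifies that the
+ // profile exists and otherwise leaves it untouched.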
+ if a == nil {
+ return fmt.Errorf("instance role profile with id %q not found", fi.StringValue(e.ID))
+ }
+
+ return nil
+ }
if a == nil {
glog.V(2).Infof("Creating IAMInstanceProfile with Name:%q", *e.Name)
@@ -154,6 +163,9 @@ func (_ *IAMInstanceProfile) RenderTerraform(t *terraform.TerraformTarget, a, e,
}
func (e *IAMInstanceProfile) TerraformLink() *terraform.Literal {
+ if e.Shared != nil && *e.Shared {
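+ // A shared profile has no terraform resource to reference, so emit its ARN directly.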
+ return terraform.LiteralFromStringValue(*e.ID)
+ }
return terraform.LiteralProperty("aws_iam_instance_profile", *e.Name, "id")
}
diff --git a/upup/pkg/fi/cloudup/awstasks/iamrole.go b/upup/pkg/fi/cloudup/awstasks/iamrole.go
index 57f2327d9b..afbc02b890 100644
--- a/upup/pkg/fi/cloudup/awstasks/iamrole.go
+++ b/upup/pkg/fi/cloudup/awstasks/iamrole.go
@@ -194,6 +194,12 @@ type terraformIAMRole struct {
}
func (_ *IAMRole) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *IAMRole) error {
+
+ // TODO find a better approach: when the role is reused there is no policy document to render
+ if e.RolePolicyDocument == nil {
+ return nil
+ }
+
policy, err := t.AddFile("aws_iam_role", *e.Name, "policy", e.RolePolicyDocument)
if err != nil {
return fmt.Errorf("error rendering RolePolicyDocument: %v", err)
@@ -213,7 +219,11 @@ func (_ *IAMRole) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *I
}
func (e *IAMRole) TerraformLink() *terraform.Literal {
- return terraform.LiteralProperty("aws_iam_role", *e.Name, "name")
+ if e.RolePolicyDocument != nil {
+ return terraform.LiteralProperty("aws_iam_role", *e.Name, "name")
+ }
+
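+ // No policy document means the role is managed externally; reference its ARN directly.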
+ return terraform.LiteralFromStringValue(*e.ID)
}
type cloudformationIAMRole struct {
diff --git a/upup/pkg/fi/cloudup/awstasks/launchconfiguration.go b/upup/pkg/fi/cloudup/awstasks/launchconfiguration.go
index 21169b85a8..85500ef7df 100644
--- a/upup/pkg/fi/cloudup/awstasks/launchconfiguration.go
+++ b/upup/pkg/fi/cloudup/awstasks/launchconfiguration.go
@@ -450,6 +450,7 @@ func (_ *LaunchConfiguration) RenderTerraform(t *terraform.TerraformTarget, a, e
return err
}
}
+
if e.IAMInstanceProfile != nil {
tf.IAMInstanceProfile = e.IAMInstanceProfile.TerraformLink()
}