From 5ae85b9de5ce400677321802812d2b1ed863757d Mon Sep 17 00:00:00 2001
From: Vincent Schwarzer <vincent.schwarzer@yahoo.de>
Date: Fri, 3 Mar 2017 15:19:28 +0100
Subject: [PATCH] Added Missing AWS IAM Profiles and Policies

The AWS IAM profiles and policies required to run Kargo on AWS
are no longer hosted in the main Kubernetes repo since kube-up was
deprecated, so we now ship these files in the Kargo repository itself.
---
 contrib/aws_iam/kubernetes-master-policy.json | 27 +++++++++++
 contrib/aws_iam/kubernetes-master-role.json   | 10 +++++
 contrib/aws_iam/kubernetes-minion-policy.json | 45 +++++++++++++++++++
 contrib/aws_iam/kubernetes-minion-role.json   | 10 +++++
 docs/aws.md                                   |  2 +-
 5 files changed, 93 insertions(+), 1 deletion(-)
 create mode 100644 contrib/aws_iam/kubernetes-master-policy.json
 create mode 100644 contrib/aws_iam/kubernetes-master-role.json
 create mode 100644 contrib/aws_iam/kubernetes-minion-policy.json
 create mode 100644 contrib/aws_iam/kubernetes-minion-role.json

diff --git a/contrib/aws_iam/kubernetes-master-policy.json b/contrib/aws_iam/kubernetes-master-policy.json
new file mode 100644
index 000000000..e5cbaea80
--- /dev/null
+++ b/contrib/aws_iam/kubernetes-master-policy.json
@@ -0,0 +1,27 @@
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": ["ec2:*"],
+      "Resource": ["*"]
+    },
+    {
+      "Effect": "Allow",
+      "Action": ["elasticloadbalancing:*"],
+      "Resource": ["*"]
+    },
+    {
+      "Effect": "Allow",
+      "Action": ["route53:*"],
+      "Resource": ["*"]
+    },
+    {
+      "Effect": "Allow",
+      "Action": "s3:*",
+      "Resource": [
+        "arn:aws:s3:::kubernetes-*"
+      ]
+    }
+  ]
+}
diff --git a/contrib/aws_iam/kubernetes-master-role.json b/contrib/aws_iam/kubernetes-master-role.json
new file mode 100644
index 000000000..66d5de1d5
--- /dev/null
+++ b/contrib/aws_iam/kubernetes-master-role.json
@@ -0,0 +1,10 @@
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": { "Service": "ec2.amazonaws.com"},
+      "Action": "sts:AssumeRole"
+    }
+  ]
+}
diff --git a/contrib/aws_iam/kubernetes-minion-policy.json b/contrib/aws_iam/kubernetes-minion-policy.json
new file mode 100644
index 000000000..af81e98c8
--- /dev/null
+++ b/contrib/aws_iam/kubernetes-minion-policy.json
@@ -0,0 +1,45 @@
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": "s3:*",
+      "Resource": [
+        "arn:aws:s3:::kubernetes-*"
+      ]
+    },
+    {
+      "Effect": "Allow",
+      "Action": "ec2:Describe*",
+      "Resource": "*"
+    },
+    {
+      "Effect": "Allow",
+      "Action": "ec2:AttachVolume",
+      "Resource": "*"
+    },
+    {
+      "Effect": "Allow",
+      "Action": "ec2:DetachVolume",
+      "Resource": "*"
+    },
+    {
+      "Effect": "Allow",
+      "Action": ["route53:*"],
+      "Resource": ["*"]
+    },
+    {
+      "Effect": "Allow",
+      "Action": [
+        "ecr:GetAuthorizationToken",
+        "ecr:BatchCheckLayerAvailability",
+        "ecr:GetDownloadUrlForLayer",
+        "ecr:GetRepositoryPolicy",
+        "ecr:DescribeRepositories",
+        "ecr:ListImages",
+        "ecr:BatchGetImage"
+      ],
+      "Resource": "*"
+    }
+  ]
+}
diff --git a/contrib/aws_iam/kubernetes-minion-role.json b/contrib/aws_iam/kubernetes-minion-role.json
new file mode 100644
index 000000000..66d5de1d5
--- /dev/null
+++ b/contrib/aws_iam/kubernetes-minion-role.json
@@ -0,0 +1,10 @@
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": { "Service": "ec2.amazonaws.com"},
+      "Action": "sts:AssumeRole"
+    }
+  ]
+}
diff --git a/docs/aws.md b/docs/aws.md
index 429e77a54..b16b8d725 100644
--- a/docs/aws.md
+++ b/docs/aws.md
@@ -3,7 +3,7 @@ AWS
 
 To deploy kubespray on [AWS](https://aws.amazon.com/) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'aws'`.
 
-Prior to creating your instances, you **must** ensure that you have created IAM roles and policies for both "kubernetes-master" and "kubernetes-node". You can find the IAM policies [here](https://github.com/kubernetes/kubernetes/tree/master/cluster/aws/templates/iam). See the [IAM Documentation](https://aws.amazon.com/documentation/iam/) if guidance is needed on how to set these up. When you bring your instances online, associate them with the respective IAM role. Nodes that are only to be used for Etcd do not need a role.
+Prior to creating your instances, you **must** ensure that you have created IAM roles and policies for both "kubernetes-master" and "kubernetes-minion". You can find the IAM policies [here](https://github.com/kubernetes-incubator/kargo/tree/master/contrib/aws_iam/). See the [IAM Documentation](https://aws.amazon.com/documentation/iam/) if guidance is needed on how to set these up. When you bring your instances online, associate them with the respective IAM role. Nodes that are only to be used for Etcd do not need a role.
 
 The next step is to make sure the hostnames in your `inventory` file are identical to your internal hostnames in AWS. This may look something like `ip-111-222-333-444.us-west-2.compute.internal`. You can then specify how Ansible connects to these instances with `ansible_ssh_host` and `ansible_ssh_user`.
 
-- 
GitLab