Unverified Commit 437026f5 authored by ChengHao Yang

Cleanup: remove all cloud_provider related tasks & files

parent 0a2e68c9
Showing 6 additions and 685 deletions
@@ -141,7 +141,7 @@ kube_proxy_nodeport_addresses: >-
# If non-empty, will use this string as identification instead of the actual hostname
# kube_override_hostname: >-
-# {%- if cloud_provider is defined and cloud_provider in ['aws'] -%}
+# {%- if cloud_provider is defined -%}
# {%- else -%}
# {{ inventory_hostname }}
# {%- endif -%}
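For readers tracing the new expression: with cloud_provider undefined, the template falls through to the else branch and the kubelet keeps its inventory name; once any cloud_provider is set, the override renders empty. A worked sketch, assuming a host named node1 (the host name is illustrative):

# cloud_provider undefined, inventory_hostname == "node1":
#   kube_override_hostname renders to "node1"
# cloud_provider defined (any value):
#   kube_override_hostname renders to "" and the cloud provider supplies the node name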
---
oci_security_list_management: All
oci_use_instance_principals: false
oci_cloud_controller_version: 0.7.0
oci_cloud_controller_pull_source: iad.ocir.io/oracle/cloud-provider-oci
---
- name: "OCI Cloud Controller | Credentials Check | oci_private_key"
fail:
msg: "oci_private_key is missing"
when:
- not oci_use_instance_principals
- oci_private_key is not defined or not oci_private_key
- name: "OCI Cloud Controller | Credentials Check | oci_region_id"
fail:
msg: "oci_region_id is missing"
when:
- not oci_use_instance_principals
- oci_region_id is not defined or not oci_region_id
- name: "OCI Cloud Controller | Credentials Check | oci_tenancy_id"
fail:
msg: "oci_tenancy_id is missing"
when:
- not oci_use_instance_principals
- oci_tenancy_id is not defined or not oci_tenancy_id
- name: "OCI Cloud Controller | Credentials Check | oci_user_id"
fail:
msg: "oci_user_id is missing"
when:
- not oci_use_instance_principals
- oci_user_id is not defined or not oci_user_id
- name: "OCI Cloud Controller | Credentials Check | oci_user_fingerprint"
fail:
msg: "oci_user_fingerprint is missing"
when:
- not oci_use_instance_principals
- oci_user_fingerprint is not defined or not oci_user_fingerprint
- name: "OCI Cloud Controller | Credentials Check | oci_compartment_id"
fail:
msg: "oci_compartment_id is missing. This is the compartment in which the cluster resides"
when:
- oci_compartment_id is not defined or not oci_compartment_id
- name: "OCI Cloud Controller | Credentials Check | oci_vnc_id"
fail:
msg: "oci_vnc_id is missing. This is the Virtual Cloud Network in which the cluster resides"
when:
- oci_vnc_id is not defined or not oci_vnc_id
- name: "OCI Cloud Controller | Credentials Check | oci_subnet1_id"
fail:
msg: "oci_subnet1_id is missingg. This is the first subnet to which loadbalancers will be added"
when:
- oci_subnet1_id is not defined or not oci_subnet1_id
- name: "OCI Cloud Controller | Credentials Check | oci_subnet2_id"
fail:
msg: "oci_subnet2_id is missing. Two subnets are required for load balancer high availability"
when:
- oci_cloud_controller_version is version_compare('0.7.0', '<')
- oci_subnet2_id is not defined or not oci_subnet2_id
- name: "OCI Cloud Controller | Credentials Check | oci_security_list_management"
fail:
msg: "oci_security_list_management is missing, or not defined correctly. Valid options are (All, Frontend, None)."
when:
- oci_security_list_management is not defined or oci_security_list_management not in ["All", "Frontend", "None"]
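Every guard above follows the same fail-if-empty shape. For comparison, one of them could be collapsed into Ansible's built-in assert module — a minimal sketch using the same variable names, not part of the deleted role:

- name: "OCI Cloud Controller | Credentials Check | oci_region_id (assert form)"
  assert:
    that:
      - oci_region_id is defined
      - oci_region_id | length > 0
    fail_msg: "oci_region_id is missing"
  when: not oci_use_instance_principals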
---
- name: OCI Cloud Controller | Check Oracle Cloud credentials
import_tasks: credentials-check.yml
- name: "OCI Cloud Controller | Generate Cloud Provider Configuration"
template:
src: controller-manager-config.yml.j2
dest: "{{ kube_config_dir }}/controller-manager-config.yml"
mode: "0644"
when: inventory_hostname == groups['kube_control_plane'][0]
- name: "OCI Cloud Controller | Slurp Configuration"
slurp:
src: "{{ kube_config_dir }}/controller-manager-config.yml"
register: controller_manager_config
- name: "OCI Cloud Controller | Encode Configuration"
set_fact:
controller_manager_config_base64: "{{ controller_manager_config.content }}"
when: inventory_hostname == groups['kube_control_plane'][0]
- name: "OCI Cloud Controller | Generate Manifests"
template:
src: oci-cloud-provider.yml.j2
dest: "{{ kube_config_dir }}/oci-cloud-provider.yml"
mode: "0644"
when: inventory_hostname == groups['kube_control_plane'][0]
- name: "OCI Cloud Controller | Apply Manifests"
kube:
kubectl: "{{ bin_dir }}/kubectl"
filename: "{{ kube_config_dir }}/oci-cloud-provider.yml"
state: latest
when: inventory_hostname == groups['kube_control_plane'][0]
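One subtlety in the task sequence above: slurp already returns file content base64-encoded, so the "Encode Configuration" step simply copies controller_manager_config.content into a fact; no extra b64encode filter is needed. The registered variable has roughly this shape (the content value is illustrative):

controller_manager_config:
  content: "dXNlSW5zdGFuY2VQcmluY2lwYWxzOiB0cnVlCg=="  # base64 of the rendered config
  encoding: "base64"
  source: "/etc/kubernetes/controller-manager-config.yml"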
{% macro private_key() %}{{ oci_private_key }}{% endmacro %}
{% if oci_use_instance_principals %}
# Instance principals are used for authentication
# (https://docs.us-phoenix-1.oraclecloud.com/Content/Identity/Tasks/callingservicesfrominstances.htm).
# Ensure you have set up the following OCI policies and that your Kubernetes nodes are running within them:
# allow dynamic-group [your dynamic group name] to read instance-family in compartment [your compartment name]
# allow dynamic-group [your dynamic group name] to use virtual-network-family in compartment [your compartment name]
# allow dynamic-group [your dynamic group name] to manage load-balancers in compartment [your compartment name]
useInstancePrincipals: true
{% else %}
useInstancePrincipals: false
{% endif %}
auth:
{% if oci_use_instance_principals %}
# This key is put here too for backwards compatibility
useInstancePrincipals: true
{% else %}
useInstancePrincipals: false
region: {{ oci_region_id }}
tenancy: {{ oci_tenancy_id }}
user: {{ oci_user_id }}
key: |
{{ oci_private_key }}
{% if oci_private_key_passphrase is defined %}
passphrase: {{ oci_private_key_passphrase }}
{% endif %}
fingerprint: {{ oci_user_fingerprint }}
{% endif %}
# compartment configures Compartment within which the cluster resides.
compartment: {{ oci_compartment_id }}
# vcn configures the Virtual Cloud Network (VCN) within which the cluster resides.
vcn: {{ oci_vnc_id }}
loadBalancer:
# subnet1 configures one of two subnets to which load balancers will be added.
# OCI load balancers require two subnets to ensure high availability.
subnet1: {{ oci_subnet1_id }}
{% if oci_subnet2_id is defined %}
# subnet2 configures the second of two subnets to which load balancers will be
# added. OCI load balancers require two subnets to ensure high availability.
subnet2: {{ oci_subnet2_id }}
{% endif %}
# SecurityListManagementMode configures how security lists are managed by the CCM.
# "All" (default): Manage all required security list rules for load balancer services.
# "Frontend": Manage only security list rules for ingress to the load
# balancer. Requires that the user has setup a rule that
# allows inbound traffic to the appropriate ports for kube
# proxy health port, node port ranges, and health check port ranges.
# E.g. 10.82.0.0/16 30000-32000.
# "None": Disables all security list management. Requires that the
# user has setup a rule that allows inbound traffic to the
# appropriate ports for kube proxy health port, node port
# ranges, and health check port ranges. E.g. 10.82.0.0/16 30000-32000.
# Additionally requires the user to mange rules to allow
# inbound traffic to load balancers.
securityListManagementMode: {{ oci_security_list_management }}
{% if oci_security_lists is defined and oci_security_lists | length > 0 %}
# Optional specification of which security lists to modify per subnet. This does not apply if security list management is off.
securityLists:
{% for subnet_ocid, list_ocid in oci_security_lists.items() %}
{{ subnet_ocid }}: {{ list_ocid }}
{% endfor %}
{% endif %}
{% if oci_rate_limit is defined and oci_rate_limit | length > 0 %}
# Optional rate limit controls for accessing OCI API
rateLimiter:
{% if oci_rate_limit.rate_limit_qps_read %}
rateLimitQPSRead: {{ oci_rate_limit.rate_limit_qps_read }}
{% endif %}
{% if oci_rate_limit.rate_limit_qps_write %}
rateLimitQPSWrite: {{ oci_rate_limit.rate_limit_qps_write }}
{% endif %}
{% if oci_rate_limit.rate_limit_bucket_read %}
rateLimitBucketRead: {{ oci_rate_limit.rate_limit_bucket_read }}
{% endif %}
{% if oci_rate_limit.rate_limit_bucket_write %}
rateLimitBucketWrite: {{ oci_rate_limit.rate_limit_bucket_write }}
{% endif %}
{% endif %}
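To make the template concrete, this is roughly what the rendered controller-manager-config.yml looks like on the instance-principals path; the OCIDs below are placeholders, not real identifiers:

useInstancePrincipals: true
auth:
  # This key is put here too for backwards compatibility
  useInstancePrincipals: true
compartment: ocid1.compartment.oc1..exampleuniqueid
vcn: ocid1.vcn.oc1..exampleuniqueid
loadBalancer:
  subnet1: ocid1.subnet.oc1.iad.exampleuniqueid
securityListManagementMode: All
# (end of rendered example; the manifest template follows)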
apiVersion: v1
data:
cloud-provider.yaml: {{ controller_manager_config_base64 }}
kind: Secret
metadata:
name: oci-cloud-controller-manager
namespace: kube-system
type: Opaque
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: oci-cloud-controller-manager
namespace: kube-system
labels:
k8s-app: oci-cloud-controller-manager
spec:
selector:
matchLabels:
component: oci-cloud-controller-manager
tier: control-plane
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
component: oci-cloud-controller-manager
tier: control-plane
spec:
{% if oci_cloud_controller_pull_secret is defined %}
imagePullSecrets:
- name: {{ oci_cloud_controller_pull_secret }}
{% endif %}
serviceAccountName: cloud-controller-manager
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
nodeSelector:
node-role.kubernetes.io/control-plane: ""
tolerations:
- key: node.cloudprovider.kubernetes.io/uninitialized
value: "true"
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
operator: Exists
effect: NoSchedule
volumes:
- name: cfg
secret:
secretName: oci-cloud-controller-manager
- name: kubernetes
hostPath:
path: /etc/kubernetes
containers:
- name: oci-cloud-controller-manager
image: {{ oci_cloud_controller_pull_source }}:{{ oci_cloud_controller_version }}
command: ["/usr/local/bin/oci-cloud-controller-manager"]
args:
- --cloud-config=/etc/oci/cloud-provider.yaml
- --cloud-provider=oci
- --leader-elect-resource-lock=configmaps
- -v=2
volumeMounts:
- name: cfg
mountPath: /etc/oci
readOnly: true
- name: kubernetes
mountPath: /etc/kubernetes
readOnly: true
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: cloud-controller-manager
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: system:cloud-controller-manager
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- '*'
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
- apiGroups:
- ""
resources:
- services
verbs:
- list
- watch
- patch
- apiGroups:
- ""
resources:
- services/status
verbs:
- update
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- update
# For leader election
- apiGroups:
- ""
resources:
- endpoints
verbs:
- create
- apiGroups:
- ""
resources:
- endpoints
resourceNames:
- "cloud-controller-manager"
verbs:
- get
- list
- watch
- update
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- apiGroups:
- ""
resources:
- configmaps
resourceNames:
- "cloud-controller-manager"
verbs:
- get
- update
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- create
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- list
# For the PVL
- apiGroups:
- ""
resources:
- persistentvolumes
verbs:
- list
- watch
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: oci-cloud-controller-manager
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:cloud-controller-manager
subjects:
- kind: ServiceAccount
name: cloud-controller-manager
namespace: kube-system
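After the manifests were applied, a rollout check along these lines could have confirmed the controller came up; this task is a hypothetical addition, not part of the deleted role:

- name: "OCI Cloud Controller | Wait for DaemonSet rollout (illustrative)"
  command: "{{ bin_dir }}/kubectl -n kube-system rollout status daemonset/oci-cloud-controller-manager --timeout=120s"
  changed_when: false
  when: inventory_hostname == groups['kube_control_plane'][0]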
@@ -59,13 +59,6 @@
- inventory_hostname == groups['kube_control_plane'][0]
tags: node-webhook
-- name: Configure Oracle Cloud provider
-  include_tasks: oci.yml
-  tags: oci
-  when:
-    - cloud_provider is defined
-    - cloud_provider == 'oci'
- name: PriorityClass | Copy k8s-cluster-critical-pc.yml file
copy:
src: k8s-cluster-critical-pc.yml
---
- name: Copy OCI RBAC Manifest
copy:
src: "oci-rbac.yml"
dest: "{{ kube_config_dir }}/oci-rbac.yml"
mode: "0640"
when:
- cloud_provider is defined
- cloud_provider == 'oci'
- inventory_hostname == groups['kube_control_plane'][0]
- name: Apply OCI RBAC
kube:
kubectl: "{{ bin_dir }}/kubectl"
filename: "{{ kube_config_dir }}/oci-rbac.yml"
when:
- cloud_provider is defined
- cloud_provider == 'oci'
- inventory_hostname == groups['kube_control_plane'][0]
@@ -103,14 +103,6 @@ dependencies:
tags:
- container_engine_accelerator
-  - role: kubernetes-apps/cloud_controller/oci
-    when:
-      - cloud_provider is defined
-      - cloud_provider == "oci"
-      - inventory_hostname == groups['kube_control_plane'][0]
-    tags:
-      - oci
- role: kubernetes-apps/gateway_api
when:
- gateway_api_enabled
---
dependencies:
- role: kubernetes-apps/persistent_volumes/openstack
when:
- cloud_provider is defined
- cloud_provider in [ 'openstack' ]
tags:
- persistent_volumes_openstack
- role: kubernetes-apps/persistent_volumes/cinder-csi
when:
- cinder_csi_enabled
---
persistent_volumes_enabled: false
storage_classes:
- name: standard
is_default: true
parameters:
availability: nova
---
- name: Kubernetes Persistent Volumes | Lay down OpenStack Cinder Storage Class template
template:
src: "openstack-storage-class.yml.j2"
dest: "{{ kube_config_dir }}/openstack-storage-class.yml"
mode: "0644"
register: manifests
when:
- inventory_hostname == groups['kube_control_plane'][0]
- name: Kubernetes Persistent Volumes | Add OpenStack Cinder Storage Class
kube:
name: storage-class
kubectl: "{{ bin_dir }}/kubectl"
resource: StorageClass
filename: "{{ kube_config_dir }}/openstack-storage-class.yml"
state: "latest"
when:
- inventory_hostname == groups['kube_control_plane'][0]
- manifests.changed
{% for class in storage_classes %}
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: "{{ class.name }}"
annotations:
storageclass.kubernetes.io/is-default-class: "{{ class.is_default | default(false) | ternary("true","false") }}"
provisioner: kubernetes.io/cinder
{% if class.mount_options is defined %}
mountOptions:
{% for option in class.mount_options | default([]) %}
- "{{ option }}"
{% endfor %}
{% endif %}
parameters:
{% for key, value in (class.parameters | default({})).items() %}
"{{ key }}": "{{ value }}"
{% endfor %}
{% if class.reclaim_policy is defined %}
reclaimPolicy: "{{ class.reclaim_policy }}"
{% endif %}
{% if class.volume_binding_mode is defined %}
volumeBindingMode: "{{ class.volume_binding_mode }}"
{% endif %}
allowVolumeExpansion: {{ expand_persistent_volumes }}
{% endfor %}
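Rendering this template with the defaults shown above (one class named standard, is_default true, parameters.availability nova) and assuming expand_persistent_volumes is false yields roughly:

---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: "standard"
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: kubernetes.io/cinder
parameters:
  "availability": "nova"
allowVolumeExpansion: false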
@@ -186,7 +186,7 @@ kube_encryption_resources: [secrets]
# If non-empty, will use this string as identification instead of the actual hostname
kube_override_hostname: >-
-  {%- if cloud_provider is defined and cloud_provider in ['aws'] -%}
+  {%- if cloud_provider is defined -%}
{%- else -%}
{{ inventory_hostname }}
{%- endif -%}
@@ -210,10 +210,6 @@ apiServer:
{% if kube_apiserver_feature_gates or kube_feature_gates %}
feature-gates: "{{ kube_apiserver_feature_gates | default(kube_feature_gates, true) | join(',') }}"
{% endif %}
-{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "gce"] %}
-    cloud-provider: {{ cloud_provider }}
-    cloud-config: {{ kube_config_dir }}/cloud_config
-{% endif %}
{% if tls_min_version is defined %}
tls-min-version: {{ tls_min_version }}
{% endif %}
@@ -230,13 +226,8 @@ apiServer:
{% if kube_apiserver_tracing %}
tracing-config-file: {{ kube_config_dir }}/tracing/apiserver-tracing.yaml
{% endif %}
-{% if kubernetes_audit or kube_token_auth or kube_webhook_token_auth or ( cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "gce"] ) or apiserver_extra_volumes or ssl_ca_dirs | length %}
+{% if kubernetes_audit or kube_token_auth or kube_webhook_token_auth or apiserver_extra_volumes or ssl_ca_dirs | length %}
extraVolumes:
-{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "gce"] %}
-  - name: cloud-config
-    hostPath: {{ kube_config_dir }}/cloud_config
-    mountPath: {{ kube_config_dir }}/cloud_config
-{% endif %}
{% if kube_token_auth %}
- name: token-auth-config
hostPath: {{ kube_token_dir }}
@@ -326,10 +317,6 @@ controllerManager:
{% for key in kube_kubeadm_controller_extra_args %}
{{ key }}: "{{ kube_kubeadm_controller_extra_args[key] }}"
{% endfor %}
-{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "gce"] %}
-    cloud-provider: {{ cloud_provider }}
-    cloud-config: {{ kube_config_dir }}/cloud_config
-{% endif %}
{% if kube_network_plugin is defined and kube_network_plugin not in ["cloud"] %}
configure-cloud-routes: "false"
{% endif %}
@@ -343,18 +330,8 @@
tls-cipher-suites: {% for tls in tls_cipher_suites %}{{ tls }}{{ "," if not loop.last else "" }}{% endfor %}
{% endif %}
-{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "gce"] or controller_manager_extra_volumes %}
+{% if controller_manager_extra_volumes %}
extraVolumes:
-{% if cloud_provider is defined and cloud_provider in ["openstack"] and openstack_cacert is defined %}
-  - name: openstackcacert
-    hostPath: "{{ kube_config_dir }}/openstack-cacert.pem"
-    mountPath: "{{ kube_config_dir }}/openstack-cacert.pem"
-{% endif %}
-{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "gce"] %}
-  - name: cloud-config
-    hostPath: {{ kube_config_dir }}/cloud_config
-    mountPath: {{ kube_config_dir }}/cloud_config
-{% endif %}
{% for volume in controller_manager_extra_volumes %}
- name: {{ volume.name }}
hostPath: {{ volume.hostPath }}
@@ -9,7 +9,7 @@ kubeadm_use_file_discovery: "{{ remove_anonymous_access }}"
# If non-empty, will use this string as identification instead of the actual hostname
kube_override_hostname: >-
-  {%- if cloud_provider is defined and cloud_provider in ['aws'] -%}
+  {%- if cloud_provider is defined -%}
{%- else -%}
{{ inventory_hostname }}
{%- endif -%}
@@ -136,7 +136,7 @@ kubelet_custom_flags: []
# If non-empty, will use this string as identification instead of the actual hostname
kube_override_hostname: >-
-  {%- if cloud_provider is defined and cloud_provider in ['aws'] -%}
+  {%- if cloud_provider is defined -%}
{%- else -%}
{{ inventory_hostname }}
{%- endif -%}
@@ -153,61 +153,6 @@ kubelet_healthz_bind_address: 127.0.0.1
# sysctl_file_path to add sysctl conf to
sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf"
-# For the OpenStack integration kubelet will need credentials to access
-# OpenStack APIs like Nova and Cinder. By default these values will be
-# read from the environment.
-openstack_auth_url: "{{ lookup('env', 'OS_AUTH_URL') }}"
-openstack_username: "{{ lookup('env', 'OS_USERNAME') }}"
-openstack_password: "{{ lookup('env', 'OS_PASSWORD') }}"
-openstack_region: "{{ lookup('env', 'OS_REGION_NAME') }}"
-openstack_tenant_id: "{{ lookup('env', 'OS_TENANT_ID') | default(lookup('env', 'OS_PROJECT_ID') | default(lookup('env', 'OS_PROJECT_NAME'), true), true) }}"
-openstack_tenant_name: "{{ lookup('env', 'OS_TENANT_NAME') }}"
-openstack_domain_name: "{{ lookup('env', 'OS_USER_DOMAIN_NAME') }}"
-openstack_domain_id: "{{ lookup('env', 'OS_USER_DOMAIN_ID') }}"
-# For the vSphere integration, kubelet will need credentials to access
-# vSphere APIs.
-# Documentation regarding these values can be found at:
-# https://github.com/kubernetes/kubernetes/blob/master/pkg/cloudprovider/providers/vsphere/vsphere.go#L105
-vsphere_vcenter_ip: "{{ lookup('env', 'VSPHERE_VCENTER') }}"
-vsphere_vcenter_port: "{{ lookup('env', 'VSPHERE_VCENTER_PORT') }}"
-vsphere_user: "{{ lookup('env', 'VSPHERE_USER') }}"
-vsphere_password: "{{ lookup('env', 'VSPHERE_PASSWORD') }}"
-vsphere_datacenter: "{{ lookup('env', 'VSPHERE_DATACENTER') }}"
-vsphere_datastore: "{{ lookup('env', 'VSPHERE_DATASTORE') }}"
-vsphere_working_dir: "{{ lookup('env', 'VSPHERE_WORKING_DIR') }}"
-vsphere_insecure: "{{ lookup('env', 'VSPHERE_INSECURE') }}"
-vsphere_resource_pool: "{{ lookup('env', 'VSPHERE_RESOURCE_POOL') }}"
-vsphere_scsi_controller_type: pvscsi
-# vsphere_public_network is the name of the network the VMs are joined to
-vsphere_public_network: "{{ lookup('env', 'VSPHERE_PUBLIC_NETWORK') | default('') }}"
-## When Azure is used, you also need to set the following variables.
-## See docs/azure.md for details on how to get these values.
-# azure_tenant_id:
-# azure_subscription_id:
-# azure_aad_client_id:
-# azure_aad_client_secret:
-# azure_resource_group:
-# azure_location:
-# azure_subnet_name:
-# azure_security_group_name:
-# azure_vnet_name:
-# azure_route_table_name:
-# supported values are 'standard' or 'vmss'
-# azure_vmtype: standard
-# SKU of the load balancer and public IP. Candidate values are: basic and standard.
-azure_loadbalancer_sku: basic
-# excludes control plane nodes from the standard load balancer.
-azure_exclude_master_from_standard_lb: true
-# disables the outbound SNAT for public load balancer rules
-azure_disable_outbound_snat: false
-# use the instance metadata service where possible
-azure_use_instance_metadata: true
-# use specific Azure API endpoints
-azure_cloud: AzurePublicCloud
## Supported TLS min version. Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13.
# tls_min_version: ""
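The OS_* environment lookups removed above could always be overridden in inventory instead; a hypothetical group_vars snippet, with placeholder values only:

# group_vars/all/openstack.yml (illustrative placeholders)
openstack_auth_url: "https://keystone.example.com:5000/v3"
openstack_username: "kubelet-svc"
openstack_password: "{{ vault_openstack_password }}"  # hypothetical vaulted secret
openstack_region: "RegionOne"
openstack_tenant_id: "0123456789abcdef0123456789abcdef"
openstack_domain_name: "Default"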
---
- name: Check azure_tenant_id value
fail:
msg: "azure_tenant_id is missing"
when: azure_tenant_id is not defined or not azure_tenant_id
- name: Check azure_subscription_id value
fail:
msg: "azure_subscription_id is missing"
when: azure_subscription_id is not defined or not azure_subscription_id
- name: Check azure_aad_client_id value
fail:
msg: "azure_aad_client_id is missing"
when: azure_aad_client_id is not defined or not azure_aad_client_id
- name: Check azure_aad_client_secret value
fail:
msg: "azure_aad_client_secret is missing"
when: azure_aad_client_secret is not defined or not azure_aad_client_secret
- name: Check azure_resource_group value
fail:
msg: "azure_resource_group is missing"
when: azure_resource_group is not defined or not azure_resource_group
- name: Check azure_location value
fail:
msg: "azure_location is missing"
when: azure_location is not defined or not azure_location
- name: Check azure_subnet_name value
fail:
msg: "azure_subnet_name is missing"
when: azure_subnet_name is not defined or not azure_subnet_name
- name: Check azure_security_group_name value
fail:
msg: "azure_security_group_name is missing"
when: azure_security_group_name is not defined or not azure_security_group_name
- name: Check azure_vnet_name value
fail:
msg: "azure_vnet_name is missing"
when: azure_vnet_name is not defined or not azure_vnet_name
- name: Check azure_vnet_resource_group value
fail:
msg: "azure_vnet_resource_group is missing"
when: azure_vnet_resource_group is not defined or not azure_vnet_resource_group
- name: Check azure_route_table_name value
fail:
msg: "azure_route_table_name is missing"
when: azure_route_table_name is not defined or not azure_route_table_name
- name: Check azure_loadbalancer_sku value
fail:
msg: "azure_loadbalancer_sku has an invalid value '{{ azure_loadbalancer_sku }}'. Supported values are 'basic', 'standard'"
when: azure_loadbalancer_sku not in ["basic", "standard"]
- name: "Check azure_exclude_master_from_standard_lb is a bool"
assert:
that: azure_exclude_master_from_standard_lb | type_debug == 'bool'
- name: "Check azure_disable_outbound_snat is a bool"
assert:
that: azure_disable_outbound_snat | type_debug == 'bool'
- name: "Check azure_use_instance_metadata is a bool"
assert:
that: azure_use_instance_metadata | type_debug == 'bool'
- name: Check azure_vmtype value
fail:
msg: "azure_vmtype is missing. Supported values are 'standard' or 'vmss'"
when: azure_vmtype is not defined or not azure_vmtype
- name: Check azure_cloud value
fail:
msg: "azure_cloud has an invalid value '{{ azure_cloud }}'. Supported values are 'AzureChinaCloud', 'AzureGermanCloud', 'AzurePublicCloud', 'AzureUSGovernmentCloud'."
when: azure_cloud not in ["AzureChinaCloud", "AzureGermanCloud", "AzurePublicCloud", "AzureUSGovernmentCloud"]
---
- name: Check openstack_auth_url value
fail:
msg: "openstack_auth_url is missing"
when: openstack_auth_url is not defined or not openstack_auth_url
- name: Check openstack_username value
fail:
msg: "openstack_username is missing"
when: openstack_username is not defined or not openstack_username
- name: Check openstack_password value
fail:
msg: "openstack_password is missing"
when: openstack_password is not defined or not openstack_password
- name: Check openstack_region value
fail:
msg: "openstack_region is missing"
when: openstack_region is not defined or not openstack_region
- name: Check openstack_tenant_id value
fail:
msg: "one of openstack_tenant_id or openstack_trust_id must be specified"
when:
- openstack_tenant_id is not defined or not openstack_tenant_id
- openstack_trust_id is not defined
- name: Check openstack_trust_id value
fail:
msg: "one of openstack_tenant_id or openstack_trust_id must be specified"
when:
- openstack_trust_id is not defined or not openstack_trust_id
- openstack_tenant_id is not defined
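The last two tasks jointly enforce that at least one of openstack_tenant_id or openstack_trust_id is supplied. The same either/or rule could be expressed as a single assert — a sketch, not the original code:

- name: Check that openstack_tenant_id or openstack_trust_id is set
  assert:
    that:
      - (openstack_tenant_id | default('', true)) or (openstack_trust_id | default('', true))
    fail_msg: "one of openstack_tenant_id or openstack_trust_id must be specified"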