diff --git a/README.md b/README.md
index 0d2cd5913d41c61a3b0735d4529049e383bf57e0..cb1320756899548625528da7b6ef300dedcfc7a4 100644
--- a/README.md
+++ b/README.md
@@ -134,7 +134,7 @@ plugins can be deployed for a given single cluster.
 Requirements
 ------------
 
--   **Ansible v2.4 (or newer) and python-netaddr is installed on the machine
+-   **Ansible v2.5 (or newer) and python-netaddr are installed on the machine
     that will run Ansible commands**
 -   **Jinja 2.9 (or newer) is required to run the Ansible Playbooks**
 -   The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/downloads.md#offline-environment))
diff --git a/contrib/packaging/rpm/kubespray.spec b/contrib/packaging/rpm/kubespray.spec
index 6ec3ffca70552a8ee2bcf7867346aea166cdf15a..e4c1808bea1ee0229fe3bdffbeb5cbf575d37f6b 100644
--- a/contrib/packaging/rpm/kubespray.spec
+++ b/contrib/packaging/rpm/kubespray.spec
@@ -20,7 +20,7 @@ BuildRequires:  python2-setuptools
 BuildRequires:  python-d2to1
 BuildRequires:  python2-pbr
 
-Requires: ansible >= 2.4.0
+Requires: ansible >= 2.5.0
 Requires: python-jinja2 >= 2.10
 Requires: python-netaddr
 Requires: python-pbr
diff --git a/roles/container-engine/docker/tasks/main.yml b/roles/container-engine/docker/tasks/main.yml
index de4556dcf4a36b9c0b4d6dde4421a07e9ab5de13..e9750f692754d6cd69b78a859fb0cbfa9fd722d4 100644
--- a/roles/container-engine/docker/tasks/main.yml
+++ b/roles/container-engine/docker/tasks/main.yml
@@ -45,7 +45,7 @@
           docker requires a minimum kernel version of
           {{ docker_kernel_min_version }} on
           {{ ansible_distribution }}-{{ ansible_distribution_version }}
-  when: (not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]) and (ansible_kernel|version_compare(docker_kernel_min_version, "<"))
+  when: (not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]) and (ansible_kernel is version(docker_kernel_min_version, "<"))
   tags:
     - facts
 
@@ -58,7 +58,7 @@
     url: "{{docker_repo_key_info.url}}"
     state: present
   register: keyserver_task_result
-  until: keyserver_task_result|succeeded
+  until: keyserver_task_result is succeeded
   retries: 4
   delay: "{{ retry_stagger | d(3) }}"
   with_items: "{{ docker_repo_key_info.repo_keys }}"
@@ -79,7 +79,7 @@
     url: "{{dockerproject_repo_key_info.url}}"
     state: present
   register: keyserver_task_result
-  until: keyserver_task_result|succeeded
+  until: keyserver_task_result is succeeded
   retries: 4
   delay: "{{ retry_stagger | d(3) }}"
   with_items: "{{ dockerproject_repo_key_info.repo_keys }}"
@@ -134,7 +134,7 @@
     state: present
     update_cache: "{{ omit if ansible_distribution == 'Fedora' else True }}"
   register: docker_task_result
-  until: docker_task_result|succeeded
+  until: docker_task_result is succeeded
   retries: 4
   delay: "{{ retry_stagger | d(3) }}"
   with_items: "{{ docker_package_info.pkgs }}"
@@ -145,7 +145,7 @@
 - name: get available packages on Ubuntu
   command: apt-cache policy docker-ce
   when:
-    - docker_task_result|failed
+    - docker_task_result is failed
     - ansible_distribution == 'Ubuntu'
   register: available_packages
 
@@ -153,7 +153,7 @@
   fail:
     msg: "{{available_packages}}"
   when:
-    - docker_task_result|failed
+    - docker_task_result is failed
     - ansible_distribution == 'Ubuntu'
 
 # This is required to ensure any apt upgrade will not break kubernetes
@@ -185,7 +185,7 @@
   when: >
         dns_mode != 'none' and
         resolvconf_mode == 'docker_dns' and
-        installed_docker_version.stdout|version_compare('1.12', '<')
+        installed_docker_version.stdout is version('1.12', '<')
 
 - name: Set docker systemd config
   import_tasks: systemd.yml
diff --git a/roles/container-engine/docker/templates/docker.service.j2 b/roles/container-engine/docker/templates/docker.service.j2
index 8dc82bbb205acfc3a80bbd132f71290dccb7dbf6..c3e2d85a75ce1a4b010415d5cd43ff4aa41ebff2 100644
--- a/roles/container-engine/docker/templates/docker.service.j2
+++ b/roles/container-engine/docker/templates/docker.service.j2
@@ -21,7 +21,7 @@ Environment=GOTRACEBACK=crash
 ExecReload=/bin/kill -s HUP $MAINPID
 Delegate=yes
 KillMode=process
-ExecStart={{ docker_bin_dir }}/docker{% if installed_docker_version.stdout|version_compare('17.03', '<') %} daemon{% else %}d{% endif %} \
+ExecStart={{ docker_bin_dir }}/docker{% if installed_docker_version.stdout is version('17.03', '<') %} daemon{% else %}d{% endif %} \
 {% if ansible_os_family == "Suse" %}
           --containerd /run/containerd/containerd.sock --add-runtime oci=/usr/bin/docker-runc \
 {% endif %}
diff --git a/roles/container-engine/docker/vars/redhat.yml b/roles/container-engine/docker/vars/redhat.yml
index d42bc85220fc7c4e21deadce939f0854a22a5051..41f62c7000f47fe4dc05c2fb3d24ddb6426e3dd3 100644
--- a/roles/container-engine/docker/vars/redhat.yml
+++ b/roles/container-engine/docker/vars/redhat.yml
@@ -41,7 +41,7 @@ docker_pkgs:
 docker_package_info:
   pkg_mgr: yum
   pkgs: |-
-    {%- if docker_version | version_compare('17.04', '<') -%}
+    {%- if docker_version is version('17.04', '<') -%}
     {{ docker_pkgs_use_docker_ce }}
     {%- else -%}
     {{ docker_pkgs }}
diff --git a/roles/container-engine/rkt/tasks/install.yml b/roles/container-engine/rkt/tasks/install.yml
index 91ab4bac6b58da6a2238d6d00030a962f466470e..5865293718d3a50201d2d7426eac553559abbc5c 100644
--- a/roles/container-engine/rkt/tasks/install.yml
+++ b/roles/container-engine/rkt/tasks/install.yml
@@ -20,7 +20,7 @@
     deb: "{{ rkt_download_url }}/{{ rkt_pkg_name }}"
     state: present
   register: rkt_task_result
-  until: rkt_task_result|succeeded
+  until: rkt_task_result is succeeded
   retries: 4
   delay: "{{ retry_stagger | random + 3 }}"
   when: ansible_os_family == "Debian"
@@ -36,7 +36,7 @@
     pkg: "{{ rkt_download_url }}/{{ rkt_pkg_name }}"
     state: present
   register: rkt_task_result
-  until: rkt_task_result|succeeded
+  until: rkt_task_result is succeeded
   retries: 4
   delay: "{{ retry_stagger | random + 3 }}"
   when:
@@ -48,7 +48,7 @@
     name: "{{ rkt_download_url }}/{{ rkt_pkg_name }}"
     state: present
   register: rkt_task_result
-  until: rkt_task_result|succeeded
+  until: rkt_task_result is succeeded
   retries: 4
   delay: "{{ retry_stagger | random + 3 }}"
   when: ansible_os_family == "Suse"
diff --git a/roles/dnsmasq/templates/dnsmasq-autoscaler.yml.j2 b/roles/dnsmasq/templates/dnsmasq-autoscaler.yml.j2
index 582f6639af9373fc02d300dfde81479566bb0f5f..5011b5798a2e81fe0a121ec9aa110cfb836bf9ba 100644
--- a/roles/dnsmasq/templates/dnsmasq-autoscaler.yml.j2
+++ b/roles/dnsmasq/templates/dnsmasq-autoscaler.yml.j2
@@ -31,7 +31,7 @@ spec:
         scheduler.alpha.kubernetes.io/critical-pod: ''
         scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
     spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
       priorityClassName: system-cluster-critical
 {% endif %}
       serviceAccountName: dnsmasq
diff --git a/roles/dnsmasq/templates/dnsmasq-deploy.yml.j2 b/roles/dnsmasq/templates/dnsmasq-deploy.yml.j2
index 59ef45ba9a7c65c771c6af4992a65121cc171672..c891097577ff4377ce0496ca248950f0f5ad5c0d 100644
--- a/roles/dnsmasq/templates/dnsmasq-deploy.yml.j2
+++ b/roles/dnsmasq/templates/dnsmasq-deploy.yml.j2
@@ -21,7 +21,7 @@ spec:
         kubernetes.io/cluster-service: "true"
         kubespray/dnsmasq-checksum: "{{ dnsmasq_stat.stat.checksum }}"
     spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
       priorityClassName: system-cluster-critical
 {% endif %}
       tolerations:
diff --git a/roles/download/tasks/download_container.yml b/roles/download/tasks/download_container.yml
index 7e3923606fb44f153b28bc078d8d12de443d2f1d..bf0c9b1cff4341cea35e23023338937a8fe55482 100644
--- a/roles/download/tasks/download_container.yml
+++ b/roles/download/tasks/download_container.yml
@@ -15,7 +15,7 @@
 - name: container_download | Download containers if pull is required or told to always pull (delegate)
   command: "{{ docker_bin_dir }}/docker pull {{ pull_args }}"
   register: pull_task_result
-  until: pull_task_result|succeeded
+  until: pull_task_result is succeeded
   retries: 4
   delay: "{{ retry_stagger | random + 3 }}"
   when:
@@ -30,7 +30,7 @@
 - name: container_download | Download containers if pull is required or told to always pull (all nodes)
   command: "{{ docker_bin_dir }}/docker pull {{ pull_args }}"
   register: pull_task_result
-  until: pull_task_result|succeeded
+  until: pull_task_result is succeeded
   retries: 4
   delay: "{{ retry_stagger | random + 3 }}"
   when:
diff --git a/roles/download/tasks/sync_container.yml b/roles/download/tasks/sync_container.yml
index c7e37d7f3d69e5142f629de66d67358f9be9da4a..b9cb69a30aebc4f42255c25546ea1181105b32b6 100644
--- a/roles/download/tasks/sync_container.yml
+++ b/roles/download/tasks/sync_container.yml
@@ -99,7 +99,7 @@
   delegate_facts: no
   become: false
   register: get_task
-  until: get_task|succeeded
+  until: get_task is succeeded
   retries: 4
   delay: "{{ retry_stagger | random + 3 }}"
   when:
diff --git a/roles/kubernetes-apps/ansible/tasks/kubedns.yml b/roles/kubernetes-apps/ansible/tasks/kubedns.yml
index e7bf8298fa2fcfdca3019c81e7220e156e632c60..cc805778b0d595ff9096b8fde653ec114d46fb2f 100644
--- a/roles/kubernetes-apps/ansible/tasks/kubedns.yml
+++ b/roles/kubernetes-apps/ansible/tasks/kubedns.yml
@@ -36,7 +36,7 @@
   when:
     - dns_mode in ['kubedns', 'dnsmasq_kubedns']
     - inventory_hostname == groups['kube-master'][0]
-    - rbac_enabled and kubedns_version|version_compare("1.11.0", "<", strict=True)
+    - rbac_enabled and kubedns_version is version("1.11.0", "<", strict=True)
   tags:
     - dnsmasq
     - kubedns
diff --git a/roles/kubernetes-apps/ansible/tasks/main.yml b/roles/kubernetes-apps/ansible/tasks/main.yml
index 62169d27d495c49a508e2e3602a30b8d32d67314..449588c9d7c7c581c38efbba1674e05d11df8862 100644
--- a/roles/kubernetes-apps/ansible/tasks/main.yml
+++ b/roles/kubernetes-apps/ansible/tasks/main.yml
@@ -52,9 +52,9 @@
   when:
     - dns_mode != 'none'
     - inventory_hostname == groups['kube-master'][0]
-    - not item|skipped
+    - not item is skipped
   register: resource_result
-  until: resource_result|succeeded
+  until: resource_result is succeeded
   retries: 4
   delay: 5
   tags:
diff --git a/roles/kubernetes-apps/ansible/tasks/netchecker.yml b/roles/kubernetes-apps/ansible/tasks/netchecker.yml
index 655ef744b0fd2c71b1adce9c9e71b645fed0c16e..46e033667e06d34e3f7edfc0b365ebbb7bb2a844 100644
--- a/roles/kubernetes-apps/ansible/tasks/netchecker.yml
+++ b/roles/kubernetes-apps/ansible/tasks/netchecker.yml
@@ -68,4 +68,4 @@
     filename: "{{kube_config_dir}}/{{item.item.file}}"
     state: "latest"
   with_items: "{{ manifests.results }}"
-  when: inventory_hostname == groups['kube-master'][0] and not item|skipped
+  when: inventory_hostname == groups['kube-master'][0] and not item is skipped
diff --git a/roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2 b/roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2
index aef110238e8499ba509f764795ae6c521d8a33c3..134392750eb2a433d8c788ec045fa79c28559759 100644
--- a/roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2
@@ -24,7 +24,7 @@ spec:
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
       priorityClassName: system-cluster-critical
 {% endif %}
       serviceAccountName: coredns
diff --git a/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2 b/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2
index 17695a961ea3b6fd14e4ceb3986641d77e5bf465..61a8bddd0232c7c7c03d63c654331801ba9b4f7f 100644
--- a/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2
@@ -140,7 +140,7 @@ spec:
       labels:
         k8s-app: kubernetes-dashboard
     spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
       priorityClassName: system-cluster-critical
 {% endif %}
       containers:
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2
index 1852c4aeab423abafcbc4381d15bf1f7e39ac00e..6426a969ebbc90c68bc0ecb1f0cfbe7da60681d9 100644
--- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2
@@ -28,7 +28,7 @@ spec:
       labels:
         k8s-app: kubedns-autoscaler
     spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
       priorityClassName: system-cluster-critical
 {% endif %}
       # When having win nodes in cluster without this patch, this pod cloud try to be created in windows
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2
index f6a4c8ff68cc1c362bbca0d43ee8c7ec0064c8f2..af6df513c61f766ef9bc2ca948034a26e7b926ee 100644
--- a/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2
@@ -27,7 +27,7 @@ spec:
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
       priorityClassName: system-cluster-critical
 {% endif %}
       # When having win nodes in cluster without this patch, this pod cloud try to be created in windows
diff --git a/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2
index 09d9e498d36102a06d7528df25d3141ab6359c8f..a90a1ad9a7e7f13bbe388eb768cb9e28be145431 100644
--- a/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2
@@ -12,7 +12,7 @@ spec:
       labels:
         app: netchecker-agent
     spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
       priorityClassName: {% if netcheck_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
 {% endif %}
       tolerations:
diff --git a/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2
index 376171c2836e0bef9d356eb38380197fe2f6919a..79db63926ff4ba2146a73885ac3625ad192ef0ff 100644
--- a/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2
@@ -16,10 +16,10 @@ spec:
       # When having win nodes in cluster without this patch, this pod cloud try to be created in windows
       nodeSelector:
         beta.kubernetes.io/os: linux
-{% if kube_version | version_compare('v1.6', '>=') %}
+{% if kube_version is version('v1.6', '>=') %}
       dnsPolicy: ClusterFirstWithHostNet
 {% endif %}
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
       priorityClassName: {% if netcheck_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
 {% endif %}
       tolerations:
diff --git a/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2
index 39a1eafa5fa74a8b3a7f014fa522f5e0045dcc68..3060cf27ad9705cbeaf0d806fd7d630dab26d237 100644
--- a/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2
@@ -11,7 +11,7 @@ spec:
         app: netchecker-server
       namespace: {{ netcheck_namespace }}
     spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
       priorityClassName: {% if netcheck_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
 {% endif %}
       containers:
diff --git a/roles/kubernetes-apps/cluster_roles/tasks/main.yml b/roles/kubernetes-apps/cluster_roles/tasks/main.yml
index 8cd1f5052f4323443e5b218478d122985aa15901..de2e6f84041cfb701c72622aa06d098e000194cc 100644
--- a/roles/kubernetes-apps/cluster_roles/tasks/main.yml
+++ b/roles/kubernetes-apps/cluster_roles/tasks/main.yml
@@ -49,7 +49,7 @@
   with_items: "{{ psp_manifests.results }}"
   when:
     - inventory_hostname == groups['kube-master'][0]
-    - not item|skipped
+    - not item is skipped
 
 - name: Kubernetes Apps | Add ClusterRoleBinding to admit nodes
   template:
@@ -130,8 +130,8 @@
     - rbac_enabled
     - cloud_provider is defined
     - cloud_provider == 'vsphere'
-    - kube_version | version_compare('v1.9.0', '>=')
-    - kube_version | version_compare('v1.9.3', '<=')
+    - kube_version is version('v1.9.0', '>=')
+    - kube_version is version('v1.9.3', '<=')
     - inventory_hostname == groups['kube-master'][0]
   tags: vsphere
 
@@ -146,8 +146,8 @@
     - cloud_provider == 'vsphere'
     - vsphere_cloud_provider.rc is defined
     - vsphere_cloud_provider.rc != 0
-    - kube_version | version_compare('v1.9.0', '>=')
-    - kube_version | version_compare('v1.9.3', '<=')
+    - kube_version is version('v1.9.0', '>=')
+    - kube_version is version('v1.9.3', '<=')
     - inventory_hostname == groups['kube-master'][0]
   tags: vsphere
 
@@ -164,8 +164,8 @@
     - cloud_provider == 'vsphere'
     - vsphere_cloud_provider.rc is defined
     - vsphere_cloud_provider.rc != 0
-    - kube_version | version_compare('v1.9.0', '>=')
-    - kube_version | version_compare('v1.9.3', '<=')
+    - kube_version is version('v1.9.0', '>=')
+    - kube_version is version('v1.9.3', '<=')
     - inventory_hostname == groups['kube-master'][0]
   tags: vsphere
 
@@ -178,7 +178,7 @@
 - name: PriorityClass | Copy k8s-cluster-critical-pc.yml file
   copy: src=k8s-cluster-critical-pc.yml dest={{ kube_config_dir }}/k8s-cluster-critical-pc.yml
   when:
-    - kube_version|version_compare('v1.11.1', '>=')
+    - kube_version is version('v1.11.1', '>=')
     - inventory_hostname == groups['kube-master'][0]
 
 - name: PriorityClass | Create k8s-cluster-critical
@@ -189,5 +189,5 @@
     filename: "{{ kube_config_dir }}/k8s-cluster-critical-pc.yml"
     state: latest
   when:
-    - kube_version|version_compare('v1.11.1', '>=')
+    - kube_version is version('v1.11.1', '>=')
     - inventory_hostname == groups['kube-master'][0]
diff --git a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/deploy-cephfs-provisioner.yml.j2 b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/deploy-cephfs-provisioner.yml.j2
index 466a56598a9a3739326e913fad9bdb447cc745ee..e75c8feff2f8dba33b67ee0d3cbafa1a415d156c 100644
--- a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/deploy-cephfs-provisioner.yml.j2
+++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/deploy-cephfs-provisioner.yml.j2
@@ -19,7 +19,7 @@ spec:
         app: cephfs-provisioner
         version: {{ cephfs_provisioner_image_tag }}
     spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
       priorityClassName: {% if cephfs_provisioner_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
 {% endif %}
       serviceAccount: cephfs-provisioner
diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ds.yml.j2 b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ds.yml.j2
index 713b72287215882340af5847bfc680133bd98205..22b2f8b22582eec1b0531b680991348a2f105f01 100644
--- a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ds.yml.j2
+++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ds.yml.j2
@@ -18,7 +18,7 @@ spec:
         k8s-app: local-volume-provisioner
         version: {{ local_volume_provisioner_image_tag }}
     spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
       priorityClassName: {% if local_volume_provisioner_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
 {% endif %}
       serviceAccountName: local-volume-provisioner
diff --git a/roles/kubernetes-apps/helm/tasks/main.yml b/roles/kubernetes-apps/helm/tasks/main.yml
index 65695d4fc100f19374ba67b075884aca6cbdfd71..40476b0bf3061095d99f745402787b61f1f4249c 100644
--- a/roles/kubernetes-apps/helm/tasks/main.yml
+++ b/roles/kubernetes-apps/helm/tasks/main.yml
@@ -45,7 +45,7 @@
     --upgrade --tiller-image={{ tiller_image_repo }}:{{ tiller_image_tag }}
     {% if rbac_enabled %} --service-account=tiller{% endif %}
     {% if tiller_node_selectors is defined %} --node-selectors {{ tiller_node_selectors }}{% endif %}
-    {% if kube_version|version_compare('v1.11.1', '>=') %} --override spec.template.spec.priorityClassName={% if tiller_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{% endif %}
+    {% if kube_version is version('v1.11.1', '>=') %} --override spec.template.spec.priorityClassName={% if tiller_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{% endif %}
     {% if tiller_override is defined and tiller_override != "" %} --override {{ tiller_override }}{% endif %}
     {% if tiller_max_history is defined %} --history-max={{ tiller_max_history }}{% endif %}
     {% if tiller_enable_tls %} --tiller-tls --tiller-tls-verify --tiller-tls-cert={{ tiller_tls_cert }} --tiller-tls-key={{ tiller_tls_key }} --tls-ca-cert={{ tiller_tls_ca_cert }} {% endif %}
@@ -65,7 +65,7 @@
     {% if helm_stable_repo_url is defined %} --stable-repo-url {{ helm_stable_repo_url }}{% endif %}
     {% if rbac_enabled %} --service-account=tiller{% endif %}
     {% if tiller_node_selectors is defined %} --node-selectors {{ tiller_node_selectors }}{% endif %}
-    {% if kube_version|version_compare('v1.11.1', '>=') %} --override spec.template.spec.priorityClassName={% if tiller_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{% endif %}
+    {% if kube_version is version('v1.11.1', '>=') %} --override spec.template.spec.priorityClassName={% if tiller_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{% endif %}
     {% if tiller_override is defined and tiller_override != "" %} --override {{ tiller_override }}{% endif %}
     {% if tiller_max_history is defined %} --history-max={{ tiller_max_history }}{% endif %}
     {% if tiller_enable_tls %} --tiller-tls --tiller-tls-verify --tiller-tls-cert={{ tiller_tls_cert }} --tiller-tls-key={{ tiller_tls_key }} --tls-ca-cert={{ tiller_tls_ca_cert }} {% endif %}
@@ -74,7 +74,7 @@
     | {{bin_dir}}/kubectl apply -f -
   changed_when: false
   when:
-    - (tiller_override is defined and tiller_override != "") or (kube_version|version_compare('v1.11.1', '>='))
+    - (tiller_override is defined and tiller_override != "") or (kube_version is version('v1.11.1', '>='))
     - inventory_hostname == groups['kube-master'][0]
   environment: "{{proxy_env}}"
 
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/deploy-cert-manager.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/deploy-cert-manager.yml.j2
index c6e981f7b67b74996e69faaeda136ac42712d94f..9fa45dae76afc731df4ac127f640c01397b7ec3d 100644
--- a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/deploy-cert-manager.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/deploy-cert-manager.yml.j2
@@ -22,7 +22,7 @@ spec:
         release: cert-manager
       annotations:
     spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
       priorityClassName: {% if cert_manager_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
 {% endif %}
       serviceAccountName: cert-manager
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/deploy-default-backend.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/deploy-default-backend.yml.j2
index 87c6dadfd1f73ed49f12dd359db18d18ec1e8b21..b3359356e7c70c107c1d82383616307223d67a54 100644
--- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/deploy-default-backend.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/deploy-default-backend.yml.j2
@@ -19,7 +19,7 @@ spec:
         app.kubernetes.io/name: default-backend
         app.kubernetes.io/part-of: ingress-nginx
     spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
       priorityClassName: {% if ingress_nginx_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
 {% endif %}
       terminationGracePeriodSeconds: 60
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ds-ingress-nginx-controller.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ds-ingress-nginx-controller.yml.j2
index a504c1b3a9d8be2eba220c188d23a97e60badee6..6de89c15a66ee1246b13f6c4524bc5e20a50d079 100644
--- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ds-ingress-nginx-controller.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ds-ingress-nginx-controller.yml.j2
@@ -29,7 +29,7 @@ spec:
       nodeSelector:
         {{ ingress_nginx_nodeselector | to_nice_yaml }}
 {%- endif %}
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
       priorityClassName: {% if ingress_nginx_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
 {% endif %}
       containers:
diff --git a/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml b/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml
index e4215ed62011052352759ab28b83a67486064a30..5f48e6c419f47c2afa920a673803637e4b5b517e 100644
--- a/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml
@@ -10,7 +10,7 @@
   with_items:
     - "{{ calico_node_manifests.results }}"
   when:
-    - inventory_hostname == groups['kube-master'][0] and not item|skipped
+    - inventory_hostname == groups['kube-master'][0] and not item is skipped
 
 - name: "calico upgrade complete"
   shell: "{{ bin_dir }}/calico-upgrade complete --no-prompts --apiconfigv1 /etc/calico/etcdv2.yml --apiconfigv3 /etc/calico/etcdv3.yml"
diff --git a/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml b/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml
index 3640fe762eec1aa9385d89993ce8608825e2802f..d5776def15172389aef7cb92400027311483b932 100644
--- a/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml
@@ -8,4 +8,4 @@
     filename: "{{kube_config_dir}}/{{item.item.file}}"
     state: "latest"
   with_items: "{{ canal_manifests.results }}"
-  when: inventory_hostname == groups['kube-master'][0] and not item|skipped
+  when: inventory_hostname == groups['kube-master'][0] and not item is skipped
diff --git a/roles/kubernetes-apps/network_plugin/cilium/tasks/main.yml b/roles/kubernetes-apps/network_plugin/cilium/tasks/main.yml
index 5d90bdb018257a0fbf43933701903bb174c174ad..503da1a2a0e9da1bab0626ae5b35e73cc320d046 100755
--- a/roles/kubernetes-apps/network_plugin/cilium/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/cilium/tasks/main.yml
@@ -8,7 +8,7 @@
     filename: "{{kube_config_dir}}/{{item.item.file}}"
     state: "latest"
   with_items: "{{ cilium_node_manifests.results }}"
-  when: inventory_hostname == groups['kube-master'][0] and not item|skipped
+  when: inventory_hostname == groups['kube-master'][0] and not item is skipped
 
 - name: Cilium | Wait for pods to run
   command: "{{bin_dir}}/kubectl -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"
diff --git a/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml b/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml
index bdf954bf99d673bb628a8db1d562e82ed265882b..2be0739f8f4e0a170dbd558173ce1742f889eff1 100644
--- a/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml
@@ -8,7 +8,7 @@
     filename: "{{kube_config_dir}}/{{item.item.file}}"
     state: "latest"
   with_items: "{{ flannel_node_manifests.results }}"
-  when: inventory_hostname == groups['kube-master'][0] and not item|skipped
+  when: inventory_hostname == groups['kube-master'][0] and not item is skipped
 
 - name: Flannel | Wait for flannel subnet.env file presence
   wait_for:
diff --git a/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml b/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml
index 7e9377da4ad676be1512ec3fb9cbbd94c721d17d..4c9c9c73cea414cc94886a1af47e4ee4df31bd67 100644
--- a/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml
+++ b/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml
@@ -34,4 +34,4 @@
     - "{{ calico_kube_manifests.results }}"
   when:
     - inventory_hostname == groups['kube-master'][0]
-    - not item|skipped
+    - not item is skipped
diff --git a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-controllers.yml.j2 b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-controllers.yml.j2
index 5d26fd7729408f3f9acd9170725e75486347458d..dcb7c9f5f3ee6f7685b8617c56723761976cb01e 100644
--- a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-controllers.yml.j2
+++ b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-controllers.yml.j2
@@ -29,7 +29,7 @@ spec:
       tolerations:
         - effect: NoSchedule
           operator: Exists
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
       priorityClassName: system-cluster-critical
 {% endif %}
       containers:
diff --git a/roles/kubernetes-apps/registry/templates/registry-proxy-ds.yml.j2 b/roles/kubernetes-apps/registry/templates/registry-proxy-ds.yml.j2
index 0fe493a81c355160fa38a56bcd15871199da2973..71f29d8424f09ed792015157fa5212de3f853df3 100644
--- a/roles/kubernetes-apps/registry/templates/registry-proxy-ds.yml.j2
+++ b/roles/kubernetes-apps/registry/templates/registry-proxy-ds.yml.j2
@@ -21,7 +21,7 @@ spec:
         kubernetes.io/cluster-service: "true"
         version: v{{ registry_proxy_image_tag }}
     spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
       priorityClassName: {% if registry_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
 {% endif %}
       serviceAccountName: registry-proxy
diff --git a/roles/kubernetes-apps/registry/templates/registry-rs.yml.j2 b/roles/kubernetes-apps/registry/templates/registry-rs.yml.j2
index 83a1b058cff272982fef8b6ccd8e3d4dbf30e339..ac6a0dfdad7aa07051fe839332d9782506f3f0b5 100644
--- a/roles/kubernetes-apps/registry/templates/registry-rs.yml.j2
+++ b/roles/kubernetes-apps/registry/templates/registry-rs.yml.j2
@@ -22,7 +22,7 @@ spec:
         version: v{{ registry_image_tag }}
         kubernetes.io/cluster-service: "true"
     spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
       priorityClassName: {% if registry_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
 {% endif %}
       serviceAccountName: registry
diff --git a/roles/kubernetes/kubeadm/tasks/main.yml b/roles/kubernetes/kubeadm/tasks/main.yml
index c15b0699b25fa2fb98fa77bdab7d1438d12e516d..84fd31f69140da885af445583f5c9d3f6faf7d03 100644
--- a/roles/kubernetes/kubeadm/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/tasks/main.yml
@@ -34,19 +34,19 @@
 - name: sets kubeadm api version to v1alpha1
   set_fact:
     kubeadmConfig_api_version: v1alpha1
-  when: kubeadm_output.stdout|version_compare('v1.11.0', '<')
+  when: kubeadm_output.stdout is version('v1.11.0', '<')
 
 - name: sets kubeadm api version to v1alpha2
   set_fact:
     kubeadmConfig_api_version: v1alpha2
   when:
-    - kubeadm_output.stdout|version_compare('v1.11.0', '>=')
-    - kubeadm_output.stdout|version_compare('v1.12.0', '<')
+    - kubeadm_output.stdout is version('v1.11.0', '>=')
+    - kubeadm_output.stdout is version('v1.12.0', '<')
 
 - name: sets kubeadm api version to v1alpha3
   set_fact:
     kubeadmConfig_api_version: v1alpha3
-  when: kubeadm_output.stdout|version_compare('v1.12.0', '>=')
+  when: kubeadm_output.stdout is version('v1.12.0', '>=')
 
 - name: Create kubeadm client config
   template:
diff --git a/roles/kubernetes/master/defaults/main.yml b/roles/kubernetes/master/defaults/main.yml
index 49a09e2126bd35ad8af09e2f48a1eb03e78b505c..58cd99f6d525bd67fc883814f3289520dd38449b 100644
--- a/roles/kubernetes/master/defaults/main.yml
+++ b/roles/kubernetes/master/defaults/main.yml
@@ -80,7 +80,7 @@ kube_apiserver_admission_control:
   - ServiceAccount
   - DefaultStorageClass
   - >-
-      {%- if kube_version | version_compare('v1.9', '<') -%}
+      {%- if kube_version is version('v1.9', '<') -%}
       GenericAdmissionWebhook
       {%- else -%}
       MutatingAdmissionWebhook,ValidatingAdmissionWebhook
diff --git a/roles/kubernetes/master/tasks/kubeadm-setup.yml b/roles/kubernetes/master/tasks/kubeadm-setup.yml
index f6735047908f6d319ed539046ebc22a21c898925..ec3cdbe081e1e231b8151a96c6853a7fb973299a 100644
--- a/roles/kubernetes/master/tasks/kubeadm-setup.yml
+++ b/roles/kubernetes/master/tasks/kubeadm-setup.yml
@@ -84,19 +84,19 @@
 - name: sets kubeadm api version to v1alpha1
   set_fact:
     kubeadmConfig_api_version: v1alpha1
-  when: kubeadm_output.stdout|version_compare('v1.11.0', '<')
+  when: kubeadm_output.stdout is version('v1.11.0', '<')
 
 - name: sets kubeadm api version to v1alpha2
   set_fact:
     kubeadmConfig_api_version: v1alpha2
   when:
-    - kubeadm_output.stdout|version_compare('v1.11.0', '>=')
-    - kubeadm_output.stdout|version_compare('v1.12.0', '<')
+    - kubeadm_output.stdout is version('v1.11.0', '>=')
+    - kubeadm_output.stdout is version('v1.12.0', '<')
 
 - name: sets kubeadm api version to v1alpha3
   set_fact:
     kubeadmConfig_api_version: v1alpha3
-  when: kubeadm_output.stdout|version_compare('v1.12.0', '>=')
+  when: kubeadm_output.stdout is version('v1.12.0', '>=')
 
 # Nginx LB(default), If kubeadm_config_api_fqdn is defined, use other LB by kubeadm controlPlaneEndpoint.
 - name: set kubeadm_config_api_fqdn define
diff --git a/roles/kubernetes/master/templates/kubeadm-config.v1alpha1.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.v1alpha1.yaml.j2
index 2a9b8582d3c6ed2ee113db74c9f180faa7a42b64..531c27f9ab6de632bd25dd8086520de682a71104 100644
--- a/roles/kubernetes/master/templates/kubeadm-config.v1alpha1.yaml.j2
+++ b/roles/kubernetes/master/templates/kubeadm-config.v1alpha1.yaml.j2
@@ -26,10 +26,10 @@ cloudProvider: {{ cloud_provider }}
 {% if kube_proxy_mode == 'ipvs' %}
 kubeProxy:
   config:
-{% if kube_version | version_compare('v1.10', '<') %}
+{% if kube_version is version('v1.10', '<') %}
     featureGates: SupportIPVSProxyMode=true
 {% endif %}
-{% if kube_version | version_compare('v1.10', '>=') %}
+{% if kube_version is version('v1.10', '>=') %}
     featureGates:
       SupportIPVSProxyMode: true
 {% endif %}
@@ -49,7 +49,7 @@ apiServerExtraArgs:
   insecure-bind-address: {{ kube_apiserver_insecure_bind_address }}
 {% endif %}
   insecure-port: "{{ kube_apiserver_insecure_port }}"
-{% if kube_version | version_compare('v1.10', '<') %}
+{% if kube_version is version('v1.10', '<') %}
   admission-control: {{ kube_apiserver_admission_control | join(',') }}
 {% else %}
 {% if kube_apiserver_enable_admission_plugins|length > 0 %}
@@ -60,7 +60,7 @@ apiServerExtraArgs:
 {% endif %}
 {% endif %}
   apiserver-count: "{{ kube_apiserver_count }}"
-{% if kube_version | version_compare('v1.9', '>=') %}
+{% if kube_version is version('v1.9', '>=') %}
   endpoint-reconciler-type: lease
 {% endif %}
 {% if etcd_events_cluster_enabled %}
@@ -72,7 +72,7 @@ apiServerExtraArgs:
   request-timeout: "{{ kube_apiserver_request_timeout }}"
   repair-malformed-updates: "false"
   enable-aggregator-routing: "{{ kube_api_aggregator_routing }}"
-{% if kube_api_anonymous_auth is defined and kube_version | version_compare('v1.5', '>=')  %}
+{% if kube_api_anonymous_auth is defined and kube_version is version('v1.5', '>=')  %}
   anonymous-auth: "{{ kube_api_anonymous_auth }}"
 {% endif %}
 {% if kube_basic_auth|default(true) %}
diff --git a/roles/kubernetes/master/templates/kubeadm-config.v1alpha2.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.v1alpha2.yaml.j2
index 61e62a43ee5559bd9a70b48d7f7b5639faca5176..39c11cba9a34563bd64bc4289e314bc1641e0fe5 100644
--- a/roles/kubernetes/master/templates/kubeadm-config.v1alpha2.yaml.j2
+++ b/roles/kubernetes/master/templates/kubeadm-config.v1alpha2.yaml.j2
@@ -41,7 +41,7 @@ apiServerExtraArgs:
   insecure-bind-address: {{ kube_apiserver_insecure_bind_address }}
 {% endif %}
   insecure-port: "{{ kube_apiserver_insecure_port }}"
-{% if kube_version | version_compare('v1.10', '<') %}
+{% if kube_version is version('v1.10', '<') %}
   admission-control: {{ kube_apiserver_admission_control | join(',') }}
 {% else %}
 {% if kube_apiserver_enable_admission_plugins|length > 0 %}
@@ -52,7 +52,7 @@ apiServerExtraArgs:
 {% endif %}
 {% endif %}
   apiserver-count: "{{ kube_apiserver_count }}"
-{% if kube_version | version_compare('v1.9', '>=') %}
+{% if kube_version is version('v1.9', '>=') %}
   endpoint-reconciler-type: lease
 {% endif %}
 {% if etcd_events_cluster_enabled %}
@@ -64,7 +64,7 @@ apiServerExtraArgs:
   request-timeout: "{{ kube_apiserver_request_timeout }}"
   repair-malformed-updates: "false"
   enable-aggregator-routing: "{{ kube_api_aggregator_routing }}"
-{% if kube_api_anonymous_auth is defined and kube_version | version_compare('v1.5', '>=')  %}
+{% if kube_api_anonymous_auth is defined and kube_version is version('v1.5', '>=')  %}
   anonymous-auth: "{{ kube_api_anonymous_auth }}"
 {% endif %}
 {% if kube_basic_auth|default(true) %}
diff --git a/roles/kubernetes/master/templates/kubeadm-config.v1alpha3.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.v1alpha3.yaml.j2
index 92990a137e6053b2cc984c946ead8621011a4004..142bd1f7b9eecd005b1bfdb6d41d8838b4d02150 100644
--- a/roles/kubernetes/master/templates/kubeadm-config.v1alpha3.yaml.j2
+++ b/roles/kubernetes/master/templates/kubeadm-config.v1alpha3.yaml.j2
@@ -44,7 +44,7 @@ apiServerExtraArgs:
   insecure-bind-address: {{ kube_apiserver_insecure_bind_address }}
 {% endif %}
   insecure-port: "{{ kube_apiserver_insecure_port }}"
-{% if kube_version | version_compare('v1.10', '<') %}
+{% if kube_version is version('v1.10', '<') %}
   admission-control: {{ kube_apiserver_admission_control | join(',') }}
 {% else %}
 {% if kube_apiserver_enable_admission_plugins|length > 0 %}
@@ -55,7 +55,7 @@ apiServerExtraArgs:
 {% endif %}
 {% endif %}
   apiserver-count: "{{ kube_apiserver_count }}"
-{% if kube_version | version_compare('v1.9', '>=') %}
+{% if kube_version is version('v1.9', '>=') %}
   endpoint-reconciler-type: lease
 {% endif %}
 {% if etcd_events_cluster_enabled %}
diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
index 47f0de3e82c9a8170e71a54bcdb9ee4cc783e677..9261519284be425a0a4d0b8448fe699637144a6a 100644
--- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
+++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
@@ -11,10 +11,10 @@ metadata:
     kubespray.apiserver-cert/serial: "{{ apiserver_cert_serial }}"
 spec:
   hostNetwork: true
-{% if kube_version | version_compare('v1.6', '>=')  %}
+{% if kube_version is version('v1.6', '>=')  %}
   dnsPolicy: ClusterFirst
 {% endif %}
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
   priorityClassName: system-node-critical
 {% endif %}
   containers:
@@ -43,7 +43,7 @@ spec:
 {%   if etcd_events_cluster_enabled %}
     - --etcd-servers-overrides=/events#{{ etcd_events_access_addresses_semicolon }}
 {% endif %}
-{%   if kube_version | version_compare('v1.9', '<')  %}
+{%   if kube_version is version('v1.9', '<')  %}
     - --etcd-quorum-read=true
 {% endif %}
     - --etcd-cafile={{ etcd_cert_dir }}/ca.pem
@@ -54,10 +54,10 @@ spec:
 {% endif %}
     - --bind-address={{ kube_apiserver_bind_address }}
     - --apiserver-count={{ kube_apiserver_count }}
-{% if kube_version | version_compare('v1.9', '>=') %}
+{% if kube_version is version('v1.9', '>=') %}
     - --endpoint-reconciler-type=lease
 {% endif %}
-{% if kube_version | version_compare('v1.10', '<') %}
+{% if kube_version is version('v1.10', '<') %}
     - --admission-control={{ kube_apiserver_admission_control | join(',') }}
 {% else %}
 {% if kube_apiserver_enable_admission_plugins|length > 0 %}
@@ -114,7 +114,7 @@ spec:
 {%   endfor %}
 {% endif %}
 {% if enable_network_policy %}
-{%   if kube_version | version_compare('v1.8', '<')  %}
+{%   if kube_version is version('v1.8', '<')  %}
     - --runtime-config=extensions/v1beta1/networkpolicies=true
 {%   endif %}
 {% endif %}
@@ -124,7 +124,7 @@ spec:
     - --cloud-provider={{ cloud_provider }}
     - --cloud-config={{ kube_config_dir }}/cloud_config
 {% endif %}
-{% if kube_api_anonymous_auth is defined and kube_version | version_compare('v1.5', '>=')  %}
+{% if kube_api_anonymous_auth is defined and kube_version is version('v1.5', '>=')  %}
     - --anonymous-auth={{ kube_api_anonymous_auth }}
 {% endif %}
 {% if authorization_modes %}
@@ -136,7 +136,7 @@ spec:
 {% if kube_feature_gates %}
     - --feature-gates={{ kube_feature_gates|join(',') }}
 {% endif %}
-{% if kube_version | version_compare('v1.9', '>=') %}
+{% if kube_version is version('v1.9', '>=') %}
     - --requestheader-client-ca-file={{ kube_cert_dir }}/{{ kube_front_proxy_ca }}
 {# FIXME(mattymo): Vault certs do not work with front-proxy-client #}
 {% if cert_management == "vault" %}
diff --git a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2
index c2208a9e012e882178db6e877c23c50e8d477697..2511c4d7eb25734d712a470a5fe602eda3587e69 100644
--- a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2
+++ b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2
@@ -10,10 +10,10 @@ metadata:
     kubespray.controller-manager-cert/serial: "{{ controller_manager_cert_serial }}"
 spec:
   hostNetwork: true
-{% if kube_version | version_compare('v1.6', '>=') %}
+{% if kube_version is version('v1.6', '>=') %}
   dnsPolicy: ClusterFirst
 {% endif %}
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
   priorityClassName: system-node-critical
 {% endif %}
   containers:
diff --git a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2
index b1178f4202c80327791bc6dc4b760bc295397c59..ebe258200ebe416a7e0c0aeef784819a37ed2b9e 100644
--- a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2
+++ b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2
@@ -9,10 +9,10 @@ metadata:
     kubespray.scheduler-cert/serial: "{{ scheduler_cert_serial }}"
 spec:
   hostNetwork: true
-{% if kube_version | version_compare('v1.6', '>=') %}
+{% if kube_version is version('v1.6', '>=') %}
   dnsPolicy: ClusterFirst
 {% endif %}
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
   priorityClassName: system-node-critical
 {% endif %}
   containers:
diff --git a/roles/kubernetes/node/templates/aws-cloud-config.j2 b/roles/kubernetes/node/templates/aws-cloud-config.j2
index 918ab310073340bc491ad47ec55d0fdfd7a9b81a..c1fe086494ae14e8ae2b1f7960bb3ec19c4bca2b 100644
--- a/roles/kubernetes/node/templates/aws-cloud-config.j2
+++ b/roles/kubernetes/node/templates/aws-cloud-config.j2
@@ -1,17 +1,17 @@
 [Global]
-{% if kube_version | version_compare('v1.6', '>=') %}
+{% if kube_version is version('v1.6', '>=') %}
 zone={{ aws_zone|default("") }}
 vpc={{ aws_vpc|default("") }}
 subnetId={{ aws_subnet_id|default("") }}
 routeTableId={{ aws_route_table_id|default("") }}
-{% if kube_version | version_compare('v1.10', '>=') %}
+{% if kube_version is version('v1.10', '>=') %}
 roleArn={{ aws_role_arn|default("") }}
 {% endif %}
 kubernetesClusterTag={{ aws_kubernetes_cluster_tag|default("") }}
 kubernetesClusterId={{ aws_kubernetes_cluster_id|default("") }}
 disableSecurityGroupIngress={{ "true" if aws_disable_security_group_ingress|default(False) else "false" }}
 disableStrictZoneCheck={{ "true" if aws_disable_strict_zone_check|default(False) else "false" }}
-{% if kube_version | version_compare('v1.7', '>=') %}
+{% if kube_version is version('v1.7', '>=') %}
 elbSecurityGroup={{ aws_elb_security_group|default("") }}
 {% endif %}
 {% endif %}
diff --git a/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2 b/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2
index 96ed45fde3eb01b2229f91c47d43ccab6155a020..d5d771662d86426c5578eb561f9ae54392aaf56e 100644
--- a/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2
+++ b/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2
@@ -17,7 +17,7 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
 {# start kubeadm specific settings #}
 --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \
 --kubeconfig={{ kube_config_dir }}/kubelet.conf \
-{% if kube_version | version_compare('v1.8', '<') %}
+{% if kube_version is version('v1.8', '<') %}
 --require-kubeconfig \
 {% endif %}
 {% if kubelet_authentication_token_webhook %}
@@ -29,7 +29,7 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
 --enforce-node-allocatable={{ kubelet_enforce_node_allocatable }} \
 --client-ca-file={{ kube_cert_dir }}/ca.crt \
 --pod-manifest-path={{ kube_manifest_dir }} \
-{% if kube_version | version_compare('v1.12.0', '<') %}
+{% if kube_version is version('v1.12.0', '<') %}
 --cadvisor-port={{ kube_cadvisor_port }} \
 {% endif %}
 {# end kubeadm specific settings #}
@@ -37,7 +37,7 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
 --node-status-update-frequency={{ kubelet_status_update_frequency }} \
 --cgroup-driver={{ kubelet_cgroup_driver|default(kubelet_cgroup_driver_detected) }} \
 --max-pods={{ kubelet_max_pods }} \
-{% if container_manager == 'docker' and kube_version | version_compare('v1.12.0', '<') %}
+{% if container_manager == 'docker' and kube_version is version('v1.12.0', '<') %}
 --docker-disable-shared-pid={{ kubelet_disable_shared_pid }} \
 {% endif %}
 {% if container_manager == 'crio' %}
@@ -46,7 +46,7 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
 {% endif %}
 --anonymous-auth=false \
 --read-only-port={{ kube_read_only_port }} \
-{% if kube_version | version_compare('v1.8', '<') %}
+{% if kube_version is version('v1.8', '<') %}
 --experimental-fail-swap-on={{ kubelet_fail_swap_on|default(true)}} \
 {% else %}
 --fail-swap-on={{ kubelet_fail_swap_on|default(true)}} \
diff --git a/roles/kubernetes/node/templates/kubelet.standard.env.j2 b/roles/kubernetes/node/templates/kubelet.standard.env.j2
index 3af478344fd758dce038d593d16d0c7722612c83..f6189ac0b920593131eaaf8fccec2ddba6218863 100644
--- a/roles/kubernetes/node/templates/kubelet.standard.env.j2
+++ b/roles/kubernetes/node/templates/kubelet.standard.env.j2
@@ -12,12 +12,12 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
 {# Base kubelet args #}
 {% set kubelet_args_base %}
 --pod-manifest-path={{ kube_manifest_dir }} \
-{% if kube_version | version_compare('v1.12.0', '<') %}
+{% if kube_version is version('v1.12.0', '<') %}
 --cadvisor-port={{ kube_cadvisor_port }} \
 {% endif %}
 --pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_image_tag }} \
 --node-status-update-frequency={{ kubelet_status_update_frequency }} \
-{% if container_manager == 'docker' and kube_version | version_compare('v1.12.0', '<') %}
+{% if container_manager == 'docker' and kube_version is version('v1.12.0', '<') %}
 --docker-disable-shared-pid={{ kubelet_disable_shared_pid }} \
 {% endif %}
 --client-ca-file={{ kube_cert_dir }}/ca.pem \
@@ -25,9 +25,9 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
 --tls-private-key-file={{ kube_cert_dir }}/node-{{ inventory_hostname }}-key.pem \
 --anonymous-auth=false \
 --read-only-port={{ kube_read_only_port }} \
-{% if kube_version | version_compare('v1.6', '>=') %}
+{% if kube_version is version('v1.6', '>=') %}
 {# flag got removed with 1.7.0 #}
-{% if kube_version | version_compare('v1.7', '<') %}
+{% if kube_version is version('v1.7', '<') %}
 --enable-cri={{ kubelet_enable_cri }} \
 {% endif %}
 {% if container_manager == 'crio' %}
@@ -37,7 +37,7 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
 --cgroup-driver={{ kubelet_cgroup_driver|default(kubelet_cgroup_driver_detected) }} \
 --cgroups-per-qos={{ kubelet_cgroups_per_qos }} \
 --max-pods={{ kubelet_max_pods }} \
-{% if kube_version | version_compare('v1.8', '<') %}
+{% if kube_version is version('v1.8', '<') %}
 --experimental-fail-swap-on={{ kubelet_fail_swap_on|default(true)}} \
 {% else %}
 --fail-swap-on={{ kubelet_fail_swap_on|default(true)}} \
@@ -68,7 +68,7 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
 {% set kubelet_args_dns %}{{ kubelet_args_cluster_dns }} --cluster-domain={{ dns_domain }} --resolv-conf={{ kube_resolv_conf }}{% endset %}
 
 {# Location of the apiserver #}
-{% if kube_version | version_compare('v1.8', '<') %}
+{% if kube_version is version('v1.8', '<') %}
 {% set kubelet_args_kubeconfig %}--kubeconfig={{ kube_config_dir}}/node-kubeconfig.yaml --require-kubeconfig{% endset %}
 {% else %}
 {% set kubelet_args_kubeconfig %}--kubeconfig={{ kube_config_dir}}/node-kubeconfig.yaml{% endset %}
@@ -76,7 +76,7 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
 
 {% if standalone_kubelet|bool %}
 {# We are on a master-only host. Make the master unschedulable in this case. #}
-{% if kube_version | version_compare('v1.6', '>=') %}
+{% if kube_version is version('v1.6', '>=') %}
 {# Set taints on the master so that it's unschedulable by default. Use node-role.kubernetes.io/master taint like kubeadm. #}
 {% set kubelet_args_kubeconfig %}{{ kubelet_args_kubeconfig }} --register-with-taints=node-role.kubernetes.io/master=:NoSchedule{% endset %}
 {% else %}
diff --git a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2
index 8ffcfa524ba36e15a9511409ee13909ecaa5e4d0..83341f5e8230c112ebadebd49b1b6b8221f2bf82 100644
--- a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2
+++ b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2
@@ -9,13 +9,13 @@ metadata:
     kubespray.kube-proxy-cert/serial: "{{ kube_proxy_cert_serial }}"
 spec:
   hostNetwork: true
-{% if kube_version | version_compare('v1.6', '>=') %}
+{% if kube_version is version('v1.6', '>=') %}
   dnsPolicy: ClusterFirst
 {% endif %}
   # When having win nodes in cluster without this patch, this pod cloud try to be created in windows
   nodeSelector:
     beta.kubernetes.io/os: linux
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
   priorityClassName: system-node-critical
 {% endif %}
   containers:
@@ -56,7 +56,7 @@ spec:
     - --masquerade-all
 {% elif kube_proxy_mode == 'ipvs' %}
     - --masquerade-all
-{% if kube_version | version_compare('v1.10', '<') %}
+{% if kube_version is version('v1.10', '<') %}
     - --feature-gates=SupportIPVSProxyMode=true
 {% endif %}
     - --ipvs-min-sync-period=5s
diff --git a/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2
index e3d54b5237df775823fbba7a4c2c44606b516b1b..d3dd5d2964cf3ef1d01af91cf45b989e24330895 100644
--- a/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2
+++ b/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2
@@ -10,7 +10,7 @@ spec:
   # When having win nodes in cluster without this patch, this pod cloud try to be created in windows
   nodeSelector:
     beta.kubernetes.io/os: linux
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
   priorityClassName: system-node-critical
 {% endif %}
   containers:
diff --git a/roles/kubernetes/node/templates/vsphere-cloud-config.j2 b/roles/kubernetes/node/templates/vsphere-cloud-config.j2
index 1383f78bbbca45502702d5ff8765df3d89e400ea..53f4cd037b2281b475522dc43421f2c69cf5d238 100644
--- a/roles/kubernetes/node/templates/vsphere-cloud-config.j2
+++ b/roles/kubernetes/node/templates/vsphere-cloud-config.j2
@@ -4,7 +4,7 @@ password = "{{ vsphere_password }}"
 port = {{ vsphere_vcenter_port }}
 insecure-flag = {{ vsphere_insecure }}
 
-{% if kube_version | version_compare('v1.9.2', '>=') %}
+{% if kube_version is version('v1.9.2', '>=') %}
 datacenters = "{{ vsphere_datacenter }}"
 {% else %}
 datastore = "{{ vsphere_datastore }}"
@@ -19,7 +19,7 @@ vm-name = "{{ vsphere_vm_name }}"
 {% endif %}
 {% endif %}
 
-{% if kube_version | version_compare('v1.9.2', '>=') %}
+{% if kube_version is version('v1.9.2', '>=') %}
 
 [VirtualCenter "{{ vsphere_vcenter_ip }}"]
 
diff --git a/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml b/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml
index 72f2bf5283c571adf965fcf26a5ba04efc466cab..868240b718a046211a5c5836ae663a504098804d 100644
--- a/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml
+++ b/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml
@@ -2,7 +2,7 @@
 - name: Stop if ansible version is too low
   assert:
     that:
-      - ansible_version.full|version_compare('2.3.0', '>=')
+      - ansible_version.full is version('2.3.0', '>=')
   run_once: yes
 
 - name: Stop if either kube-master, kube-node or etcd is empty
@@ -114,7 +114,7 @@
 
 - name: Stop if kernel version is too low
   assert:
-    that: ansible_kernel.split('-')[0]|version_compare('4.8', '>=')
+    that: ansible_kernel.split('-')[0] is version('4.8', '>=')
   when: kube_network_plugin == 'cilium'
   ignore_errors: "{{ ignore_assert_errors }}"
 
@@ -146,7 +146,7 @@
 - name: "Check that calico version is enought for upgrade"
   assert:
     that:
-      - calico_version_on_server.stdout|version_compare('v2.6.5', '>=')
+      - calico_version_on_server.stdout is version('v2.6.5', '>=')
     msg: "Your version of calico is not fresh enough for upgrade. Minimum version v2.6.5"
   when:
     - 'calico_version_on_server.stdout is defined'
diff --git a/roles/kubernetes/preinstall/tasks/0070-system-packages.yml b/roles/kubernetes/preinstall/tasks/0070-system-packages.yml
index 2d1137cbfd5f99be5d53108a6b618848e54bc8fc..fdbe37506edae6161b19a0e518b28695cec76953 100644
--- a/roles/kubernetes/preinstall/tasks/0070-system-packages.yml
+++ b/roles/kubernetes/preinstall/tasks/0070-system-packages.yml
@@ -4,7 +4,7 @@
     update_cache: yes
     name: '*'
   register: yum_task_result
-  until: yum_task_result|succeeded
+  until: yum_task_result is succeeded
   retries: 4
   delay: "{{ retry_stagger | random + 3 }}"
   when:
@@ -15,7 +15,7 @@
 - name: Expire management cache (YUM) before update - Redhat
   shell: yum clean expire-cache
   register: expire_cache_output
-  until: expire_cache_output|succeeded
+  until: expire_cache_output is succeeded
   retries: 4
   delay: "{{ retry_stagger | random + 3 }}"
   when:
@@ -27,7 +27,7 @@
 - name: Update package management cache (YUM) - Redhat
   shell: yum makecache
   register: make_cache_output
-  until: make_cache_output|succeeded
+  until: make_cache_output is succeeded
   retries: 4
   delay: "{{ retry_stagger | random + 3 }}"
   when:
@@ -40,7 +40,7 @@
 - name: Update package management cache (zypper) - SUSE
   shell: zypper -n --gpg-auto-import-keys ref
   register: make_cache_output
-  until: make_cache_output|succeeded
+  until: make_cache_output is succeeded
   retries: 4
   delay: "{{ retry_stagger | random + 3 }}"
   when:
@@ -58,7 +58,7 @@
 - name: Install python-dnf for latest RedHat versions
   command: dnf install -y python-dnf yum
   register: dnf_task_result
-  until: dnf_task_result|succeeded
+  until: dnf_task_result is succeeded
   retries: 4
   delay: "{{ retry_stagger | random + 3 }}"
   when:
@@ -86,7 +86,7 @@
     name: "{{ item }}"
     state: latest
   register: pkgs_task_result
-  until: pkgs_task_result|succeeded
+  until: pkgs_task_result is succeeded
   retries: 4
   delay: "{{ retry_stagger | random + 3 }}"
   with_items: "{{required_pkgs | default([]) | union(common_required_pkgs|default([]))}}"
diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml
index 6215a16ff14ae2c2e17860823b019a12f668a7f6..de7915d8cb8a5f69660670b2cd95b5839134348f 100644
--- a/roles/kubespray-defaults/defaults/main.yaml
+++ b/roles/kubespray-defaults/defaults/main.yaml
@@ -296,7 +296,7 @@ feature_gate_v1_12: []
 ## List of key=value pairs that describe feature gates for
 ## the k8s cluster.
 kube_feature_gates: |-
-  {%- if kube_version | version_compare('v1.12.0', '<') -%}
+  {%- if kube_version is version('v1.12.0', '<') -%}
   {{ feature_gate_v1_11 }}
   {%- else -%}
   {{ feature_gate_v1_12 }}
diff --git a/roles/network_plugin/calico/rr/tasks/main.yml b/roles/network_plugin/calico/rr/tasks/main.yml
index 521da1e4ab60d3b58b718e26c3d2f45ce2635142..4a53a6cf627d4405f52830f1007feaac076e197c 100644
--- a/roles/network_plugin/calico/rr/tasks/main.yml
+++ b/roles/network_plugin/calico/rr/tasks/main.yml
@@ -63,7 +63,7 @@
   delay: "{{ retry_stagger | random + 3 }}"
   delegate_to: "{{groups['etcd'][0]}}"
   when:
-    - calico_version | version_compare("v3.0.0", ">=")
+    - calico_version is version("v3.0.0", ">=")
 
 - name: Calico-rr | Configure route reflector (legacy)
   command: |-
@@ -81,7 +81,7 @@
   delay: "{{ retry_stagger | random + 3 }}"
   delegate_to: "{{groups['etcd'][0]}}"
   when:
-    - calico_version | version_compare("v3.0.0", "<")
+    - calico_version is version("v3.0.0", "<")
 
 - meta: flush_handlers
 
diff --git a/roles/network_plugin/calico/tasks/check.yml b/roles/network_plugin/calico/tasks/check.yml
index 14d39bc11c63db037d8e2e4ec83cbe2dbea4fd80..0482432d78bc8cf1ef44f799e68647f2a681b6aa 100644
--- a/roles/network_plugin/calico/tasks/check.yml
+++ b/roles/network_plugin/calico/tasks/check.yml
@@ -20,7 +20,7 @@
     - name: "Check that calico version is enough for upgrade"
       assert:
         that:
-          - calico_version_on_server.stdout|version_compare('v2.6.5', '>=')
+          - calico_version_on_server.stdout is version('v2.6.5', '>=')
         msg: "Your version of calico is not fresh enough for upgrade"
       when: calico_upgrade_enabled
 
@@ -28,8 +28,8 @@
       set_fact:
         calico_upgrade_needed: True
       when:
-        - calico_version_on_server.stdout|version_compare('v2.6.5', '>=')
-        - calico_version_on_server.stdout|version_compare('v3.0.0', '<')
+        - calico_version_on_server.stdout is version('v2.6.5', '>=')
+        - calico_version_on_server.stdout is version('v3.0.0', '<')
 
   when:
     - 'calico_version_on_server.stdout is defined'
diff --git a/roles/network_plugin/calico/tasks/install.yml b/roles/network_plugin/calico/tasks/install.yml
index a76a0650d4a68a8df8adadf4637a1a44fb214f81..123733016782f7db0face0613fd52c63f6bc58ce 100644
--- a/roles/network_plugin/calico/tasks/install.yml
+++ b/roles/network_plugin/calico/tasks/install.yml
@@ -102,7 +102,7 @@
   delegate_to: "{{ groups['kube-master'][0] }}"
   when:
     - 'calico_conf.stdout == "0"'
-    - calico_version | version_compare("v3.0.0", ">=")
+    - calico_version is version("v3.0.0", ">=")
 
 - name: Calico | Configure calico network pool (legacy)
   shell: >
@@ -119,7 +119,7 @@
   delegate_to: "{{ groups['kube-master'][0] }}"
   when:
     - 'calico_conf.stdout == "0"'
-    - calico_version | version_compare("v3.0.0", "<")
+    - calico_version is version("v3.0.0", "<")
 
 - name: "Determine nodeToNodeMesh needed state"
   set_fact:
@@ -144,19 +144,19 @@
   run_once: true
   delegate_to: "{{ groups['kube-master'][0] }}"
   when:
-    - calico_version | version_compare('v3.0.0', '>=')
+    - calico_version is version('v3.0.0', '>=')
 
 - name: Calico | Set global as_num (legacy)
   command: "{{ bin_dir}}/calicoctl config set asNumber {{ global_as_num }}"
   run_once: true
   when:
-    - calico_version | version_compare('v3.0.0', '<')
+    - calico_version is version('v3.0.0', '<')
 
 - name: Calico | Disable node mesh (legacy)
   command: "{{ bin_dir }}/calicoctl config set nodeToNodeMesh off"
   run_once: yes
   when:
-    - calico_version | version_compare('v3.0.0', '<')
+    - calico_version is version('v3.0.0', '<')
     - nodeToMeshEnabled|default(True)
 
 - name: Calico | Configure node asNumber for per node peering
@@ -176,7 +176,7 @@
   retries: 4
   delay: "{{ retry_stagger | random + 3 }}"
   when:
-    - calico_version | version_compare('v3.0.0', '>=')
+    - calico_version is version('v3.0.0', '>=')
     - peer_with_router|default(false)
     - inventory_hostname in groups['k8s-cluster']
     - local_as is defined
@@ -199,7 +199,7 @@
   retries: 4
   delay: "{{ retry_stagger | random + 3 }}"
   when:
-    - calico_version | version_compare('v3.0.0', '<')
+    - calico_version is version('v3.0.0', '<')
     - peer_with_router|default(false)
     - inventory_hostname in groups['k8s-cluster']
     - local_as is defined
@@ -223,7 +223,7 @@
   with_items:
     - "{{ peers|rejectattr('scope','equalto', 'global')|default([]) }}"
   when:
-    - calico_version | version_compare('v3.0.0', '>=')
+    - calico_version is version('v3.0.0', '>=')
     - peer_with_router|default(false)
     - inventory_hostname in groups['k8s-cluster']
 
@@ -280,7 +280,7 @@
   with_items: "{{ peers|selectattr('scope','equalto', 'global')|default([]) }}"
   run_once: true
   when:
-    - calico_version | version_compare('v3.0.0', '<')
+    - calico_version is version('v3.0.0', '<')
     - peer_with_router|default(false)
     - inventory_hostname in groups['k8s-cluster']
 
@@ -302,7 +302,7 @@
   with_items:
     - "{{ groups['calico-rr'] | default([]) }}"
   when:
-    - calico_version | version_compare('v3.0.0', '>=')
+    - calico_version is version('v3.0.0', '>=')
     - peer_with_calico_rr|default(false)
     - inventory_hostname in groups['k8s-cluster']
     - hostvars[item]['cluster_id'] == cluster_id
@@ -322,7 +322,7 @@
   delay: "{{ retry_stagger | random + 3 }}"
   with_items: "{{ groups['calico-rr'] | default([]) }}"
   when:
-    - calico_version | version_compare('v3.0.0', '<')
+    - calico_version is version('v3.0.0', '<')
     - not calico_upgrade_enabled
     - peer_with_calico_rr|default(false)
     - hostvars[item]['cluster_id'] == cluster_id
diff --git a/roles/network_plugin/calico/templates/calico-node.yml.j2 b/roles/network_plugin/calico/templates/calico-node.yml.j2
index 1cfac91abc86501efd8ba9c8fd32dd625782ae6d..dfee561341bdb4f99ba00a02600415faf626a765 100644
--- a/roles/network_plugin/calico/templates/calico-node.yml.j2
+++ b/roles/network_plugin/calico/templates/calico-node.yml.j2
@@ -22,7 +22,7 @@ spec:
         scheduler.alpha.kubernetes.io/critical-pod: ''
         kubespray.etcd-cert/serial: "{{ etcd_client_cert_serial }}"
     spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
       priorityClassName: system-node-critical
 {% endif %}
       hostNetwork: true
diff --git a/roles/network_plugin/canal/templates/canal-node.yaml.j2 b/roles/network_plugin/canal/templates/canal-node.yaml.j2
index e0d0c7cff746356a45595e46e16cda955c4eaf31..68a6b99102c87eb0fa13b86f69815704d859ae1b 100644
--- a/roles/network_plugin/canal/templates/canal-node.yaml.j2
+++ b/roles/network_plugin/canal/templates/canal-node.yaml.j2
@@ -18,7 +18,7 @@ spec:
       labels:
         k8s-app: canal-node
     spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
       priorityClassName: system-node-critical
 {% endif %}
       hostNetwork: true
diff --git a/roles/network_plugin/cilium/templates/cilium-ds.yml.j2 b/roles/network_plugin/cilium/templates/cilium-ds.yml.j2
index ff76d6d7c8b12ca9571ea37a29c8deda5818812f..b8a6306cf7d9a05b6b73c7e814e9297f3aa1e847 100755
--- a/roles/network_plugin/cilium/templates/cilium-ds.yml.j2
+++ b/roles/network_plugin/cilium/templates/cilium-ds.yml.j2
@@ -32,7 +32,7 @@ spec:
         prometheus.io/port: "9090"
 {% endif %}
     spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
       priorityClassName: system-node-critical
 {% endif %}
       serviceAccountName: cilium
diff --git a/roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2 b/roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2
index 706027623cbce81b9a041918fd327ccfca156797..d1cd66b50f59e3241e6f646a5f40a751c7d384d2 100644
--- a/roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2
@@ -19,7 +19,7 @@ spec:
         # Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
         scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
       priorityClassName: system-node-critical
 {% endif %}
       # The API proxy must run in the host network namespace so that
diff --git a/roles/network_plugin/contiv/templates/contiv-cleanup.yml.j2 b/roles/network_plugin/contiv/templates/contiv-cleanup.yml.j2
index 3f715a47362d2494adbde01455ef46495b938c76..73111072ca6d91a8cde343fad74d47395e3d7cbf 100644
--- a/roles/network_plugin/contiv/templates/contiv-cleanup.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-cleanup.yml.j2
@@ -18,7 +18,7 @@ spec:
         # Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
         scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
       priorityClassName: system-node-critical
 {% endif %}
       hostNetwork: true
diff --git a/roles/network_plugin/contiv/templates/contiv-etcd-proxy.yml.j2 b/roles/network_plugin/contiv/templates/contiv-etcd-proxy.yml.j2
index 7e826a3bfc4bf7243fdf46a3f24129f56dcca2cb..eb39b87dec1a2f899fc3cf9727381419c1290eef 100644
--- a/roles/network_plugin/contiv/templates/contiv-etcd-proxy.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-etcd-proxy.yml.j2
@@ -17,7 +17,7 @@ spec:
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
       priorityClassName: system-node-critical
 {% endif %}
       hostNetwork: true
diff --git a/roles/network_plugin/contiv/templates/contiv-etcd.yml.j2 b/roles/network_plugin/contiv/templates/contiv-etcd.yml.j2
index 134c9c5b5d207188c0169fb06cbdfc04c33101fa..460bf9fb374a0007e81b9b52d96bf3cd4ae995ac 100644
--- a/roles/network_plugin/contiv/templates/contiv-etcd.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-etcd.yml.j2
@@ -17,7 +17,7 @@ spec:
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
       priorityClassName: system-node-critical
 {% endif %}
       hostNetwork: true
diff --git a/roles/network_plugin/contiv/templates/contiv-netmaster.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netmaster.yml.j2
index 55481b2615d993c9205dccfaf0249af050689273..0c0ad5938be65de430e237c9d3774841c36210ed 100644
--- a/roles/network_plugin/contiv/templates/contiv-netmaster.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-netmaster.yml.j2
@@ -19,7 +19,7 @@ spec:
         # Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
         scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
       priorityClassName: system-node-critical
 {% endif %}
       # The netmaster must run in the host network namespace so that
diff --git a/roles/network_plugin/contiv/templates/contiv-netplugin.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netplugin.yml.j2
index 4a996edeaf447d329b7d201fb851e281d358086a..045b9e7eb281ff8e9b27688dbe53b16441569f81 100644
--- a/roles/network_plugin/contiv/templates/contiv-netplugin.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-netplugin.yml.j2
@@ -23,7 +23,7 @@ spec:
         # Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
         scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
       priorityClassName: system-node-critical
 {% endif %}
       hostNetwork: true
diff --git a/roles/network_plugin/contiv/templates/contiv-ovs.yml.j2 b/roles/network_plugin/contiv/templates/contiv-ovs.yml.j2
index 0ded7fe7e8e5469bef8c237a5c863570d2edbeb4..40c37b6ad75a17c8e3e010aedbebd23c2d9df6f4 100644
--- a/roles/network_plugin/contiv/templates/contiv-ovs.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-ovs.yml.j2
@@ -20,7 +20,7 @@ spec:
         # Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
         scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
       priorityClassName: system-node-critical
 {% endif %}
       hostNetwork: true
diff --git a/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 b/roles/network_plugin/flannel/templates/cni-flannel.yml.j2
index 3f211945743370cfe236c974f66577d69e12a9c4..2e09d03836831b480a39a88b64b4ef3f9ab69d17 100644
--- a/roles/network_plugin/flannel/templates/cni-flannel.yml.j2
+++ b/roles/network_plugin/flannel/templates/cni-flannel.yml.j2
@@ -55,7 +55,7 @@ spec:
         # Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
         scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
       priorityClassName: system-node-critical
 {% endif %}
       serviceAccountName: flannel
diff --git a/roles/network_plugin/kube-router/templates/kube-router.yml.j2 b/roles/network_plugin/kube-router/templates/kube-router.yml.j2
index eb150daf14a781f4ccdc365989f45803d7fc82c3..e939e89f928f3dba48c06eafbbd3b358e7793c44 100644
--- a/roles/network_plugin/kube-router/templates/kube-router.yml.j2
+++ b/roles/network_plugin/kube-router/templates/kube-router.yml.j2
@@ -63,7 +63,7 @@ spec:
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
       priorityClassName: system-cluster-critical
 {% endif %}
       serviceAccountName: kube-router
diff --git a/roles/network_plugin/weave/templates/weave-net.yml.j2 b/roles/network_plugin/weave/templates/weave-net.yml.j2
index b8a9a6871af447bbfa4f08d76c26a3becd377686..6c07a7ee86497d4b091d2a3d20e0e01963e67e59 100644
--- a/roles/network_plugin/weave/templates/weave-net.yml.j2
+++ b/roles/network_plugin/weave/templates/weave-net.yml.j2
@@ -118,7 +118,7 @@ items:
             # Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
             scheduler.alpha.kubernetes.io/critical-pod: ''
         spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
           priorityClassName: system-node-critical
 {% endif %}
           containers:
diff --git a/roles/upgrade/pre-upgrade/tasks/main.yml b/roles/upgrade/pre-upgrade/tasks/main.yml
index 6eaa041c14373b3a75c68da245c995e7a2d5c2ce..9105d28eaf2ce83df4f08d5c537be38e7c466fa8 100644
--- a/roles/upgrade/pre-upgrade/tasks/main.yml
+++ b/roles/upgrade/pre-upgrade/tasks/main.yml
@@ -44,7 +44,7 @@
 
 - name: Ensure minimum version for drain label selector if necessary
   assert:
-    that: "kubectl_version.stdout.split(' ')[-1] | version_compare('v1.10.0', '>=')"
+    that: "kubectl_version.stdout.split(' ')[-1] is version('v1.10.0', '>=')"
   when:
     - drain_nodes
     - needs_cordoning
diff --git a/roles/vault/handlers/main.yml b/roles/vault/handlers/main.yml
index 3aeb750412b3b436ef910db283fb24d1ebc99601..e6dd20ef04eb04f36479e8933c813800aaa80811 100644
--- a/roles/vault/handlers/main.yml
+++ b/roles/vault/handlers/main.yml
@@ -12,7 +12,7 @@
     headers: "{{ vault_client_headers }}"
     status_code: "{{ vault_successful_http_codes | join(',') }}"
   register: vault_health_check
-  until: vault_health_check|succeeded
+  until: vault_health_check is succeeded
   retries: 10
   delay: "{{ retry_stagger | random + 3 }}"
   run_once: yes
diff --git a/roles/vault/tasks/bootstrap/start_vault_temp.yml b/roles/vault/tasks/bootstrap/start_vault_temp.yml
index 3720d9beb23771848943495cefc01c4848b9ca30..9ff327366c6c8f6b92dbbec9ad5c9f2b95bae5a0 100644
--- a/roles/vault/tasks/bootstrap/start_vault_temp.yml
+++ b/roles/vault/tasks/bootstrap/start_vault_temp.yml
@@ -20,7 +20,7 @@
     url: "http://localhost:{{ vault_port }}/"
     secret_shares: 1
     secret_threshold: 1
-  until: "vault_temp_init|succeeded"
+  until: "vault_temp_init is succeeded"
   retries: 4
   delay: "{{ retry_stagger | random + 3 }}"
   register: vault_temp_init
diff --git a/roles/vault/tasks/shared/check_vault.yml b/roles/vault/tasks/shared/check_vault.yml
index 999a36f32ba5ba27ba4afb5d2e15f4473b1def62..5543fc03843f819bd18ddca2e1b2b8b4a9cdfd59 100644
--- a/roles/vault/tasks/shared/check_vault.yml
+++ b/roles/vault/tasks/shared/check_vault.yml
@@ -4,7 +4,7 @@
   shell: docker stop {{ vault_temp_container_name }} || rkt stop {{ vault_temp_container_name }}
   failed_when: false
   register: vault_temp_stop
-  changed_when: vault_temp_stop|succeeded
+  changed_when: vault_temp_stop is succeeded
 
 # Check if vault is reachable on the localhost
 - name: check_vault | Attempt to pull local https Vault health
diff --git a/roles/vault/tasks/shared/find_leader.yml b/roles/vault/tasks/shared/find_leader.yml
index 1c1dcdf3054604695bdc8faf8422ec0077225b87..398b8d570d7b478404f8bc78bb9e9e522ea2d1c4 100644
--- a/roles/vault/tasks/shared/find_leader.yml
+++ b/roles/vault/tasks/shared/find_leader.yml
@@ -7,7 +7,7 @@
     method: HEAD
     status_code: 200,429,501,503
   register: vault_leader_check
-  until: "vault_leader_check|succeeded"
+  until: "vault_leader_check is succeeded"
   retries: 10
 
 - name: find_leader | Set fact for current http leader
diff --git a/tests/testcases/040_check-network-adv.yml b/tests/testcases/040_check-network-adv.yml
index 31f16f9113e30eb46aa6f9f4b7dbcbca4808aece..acf8658f97b5c69396325781ae3e5b3918d8576f 100644
--- a/tests/testcases/040_check-network-adv.yml
+++ b/tests/testcases/040_check-network-adv.yml
@@ -55,7 +55,7 @@
       no_log: true
 
     - debug: var=agents.content|from_json
-      failed_when: not agents|success and not agents.content=='{}'
+      failed_when: not agents is success and not agents.content=='{}'
       run_once: true
 
     - name: Check netchecker status
@@ -70,7 +70,7 @@
       when: not agents.content=='{}'
 
     - debug: var=result.content|from_json
-      failed_when: not result|success
+      failed_when: not result is success
       run_once: true
       when: not agents.content=='{}'
       delegate_to: "{{groups['kube-master'][0]}}"