diff --git a/docs/ha-mode.md b/docs/ha-mode.md
index 0c9f54ef9c3296e9d4223b1e948af1226e21f6c8..cb83801eab20630e97ee328475bc0b1d8a9966b8 100644
--- a/docs/ha-mode.md
+++ b/docs/ha-mode.md
@@ -24,7 +24,7 @@ where an external LB or virtual IP management is inconvenient.  This option is
 configured by the variable `loadbalancer_apiserver_localhost` (defaults to
 `True`, or `False` if there is an external `loadbalancer_apiserver` defined).
 You may also define the port the local internal loadbalancer uses by changing
-`nginx_kube_apiserver_port`.  This defaults to the value of
+`loadbalancer_apiserver_port`.  This defaults to the value of
 `kube_apiserver_port`. It is also important to note that Kubespray will only
 configure kubelet and kube-proxy on non-master nodes to use the local internal
 loadbalancer.
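+
+For example, a minimal `group_vars` snippet that enables the local internal
+loadbalancer with haproxy on the default secure port could look like this
+(illustrative values; both `loadbalancer_apiserver_type` and
+`loadbalancer_apiserver_port` are optional and default to `nginx` and
+`kube_apiserver_port` respectively):
+
+```yaml
+loadbalancer_apiserver_localhost: true
+loadbalancer_apiserver_type: haproxy
+loadbalancer_apiserver_port: 6443
+```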
@@ -114,7 +114,7 @@ Where:
 * `ext` - Externally load balanced VIP:port and FQDN, not managed by Kubespray;
 * `lc` - localhost;
 * `bip` - a custom bind IP or localhost for the default bind IP '0.0.0.0';
-* `nsp` - nginx secure port, `nginx_kube_apiserver_port`, defers to `sp`;
+* `nsp` - nginx/haproxy secure port, `loadbalancer_apiserver_port`, defers to `sp`;
 * `sp` - secure port, `kube_apiserver_port`;
 * `lp` - LB port, `loadbalancer_apiserver.port`, defers to the secure port;
 * `ip` - the node IP, defers to the ansible IP;
diff --git a/inventory/sample/group_vars/all/all.yml b/inventory/sample/group_vars/all/all.yml
index e7c62e93281f9a8a4b61db2490bab2476d046567..663277266172f28085f48de644522f18f6dbd92c 100644
--- a/inventory/sample/group_vars/all/all.yml
+++ b/inventory/sample/group_vars/all/all.yml
@@ -20,13 +20,15 @@ bin_dir: /usr/local/bin
 #   port: 1234
 
 ## Internal loadbalancers for apiservers
-# loadbalancer_apiserver_localhost: true
+loadbalancer_apiserver_localhost: true
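+## Valid values for loadbalancer_apiserver_type are "nginx" (the default) and "haproxy".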
+loadbalancer_apiserver_type: haproxy
 
 ## Local loadbalancer should use this port
 ## And must be set to port 6443
-nginx_kube_apiserver_port: 6443
-## If nginx_kube_apiserver_healthcheck_port variable defined, enables proxy liveness check.
-nginx_kube_apiserver_healthcheck_port: 8081
+loadbalancer_apiserver_port: 6443
+
+## If the loadbalancer_apiserver_healthcheck_port variable is defined, it enables the proxy liveness check (used by both nginx and haproxy).
+loadbalancer_apiserver_healthcheck_port: 8081
 
 ### OTHER OPTIONAL VARIABLES
 ## For some things, kubelet needs to load kernel modules.  For example, dynamic kernel services are needed
diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml
index cf038e46b7ba67c4e4cf970ede5b37a604030115..42a0fed068708a1939eedcaa76b22e694b1feaf8 100644
--- a/roles/download/defaults/main.yml
+++ b/roles/download/defaults/main.yml
@@ -202,6 +202,9 @@ multus_image_tag: "{{ multus_version }}"
 nginx_image_repo: nginx
 nginx_image_tag: 1.15
 
+haproxy_image_repo: haproxy
+haproxy_image_tag: 1.9
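+# haproxy here is the official library image from Docker Hub; override the
+# repo/tag above to pull from a private registry or pin another release.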
+
 coredns_version: "1.4.0"
 coredns_image_repo: "coredns/coredns"
 coredns_image_tag: "{{ coredns_version }}"
@@ -485,7 +488,7 @@ downloads:
       - k8s-cluster
 
   nginx:
-    enabled: "{{ loadbalancer_apiserver_localhost }}"
+    enabled: "{{ loadbalancer_apiserver_localhost and loadbalancer_apiserver_type == 'nginx' }}"
     container: true
     repo: "{{ nginx_image_repo }}"
     tag: "{{ nginx_image_tag }}"
@@ -493,6 +496,15 @@ downloads:
     groups:
       - kube-node
 
+  haproxy:
+    enabled: "{{ loadbalancer_apiserver_localhost and loadbalancer_apiserver_type == 'haproxy' }}"
+    container: true
+    repo: "{{ haproxy_image_repo }}"
+    tag: "{{ haproxy_image_tag }}"
+    sha256: "{{ haproxy_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+
   coredns:
     enabled: "{{ dns_mode in ['coredns', 'coredns_dual'] }}"
     container: true
diff --git a/roles/kubernetes/node/defaults/main.yml b/roles/kubernetes/node/defaults/main.yml
index c802ab91efd25da1d09954259402635bfea2fbcd..7eb45c89b224fde44dc86dcda39b7989b10aacf4 100644
--- a/roles/kubernetes/node/defaults/main.yml
+++ b/roles/kubernetes/node/defaults/main.yml
@@ -42,9 +42,9 @@ kube_master_cpu_reserved: 200m
 
 kubelet_status_update_frequency: 10s
 
-# Requests for nginx load balancer app
-nginx_memory_requests: 32M
-nginx_cpu_requests: 25m
+# Requests for load balancer app
+loadbalancer_apiserver_memory_requests: 32M
+loadbalancer_apiserver_cpu_requests: 25m
 
 # kube_api_runtime_config:
 #   - extensions/v1beta1/daemonsets=true
diff --git a/roles/kubernetes/node/tasks/haproxy.yml b/roles/kubernetes/node/tasks/haproxy.yml
new file mode 100644
index 0000000000000000000000000000000000000000..ed899a3f4b0da0d0f640805d148e254a0749c897
--- /dev/null
+++ b/roles/kubernetes/node/tasks/haproxy.yml
@@ -0,0 +1,25 @@
+---
+- name: haproxy | Cleanup potentially deployed nginx-proxy
+  file:
+    path: "{{ kube_manifest_dir }}/nginx-proxy.yml"
+    state: absent
+
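+# kubelet watches kube_manifest_dir and automatically recreates the static pod
+# whenever the manifest below changes; no explicit restart handler is needed.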
+- name: haproxy | Write static pod
+  template:
+    src: manifests/haproxy.manifest.j2
+    dest: "{{ kube_manifest_dir }}/haproxy.yml"
+
+- name: haproxy | Make haproxy directory
+  file:
+    path: "{{ haproxy_config_dir }}"
+    state: directory
+    mode: 0700
+    owner: root
+
+- name: haproxy | Write haproxy configuration
+  template:
+    src: haproxy.cfg.j2
+    dest: "{{ haproxy_config_dir }}/haproxy.cfg"
+    owner: root
+    mode: 0755
+    backup: yes
diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml
index 24894848aaa1bc2f2d8ae225b42ece57c25f7d8b..ef0be57baa08d219235912c3b0a609e3588d96f6 100644
--- a/roles/kubernetes/node/tasks/main.yml
+++ b/roles/kubernetes/node/tasks/main.yml
@@ -18,10 +18,15 @@
     - kubelet
 
 - import_tasks: nginx-proxy.yml
-  when: is_kube_master == false and loadbalancer_apiserver_localhost
+  when: is_kube_master == false and loadbalancer_apiserver_localhost and loadbalancer_apiserver_type == 'nginx'
   tags:
     - nginx
 
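+# nginx-proxy.yml and haproxy.yml each remove the other's static pod manifest,
+# so switching loadbalancer_apiserver_type leaves exactly one local LB deployed.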
+- import_tasks: haproxy.yml
+  when: is_kube_master == false and loadbalancer_apiserver_localhost and loadbalancer_apiserver_type == 'haproxy'
+  tags:
+    - haproxy
+
 - name: Make sure dynamic kubelet configuration directory is writeable
   file:
     path: "{{ dynamic_kubelet_configuration_dir }}"
diff --git a/roles/kubernetes/node/tasks/nginx-proxy.yml b/roles/kubernetes/node/tasks/nginx-proxy.yml
index 4b3b5f2f5a2550cbbb6c25711b012a3c694dbaa5..36c1d7306b604c2fa6ba87678e1e95f62bf19b1c 100644
--- a/roles/kubernetes/node/tasks/nginx-proxy.yml
+++ b/roles/kubernetes/node/tasks/nginx-proxy.yml
@@ -1,8 +1,13 @@
 ---
+- name: nginx-proxy | Cleanup potentially deployed haproxy
+  file:
+    path: "{{ kube_manifest_dir }}/haproxy.yml"
+    state: absent
+
 - name: nginx-proxy | Write static pod
   template:
     src: manifests/nginx-proxy.manifest.j2
-    dest: "{{kube_manifest_dir}}/nginx-proxy.yml"
+    dest: "{{ kube_manifest_dir }}/nginx-proxy.yml"
 
 - name: nginx-proxy | Make nginx directory
   file:
diff --git a/roles/kubernetes/node/templates/haproxy.cfg.j2 b/roles/kubernetes/node/templates/haproxy.cfg.j2
new file mode 100644
index 0000000000000000000000000000000000000000..76466b00810db7e1ee6ca6d10b31461c8a62c305
--- /dev/null
+++ b/roles/kubernetes/node/templates/haproxy.cfg.j2
@@ -0,0 +1,43 @@
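+# Deployed by Kubespray: a local TCP proxy in front of all kube-master
+# apiservers; kubelet and kube-proxy on this non-master node reach the
+# apiservers through the kube_api_frontend section below.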
+global
+    maxconn                 4000
+    log                     127.0.0.1 local0
+
+defaults
+    mode                    http
+    log                     global
+    option                  httplog
+    option                  dontlognull
+    option                  http-server-close
+    option                  redispatch
+    retries                 5
+    timeout http-request    5m
+    timeout queue           5m
+    timeout connect         30s
+    timeout client          15m
+    timeout server          15m
+    timeout http-keep-alive 30s
+    timeout check           30s
+    maxconn                 4000
+
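+# monitor-uri makes haproxy itself answer 200 OK on /healthz, so the static
+# pod's liveness/readiness probes never have to touch the apiservers.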
+{% if loadbalancer_apiserver_healthcheck_port is defined -%}
+frontend healthz
+  bind *:{{ loadbalancer_apiserver_healthcheck_port }}
+  mode http
+  monitor-uri /healthz
+{% endif %}
+
+frontend kube_api_frontend
+  bind *:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }}
+  mode tcp
+  option tcplog
+  default_backend kube_api_backend
+
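+# Each apiserver is health-checked with a TLS GET /healthz; "verify none"
+# skips certificate verification (no cluster CA is configured for the proxy).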
+backend kube_api_backend
+  mode tcp
+  balance leastconn
+  default-server inter 15s downinter 15s rise 2 fall 2 slowstart 60s maxconn 1000 maxqueue 256 weight 100
+  option httpchk GET /healthz
+  http-check expect status 200
+  {% for host in groups['kube-master'] -%}
+  server {{ host }} {{ hostvars[host]['access_ip'] | default(hostvars[host]['ip'] | default(fallback_ips[host])) }}:{{ kube_apiserver_port }} check check-ssl verify none
+  {% endfor -%}
diff --git a/roles/kubernetes/node/templates/manifests/haproxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/haproxy.manifest.j2
new file mode 100644
index 0000000000000000000000000000000000000000..e0cca903f2e63728131928fcf7cce486880bda7c
--- /dev/null
+++ b/roles/kubernetes/node/templates/manifests/haproxy.manifest.j2
@@ -0,0 +1,43 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: haproxy
+  namespace: kube-system
+  labels:
+    addonmanager.kubernetes.io/mode: Reconcile
+    k8s-app: kube-haproxy
+spec:
+  hostNetwork: true
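+  # hostNetwork so the frontend ports bind directly on the node and are
+  # reachable via localhost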
+  nodeSelector:
+    beta.kubernetes.io/os: linux
+{% if kube_version is version('v1.11.1', '>=') %}
+  priorityClassName: system-node-critical
+{% endif %}
+  containers:
+  - name: haproxy
+    image: {{ haproxy_image_repo }}:{{ haproxy_image_tag }}
+    imagePullPolicy: {{ k8s_image_pull_policy }}
+    resources:
+      requests:
+        cpu: {{ loadbalancer_apiserver_cpu_requests }}
+        memory: {{ loadbalancer_apiserver_memory_requests }}
+    securityContext:
+      privileged: true
+    {% if loadbalancer_apiserver_healthcheck_port is defined -%}
+    livenessProbe:
+      httpGet:
+        path: /healthz
+        port: {{ loadbalancer_apiserver_healthcheck_port }}
+    readinessProbe:
+      httpGet:
+        path: /healthz
+        port: {{ loadbalancer_apiserver_healthcheck_port }}
+    {% endif -%}
+    volumeMounts:
+    - mountPath: /usr/local/etc/haproxy/
+      name: etc-haproxy
+      readOnly: true
+  volumes:
+  - name: etc-haproxy
+    hostPath:
+      path: {{ haproxy_config_dir }}
diff --git a/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2
index ed52f647eaebd2fa5a65ba1f61c5b81075ecd829..18e85b3faab7d6aaf57adc4d71e88532afe0ec16 100644
--- a/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2
+++ b/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2
@@ -19,19 +19,19 @@ spec:
     imagePullPolicy: {{ k8s_image_pull_policy }}
     resources:
       requests:
-        cpu: {{ nginx_cpu_requests }}
-        memory: {{ nginx_memory_requests }}
+        cpu: {{ loadbalancer_apiserver_cpu_requests }}
+        memory: {{ loadbalancer_apiserver_memory_requests }}
     securityContext:
       privileged: true
-    {% if nginx_kube_apiserver_healthcheck_port is defined -%}
+    {% if loadbalancer_apiserver_healthcheck_port is defined -%}
     livenessProbe:
       httpGet:
         path: /healthz
-        port: {{ nginx_kube_apiserver_healthcheck_port }}
+        port: {{ loadbalancer_apiserver_healthcheck_port }}
     readinessProbe:
       httpGet:
         path: /healthz
-        port: {{ nginx_kube_apiserver_healthcheck_port }}
+        port: {{ loadbalancer_apiserver_healthcheck_port }}
     {% endif -%}
     volumeMounts:
     - mountPath: /etc/nginx
diff --git a/roles/kubernetes/node/templates/nginx.conf.j2 b/roles/kubernetes/node/templates/nginx.conf.j2
index 274139529c78544f48724a9e332699f41302221b..0c869d94ae85e4efc6e39974a715401e87d0f4c8 100644
--- a/roles/kubernetes/node/templates/nginx.conf.j2
+++ b/roles/kubernetes/node/templates/nginx.conf.j2
@@ -19,7 +19,7 @@ stream {
   }
 
   server {
-    listen        127.0.0.1:{{ nginx_kube_apiserver_port|default(kube_apiserver_port) }};
+    listen        127.0.0.1:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }};
     proxy_pass    kube_apiserver;
     proxy_timeout 10m;
     proxy_connect_timeout 1s;
@@ -38,13 +38,13 @@ http {
   server_tokens off;
   autoindex off;
 
-  {% if nginx_kube_apiserver_healthcheck_port is defined -%}
+  {% if loadbalancer_apiserver_healthcheck_port is defined -%}
   server {
-    listen {{ nginx_kube_apiserver_healthcheck_port }};
+    listen {{ loadbalancer_apiserver_healthcheck_port }};
     location /healthz {
       access_log off;
       return 200;
     }
   }
-  {% endif -%}
+  {% endif %}
 }
diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml
index c6407cb329fcb5670be9ae87841ba9446cf4e2f7..0d59873b32279cd668202fe06a18b5b99c878bf7 100644
--- a/roles/kubespray-defaults/defaults/main.yaml
+++ b/roles/kubespray-defaults/defaults/main.yaml
@@ -37,6 +37,9 @@ ignore_assert_errors: false
 # nginx-proxy configure
 nginx_config_dir: "/etc/nginx"
 
+# haproxy configure
+haproxy_config_dir: "/etc/haproxy"
+
 # Directory where the binaries will be installed
 bin_dir: /usr/local/bin
 docker_bin_dir: /usr/bin
@@ -415,13 +418,14 @@ kube_apiserver_address: "{{ ip | default(fallback_ips[inventory_hostname]) }}"
 kube_apiserver_access_address: "{{ access_ip | default(kube_apiserver_address) }}"
 first_kube_master: "{{ hostvars[groups['kube-master'][0]]['access_ip'] | default(hostvars[groups['kube-master'][0]]['ip'] | default(fallback_ips[groups['kube-master'][0]])) }}"
 loadbalancer_apiserver_localhost: "{{ loadbalancer_apiserver is not defined }}"
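+# Local LB flavor on non-master nodes; valid values are "nginx" and "haproxy".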
+loadbalancer_apiserver_type: "nginx"
 # applied if only external loadbalancer_apiserver is defined, otherwise ignored
 apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local"
 kube_apiserver_endpoint: |-
   {% if loadbalancer_apiserver is defined and loadbalancer_apiserver.port is defined -%}
        https://{{ apiserver_loadbalancer_domain_name|default('lb-apiserver.kubernetes.local') }}:{{ loadbalancer_apiserver.port|default(kube_apiserver_port) }}
   {%- elif not is_kube_master and loadbalancer_apiserver_localhost -%}
-       https://localhost:{{ nginx_kube_apiserver_port|default(kube_apiserver_port) }}
+       https://localhost:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }}
   {%- elif is_kube_master -%}
        https://{{ kube_apiserver_bind_address | regex_replace('0\.0\.0\.0','127.0.0.1') }}:{{ kube_apiserver_port }}
   {%- else -%}
diff --git a/roles/network_plugin/contiv/tasks/main.yml b/roles/network_plugin/contiv/tasks/main.yml
index a5be03facf1fb557f80cfd6ede4e1add42a9ac54..d626cbd689f8ba01316324c0f9cb240821fd7405 100644
--- a/roles/network_plugin/contiv/tasks/main.yml
+++ b/roles/network_plugin/contiv/tasks/main.yml
@@ -22,7 +22,7 @@
   set_fact:
     kube_apiserver_endpoint_for_contiv: |-
       {% if not is_kube_master and loadbalancer_apiserver_localhost -%}
-      https://localhost:{{ nginx_kube_apiserver_port|default(kube_apiserver_port) }}
+      https://localhost:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }}
       {%- elif loadbalancer_apiserver is defined and loadbalancer_apiserver.port is defined -%}
       https://{{ apiserver_loadbalancer_domain_name|default('lb-apiserver.kubernetes.local') }}
       {%-   if loadbalancer_apiserver.port|string != "443" -%}
diff --git a/tests/files/gce_ubuntu-flannel-ha.yml b/tests/files/gce_ubuntu-flannel-ha.yml
index 54aeac1c82777188c3257b08f081f4daec9f8743..4057b27faa23ac6db56c640be735322065a58820 100644
--- a/tests/files/gce_ubuntu-flannel-ha.yml
+++ b/tests/files/gce_ubuntu-flannel-ha.yml
@@ -12,3 +12,4 @@ skip_non_kubeadm_warning: true
 deploy_netchecker: true
 dns_min_replicas: 1
 cloud_provider: gce
+loadbalancer_apiserver_type: haproxy