diff --git a/test/pod_test.go b/test/pod_test.go
index ed52ca36edbdf2953470708eb6e311c95dbf226c..cf040529c51d464902ece9ae8c757c5771b626d7 100644
--- a/test/pod_test.go
+++ b/test/pod_test.go
@@ -82,38 +82,38 @@ func TestPVCTestSuite(t *testing.T) {
 func (p *PodTestSuite) TestPodWithHostPathVolume() {
 	p.kustomizeDir = "pod"
 
-	runTest(p, []string{p.config.IMAGE}, "ready", hostPathVolumeType)
+	runTest(p, []string{p.config.IMAGE}, waitCondition("ready"), hostPathVolumeType)
 }
 
 func (p *PodTestSuite) TestPodWithLocalVolume() {
 	p.kustomizeDir = "pod-with-local-volume"
 
-	runTest(p, []string{p.config.IMAGE}, "ready", localVolumeType)
+	runTest(p, []string{p.config.IMAGE}, waitCondition("ready"), localVolumeType)
 }
 
 func (p *PodTestSuite) TestPodWithLocalVolumeDefault() {
 	p.kustomizeDir = "pod-with-default-local-volume"
 
-	runTest(p, []string{p.config.IMAGE}, "ready", localVolumeType)
+	runTest(p, []string{p.config.IMAGE}, waitCondition("ready"), localVolumeType)
 }
 
 func (p *PodTestSuite) TestPodWithNodeAffinity() {
 	p.kustomizeDir = "pod-with-node-affinity"
 
-	runTest(p, []string{p.config.IMAGE}, "ready", hostPathVolumeType)
+	runTest(p, []string{p.config.IMAGE}, waitCondition("ready"), hostPathVolumeType)
 }
 
 func (p *PodTestSuite) TestPodWithRWOPVolume() {
 	p.kustomizeDir = "pod-with-rwop-volume"
 
-	runTest(p, []string{p.config.IMAGE}, "ready", localVolumeType)
+	runTest(p, []string{p.config.IMAGE}, waitCondition("ready"), localVolumeType)
 }
 
 func (p *PodTestSuite) TestPodWithSecurityContext() {
 	p.kustomizeDir = "pod-with-security-context"
 	kustomizeDir := testdataFile(p.kustomizeDir)
 
-	runTest(p, []string{p.config.IMAGE}, "podscheduled", hostPathVolumeType)
+	runTest(p, []string{p.config.IMAGE}, waitCondition("podscheduled"), hostPathVolumeType)
 
 	cmd := fmt.Sprintf(`kubectl get pod -l %s=%s -o=jsonpath='{.items[0].status.conditions[?(@.type=="Ready")].reason}'`, LabelKey, LabelValue)
 
@@ -142,22 +142,33 @@ loop:
 func (p *PodTestSuite) TestPodWithSubpath() {
 	p.kustomizeDir = "pod-with-subpath"
 
-	runTest(p, []string{p.config.IMAGE}, "ready", hostPathVolumeType)
+	runTest(p, []string{p.config.IMAGE}, waitCondition("ready"), hostPathVolumeType)
 }
 
 func (p *PodTestSuite) xxTestPodWithMultipleStorageClasses() {
 	p.kustomizeDir = "multiple-storage-classes"
 
-	runTest(p, []string{p.config.IMAGE}, "ready", hostPathVolumeType)
+	runTest(p, []string{p.config.IMAGE}, waitCondition("ready"), hostPathVolumeType)
 }
 
 func (p *PodTestSuite) TestPodWithCustomPathPatternStorageClasses() {
 	p.kustomizeDir = "custom-path-pattern"
 
-	runTest(p, []string{p.config.IMAGE}, "ready", hostPathVolumeType)
+	runTest(p, []string{p.config.IMAGE}, waitCondition("ready"), hostPathVolumeType)
 }
 
-func runTest(p *PodTestSuite, images []string, waitCondition, volumeType string) {
+func (p *PodTestSuite) TestPodWithLegacyAffinityConstraint() {
+	// The helper pod should be correctly scheduled
+	p.kustomizeDir = "pv-with-legacy-affinity"
+
+	runTest(p, []string{p.config.IMAGE}, "kubectl wait pv pvc-to-clean-up --for delete --timeout=120s", "")
+}
+
+func waitCondition(condition string) string {
+	return fmt.Sprintf("kubectl wait pod -l %s=%s --for condition=%s --timeout=120s", LabelKey, LabelValue, condition)
+}
+
+func runTest(p *PodTestSuite, images []string, waitCmd, volumeType string) {
 	kustomizeDir := testdataFile(p.kustomizeDir)
 
 	var cmds []string
@@ -171,7 +182,7 @@ func runTest(p *PodTestSuite, images []string, waitCondition, volumeType string)
 		cmds,
 		fmt.Sprintf("kustomize edit add label %s:%s -f", LabelKey, LabelValue),
 		"kustomize build | kubectl apply -f -",
-		fmt.Sprintf("kubectl wait pod -l %s=%s --for condition=%s --timeout=120s", LabelKey, LabelValue, waitCondition),
+		waitCmd,
 	)
 
 	for _, cmd := range cmds {
@@ -188,13 +199,15 @@ func runTest(p *PodTestSuite, images []string, waitCondition, volumeType string)
 		}
 	}
 
-	typeCheckCmd := fmt.Sprintf("kubectl get pv $(%s) -o jsonpath='{.spec.%s}'", "kubectl get pv -o jsonpath='{.items[0].metadata.name}'", volumeType)
-	c := createCmd(p.T(), typeCheckCmd, kustomizeDir, p.config.envs(), nil)
-	typeCheckOutput, err := c.CombinedOutput()
-	if err != nil {
-		p.FailNow("", "failed to check volume type: %v", err)
-	}
-	if len(typeCheckOutput) == 0 || !strings.Contains(string(typeCheckOutput), "path") {
-		p.FailNow("volume Type not correct")
+	if volumeType != "" {
+		typeCheckCmd := fmt.Sprintf("kubectl get pv $(%s) -o jsonpath='{.spec.%s}'", "kubectl get pv -o jsonpath='{.items[0].metadata.name}'", volumeType)
+		c := createCmd(p.T(), typeCheckCmd, kustomizeDir, p.config.envs(), nil)
+		typeCheckOutput, err := c.CombinedOutput()
+		if err != nil {
+			p.FailNow("", "failed to check volume type: %v", err)
+		}
+		if len(typeCheckOutput) == 0 || !strings.Contains(string(typeCheckOutput), "path") {
+			p.FailNow("volume Type not correct")
+		}
 	}
 }
diff --git a/test/testdata/kind-cluster.yaml b/test/testdata/kind-cluster.yaml
index 5d48018e18baa7c4ac72ade64accf1b170c79bef..9d1fb8acefd398a24e7dcde56fe4c36f45aba42d 100644
--- a/test/testdata/kind-cluster.yaml
+++ b/test/testdata/kind-cluster.yaml
@@ -3,4 +3,8 @@ kind: Cluster
 nodes:
   - role: control-plane
   - role: worker
+    labels:
+      kubernetes.io/hostname: kind-worker1.hostname
   - role: worker
+    labels:
+      kubernetes.io/hostname: kind-worker2.hostname
diff --git a/test/testdata/pod-with-node-affinity/patch.yaml b/test/testdata/pod-with-node-affinity/patch.yaml
index 204d775d3d7009eb3f0ab7b7eaccab1137d3d4f5..efbf3d19a9300788d37bfbadb2b5c654d8d18911 100644
--- a/test/testdata/pod-with-node-affinity/patch.yaml
+++ b/test/testdata/pod-with-node-affinity/patch.yaml
@@ -11,4 +11,4 @@ spec:
               - key: kubernetes.io/hostname
                 operator: In
                 values:
-                  - kind-worker
+                  - kind-worker1.hostname
diff --git a/test/testdata/pv-with-legacy-affinity/kustomization.yaml b/test/testdata/pv-with-legacy-affinity/kustomization.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b0f1729c75e3421f870bfc4e9ab9d0d4b016dd56
--- /dev/null
+++ b/test/testdata/pv-with-legacy-affinity/kustomization.yaml
@@ -0,0 +1,10 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+- ../../../deploy
+- pv.yaml
+commonLabels:
+  app: local-path-provisioner
+images:
+- name: rancher/local-path-provisioner
+  newTag: dev
diff --git a/test/testdata/pv-with-legacy-affinity/pv.yaml b/test/testdata/pv-with-legacy-affinity/pv.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e13781c27a707ad379bd2489ca409c117d297a00
--- /dev/null
+++ b/test/testdata/pv-with-legacy-affinity/pv.yaml
@@ -0,0 +1,38 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  annotations:
+    local.path.provisioner/selected-node: kind-worker
+    pv.kubernetes.io/provisioned-by: rancher.io/local-path
+  finalizers:
+    - kubernetes.io/pv-protection
+  labels:
+    test/avoid-cleanup: "true"
+  name: pvc-to-clean-up
+spec:
+  accessModes:
+    - ReadWriteOnce
+  capacity:
+    storage: 100Mi
+  hostPath:
+    path: /opt/local-path-provisioner/default/local-path-pvc
+    type: DirectoryOrCreate
+  nodeAffinity:
+    required:
+      nodeSelectorTerms:
+        - matchExpressions:
+            - key: kubernetes.io/hostname
+              operator: In
+              values:
+                - kind-worker1.hostname
+  claimRef:
+    apiVersion: v1
+    kind: PersistentVolumeClaim
+    name: no-such-pvc
+    namespace: default
+    # The PVC "definitely doesn't exist any more"
+    resourceVersion: "1"
+    uid: 12345678-1234-5678-9abc-123456789abc
+  persistentVolumeReclaimPolicy: Delete
+  storageClassName: local-path-custom-path-pattern
+  volumeMode: Filesystem
diff --git a/test/util.go b/test/util.go
index 7aedec0b6569bbd58341dd5bdd358ea1bbe3dd0c..b20cebef706a4d6e737a256c344c38f48d74d4ab 100644
--- a/test/util.go
+++ b/test/util.go
@@ -78,7 +78,7 @@ func testdataFile(fields ...string) string {
 func deleteKustomizeDeployment(t *testing.T, kustomizeDir string, envs []string) error {
 	_, err := runCmd(
 		t,
-		"kustomize build | kubectl delete --timeout=180s -f -",
+		"kustomize build | kubectl delete --timeout=180s -f - -l 'test/avoid-cleanup!=true'",
 		testdataFile(kustomizeDir),
 		envs,
 		nil,