Commit 87408857 authored by nicktming, committed by Sheng Yang

make helper pod configurable and add xfs quota example

parent c3192ec3
......@@ -132,12 +132,49 @@ data:
}
setup: |-
#!/bin/sh
path=$1
mkdir -m 0777 -p ${path}
while getopts "m:s:p:" opt
do
case $opt in
p)
absolutePath=$OPTARG
;;
s)
sizeInBytes=$OPTARG
;;
m)
volMode=$OPTARG
;;
esac
done
mkdir -m 0777 -p ${absolutePath}
teardown: |-
#!/bin/sh
path=$1
rm -rf ${path}
while getopts "m:s:p:" opt
do
case $opt in
p)
absolutePath=$OPTARG
;;
s)
sizeInBytes=$OPTARG
;;
m)
volMode=$OPTARG
;;
esac
done
rm -rf ${absolutePath}
helperPod.yaml: |-
apiVersion: v1
kind: Pod
metadata:
name: helper-pod
spec:
containers:
- name: helper-pod
image: busybox
```
......@@ -158,12 +195,17 @@ The configuration must obey following rules:
3. No duplicate paths allowed for one node.
4. No duplicate nodes allowed.
#### Scripts `setup` and `teardown`
#### Scripts `setup` and `teardown`, and the `helperPod.yaml` template
The script `setup` will be executed before the volume is created, to prepare the directory on the node for the volume.
The script `teardown` will be executed after the volume is deleted, to cleanup the directory on the node for the volume.
The helper pod defined in `helperPod.yaml` is created by the local-path provisioner to execute the `setup` or `teardown` script, which is invoked with three parameters `-p <path> -s <size> -m <mode>`:
- path: the absolute path provisioned on the node
- size: `pvc.Spec.resources.requests.storage` in bytes
- mode: `pvc.Spec.VolumeMode`
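For illustration, the effective command run inside the helper pod looks like the following; the path, size (1Gi in bytes), and mode values here are made-up examples:

```bash
# Illustrative invocation only: the provisioner sets the script as the pod
# command and passes -p/-s/-m as pod args. All values below are examples.
/bin/sh /script/setup -p /opt/local-path-provisioner/pvc-example -s 1073741824 -m Filesystem
```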
#### Reloading
The provisioner supports automatic configuration reloading. Users can change the configuration with `kubectl apply` or `kubectl edit` on the config map `local-path-config`. There is a delay between the config map being updated and the provisioner picking up the change.
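For example, the config map can be edited in place (assuming the default `local-path-storage` namespace):

```bash
# Edit the provisioner configuration; the change is picked up automatically after a delay.
kubectl -n local-path-storage edit configmap local-path-config
```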
......@@ -198,7 +240,7 @@ git clone https://github.com/rancher/local-path-provisioner.git
cd local-path-provisioner
go build
kubectl apply -f debug/config.yaml
./local-path-provisioner --debug start --namespace=local-path-storage
./local-path-provisioner --debug start --service-account-name=default
```
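This commit also adds a `--helper-pod-file` flag, so the helper pod template can be loaded from a local file instead of the config map. A sketch, where the file path is a placeholder:

```bash
# Load the helper pod template from a local file rather than the
# helperPod.yaml key of the local-path-config ConfigMap.
./local-path-provisioner --debug start --service-account-name=default \
    --helper-pod-file=/path/to/helperPod.yaml
```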
### example
......
......@@ -39,10 +39,47 @@ data:
}
setup: |-
#!/bin/sh
path=$1
mkdir -m 0777 -p ${path}
while getopts "m:s:p:" opt
do
case $opt in
p)
absolutePath=$OPTARG
;;
s)
sizeInBytes=$OPTARG
;;
m)
volMode=$OPTARG
;;
esac
done
mkdir -m 0777 -p ${absolutePath}
teardown: |-
#!/bin/sh
path=$1
rm -rf ${path}
while getopts "m:s:p:" opt
do
case $opt in
p)
absolutePath=$OPTARG
;;
s)
sizeInBytes=$OPTARG
;;
m)
volMode=$OPTARG
;;
esac
done
rm -rf ${absolutePath}
helperPod.yaml: |-
apiVersion: v1
kind: Pod
metadata:
name: helper-pod
spec:
containers:
- name: helper-pod
image: busybox
......@@ -71,9 +71,11 @@ default values.
| `nodeSelector` | Node labels for Local Path Provisioner pod assignment | `{}` |
| `tolerations` | Node taints to tolerate | `[]` |
| `affinity` | Pod affinity | `{}` |
| `configmap.setup` | Configuration of script to execute setup operations on each node | #!/bin/sh<br>path=$1<br>mkdir -m 0777 -p ${path} |
| `configmap.teardown` | Configuration of script to execute teardown operations on each node | #!/bin/sh<br>path=$1<br>rm -rf ${path} |
| `configmap.setup` | Configuration of script to execute setup operations on each node | #!/bin/sh<br>while getopts "m:s:p:" opt<br>do<br>&emsp;case $opt in <br>&emsp;&emsp;p)<br>&emsp;&emsp;absolutePath=$OPTARG<br>&emsp;&emsp;;;<br>&emsp;&emsp;s)<br>&emsp;&emsp;sizeInBytes=$OPTARG<br>&emsp;&emsp;;;<br>&emsp;&emsp;m)<br>&emsp;&emsp;volMode=$OPTARG<br>&emsp;&emsp;;;<br>&emsp;esac<br>done<br>mkdir -m 0777 -p ${absolutePath} |
| `configmap.teardown` | Configuration of script to execute teardown operations on each node | #!/bin/sh<br>while getopts "m:s:p:" opt<br>do<br>&emsp;case $opt in <br>&emsp;&emsp;p)<br>&emsp;&emsp;absolutePath=$OPTARG<br>&emsp;&emsp;;;<br>&emsp;&emsp;s)<br>&emsp;&emsp;sizeInBytes=$OPTARG<br>&emsp;&emsp;;;<br>&emsp;&emsp;m)<br>&emsp;&emsp;volMode=$OPTARG<br>&emsp;&emsp;;;<br>&emsp;esac<br>done<br>rm -rf ${absolutePath} |
| `configmap.name` | configmap name | `local-path-config` |
| `configmap.helperPod` | Helper pod YAML template | apiVersion: v1<br>kind: Pod<br>metadata:<br>&emsp;name: helper-pod<br>spec:<br>&emsp;containers:<br>&emsp;- name: helper-pod<br>&emsp;&emsp;image: busybox |
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
......
......@@ -7,7 +7,7 @@ metadata:
{{ include "local-path-provisioner.labels" . | indent 4 }}
rules:
- apiGroups: [""]
resources: ["nodes", "persistentvolumeclaims"]
resources: ["nodes", "persistentvolumeclaims", "configmaps"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["endpoints", "persistentvolumes", "pods"]
......
......@@ -13,3 +13,6 @@ data:
{{ .Values.configmap.setup | nindent 4 }}
teardown: |-
{{ .Values.configmap.teardown | nindent 4 }}
helperPod.yaml: |-
{{ .Values.configmap.helperPod | nindent 4 }}
......@@ -93,11 +93,52 @@ configmap:
# specify the custom script for setup and teardown
setup: |-
#!/bin/sh
path=$1
mkdir -m 0777 -p ${path}
while getopts "m:s:p:" opt
do
case $opt in
p)
absolutePath=$OPTARG
;;
s)
sizeInBytes=$OPTARG
;;
m)
volMode=$OPTARG
;;
esac
done
mkdir -m 0777 -p ${absolutePath}
teardown: |-
#!/bin/sh
path=$1
rm -rf ${path}
while getopts "m:s:p:" opt
do
case $opt in
p)
absolutePath=$OPTARG
;;
s)
sizeInBytes=$OPTARG
;;
m)
volMode=$OPTARG
;;
esac
done
rm -rf ${absolutePath}
# specify the custom helper pod yaml
helperPod: |-
apiVersion: v1
kind: Pod
metadata:
name: helper-pod
spec:
containers:
- name: helper-pod
image: busybox
......@@ -23,10 +23,48 @@ data:
}
setup: |-
#!/bin/sh
path=$1
mkdir -m 0777 -p ${path}
while getopts "m:s:p:" opt
do
case $opt in
p)
absolutePath=$OPTARG
;;
s)
sizeInBytes=$OPTARG
;;
m)
volMode=$OPTARG
;;
esac
done
mkdir -m 0777 -p ${absolutePath}
teardown: |-
#!/bin/sh
path=$1
rm -rf ${path}
while getopts "m:s:p:" opt
do
case $opt in
p)
absolutePath=$OPTARG
;;
s)
sizeInBytes=$OPTARG
;;
m)
volMode=$OPTARG
;;
esac
done
rm -rf ${absolutePath}
helperPod.yaml: |-
apiVersion: v1
kind: Pod
metadata:
name: helper-pod
spec:
containers:
- name: helper-pod
image: busybox
......@@ -15,7 +15,7 @@ metadata:
name: local-path-provisioner-role
rules:
- apiGroups: [""]
resources: ["nodes", "persistentvolumeclaims"]
resources: ["nodes", "persistentvolumeclaims", "configmaps"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["endpoints", "persistentvolumes", "pods"]
......@@ -104,10 +104,48 @@ data:
}
setup: |-
#!/bin/sh
path=$1
mkdir -m 0777 -p ${path}
while getopts "m:s:p:" opt
do
case $opt in
p)
absolutePath=$OPTARG
;;
s)
sizeInBytes=$OPTARG
;;
m)
volMode=$OPTARG
;;
esac
done
mkdir -m 0777 -p ${absolutePath}
teardown: |-
#!/bin/sh
path=$1
rm -rf ${path}
while getopts "m:s:p:" opt
do
case $opt in
p)
absolutePath=$OPTARG
;;
s)
sizeInBytes=$OPTARG
;;
m)
volMode=$OPTARG
;;
esac
done
rm -rf ${absolutePath}
helperPod.yaml: |-
apiVersion: v1
kind: Pod
metadata:
name: helper-pod
spec:
containers:
- name: helper-pod
image: busybox
FROM centos:7
RUN yum install -y xfsprogs
# Overview
This is an example of enabling XFS project quota for the provisioned volumes.
# Usage
> 1. Build a helper image using the sample Dockerfile, and use it to replace the helper image `xxx/storage-xfs-quota:v0.1` in the `helperPod.yaml` entry of the config map in `debug.yaml`.
> 2. Use the sample `setup` and `teardown` scripts from the config map in `debug.yaml`.
Notice:
> 1. Make sure each path in `nodePathMap` is the mount point of an XFS filesystem mounted with the `pquota` option (see the sketch below).
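A minimal sketch of these steps; the image tag, device, and mount path are assumptions for illustration (`/data1` matches the sample `nodePathMap`):

```bash
# Build the helper image from the sample Dockerfile (the tag is an example);
# reference it in the helperPod.yaml entry of debug.yaml afterwards.
docker build -t example.com/storage-xfs-quota:v0.1 .

# Prepare an XFS mount point with project quota enabled
# (/dev/sdb1 and /data1 are illustrative; adjust to your environment).
mkfs.xfs /dev/sdb1
mkdir -p /data1
mount -o pquota /dev/sdb1 /data1
# Verify that the quota option is active on the mount.
mount | grep /data1
```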
# Debug
```bash
git clone https://github.com/rancher/local-path-provisioner.git
cd local-path-provisioner
go build
kubectl apply -f debug.yaml
./local-path-provisioner --debug start --namespace=local-path-storage
```
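After a PVC is provisioned, the quota can be checked on the node with the same report command the scripts use; a sketch assuming the `/data1` mount point from the config above:

```bash
# Report XFS project quota usage for the provisioned volumes.
xfs_quota -x -c "report -pbih" /data1
```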
apiVersion: v1
kind: Namespace
metadata:
name: local-path-storage
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: local-path
provisioner: rancher.io/local-path
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: Delete
---
kind: ConfigMap
apiVersion: v1
metadata:
name: local-path-config
namespace: local-path-storage
data:
config.json: |-
{
"nodePathMap":[
{
"node":"DEFAULT_PATH_FOR_NON_LISTED_NODES",
"paths":["/opt/local-path-provisioner"]
},
{
"node":"yasker-lp-dev1",
"paths":["/opt/local-path-provisioner", "/data1"]
},
{
"node":"yasker-lp-dev3",
"paths":[]
}
]
}
setup: |-
#!/bin/sh
while getopts "m:s:p:" opt
do
case $opt in
p)
absolutePath=$OPTARG
;;
s)
sizeInBytes=$OPTARG
;;
m)
volMode=$OPTARG
;;
esac
done
xfsPath=$(dirname "$absolutePath")
pvcName=$(basename "$absolutePath")
mkdir -p ${absolutePath}
# support xfs quota
type=`stat -f -c %T ${xfsPath}`
if [ ${type} == 'xfs' ]; then
echo "support xfs quota"
project=`cat /etc/projects | tail -n 1`
id=`echo ${project%:*}`
if [ ! ${project} ]; then
id=1
else
id=$[${id}+1]
fi
echo "${id}:${absolutePath}" >> /etc/projects
echo "${pvcName}:${id}" >> /etc/projid
xfs_quota -x -c "project -s ${pvcName}"
xfs_quota -x -c "limit -p bhard=${sizeInBytes} ${pvcName}" ${xfsPath}
xfs_quota -x -c "report -pbih" ${xfsPath}
fi
teardown: |-
#!/bin/sh
while getopts "m:s:p:" opt
do
case $opt in
p)
absolutePath=$OPTARG
;;
s)
sizeInBytes=$OPTARG
;;
m)
volMode=$OPTARG
;;
esac
done
xfsPath=$(dirname "$absolutePath")
pvcName=$(basename "$absolutePath")
# support xfs quota
type=`stat -f -c %T ${xfsPath}`
if [ ${type} == 'xfs' ]; then
echo "support xfs quota"
xfs_quota -x -c "limit -p bhard=0 ${pvcName}" ${xfsPath}
fi
rm -rf ${absolutePath}
if [ ${type} == 'xfs' ]; then
echo "$(sed "/${pvcName}/d" /etc/projects)" > /etc/projects
echo "$(sed "/${pvcName}/d" /etc/projid)" > /etc/projid
xfs_quota -x -c "report -pbih" ${xfsPath}
fi
helperPod.yaml: |-
apiVersion: v1
kind: Pod
metadata:
name: helper-pod
spec:
containers:
- name: helper-pod
image: xxx/storage-xfs-quota:v0.1
imagePullPolicy: Always
securityContext:
privileged: true
volumeMounts:
- name: xfs-quota-projects
subPath: projects
mountPath: /etc/projects
- name: xfs-quota-projects
subPath: projid
mountPath: /etc/projid
volumes:
- name: xfs-quota-projects
hostPath:
path: /etc
apiVersion: v1
kind: Pod
metadata:
name: helper-pod
spec:
containers:
- name: helper-pod
image: xxx/storage-xfs-quota:v0.1
imagePullPolicy: Always
securityContext:
privileged: true
volumeMounts:
- name: xfs-quota-projects
subPath: projects
mountPath: /etc/projects
- name: xfs-quota-projects
subPath: projid
mountPath: /etc/projid
volumes:
- name: xfs-quota-projects
hostPath:
path: /etc
#!/bin/sh
while getopts "m:s:p:" opt
do
case $opt in
p)
absolutePath=$OPTARG
;;
s)
sizeInBytes=$OPTARG
;;
m)
volMode=$OPTARG
;;
esac
done
xfsPath=$(dirname "$absolutePath")
pvcName=$(basename "$absolutePath")
mkdir -p ${absolutePath}
# support xfs quota
type=`stat -f -c %T ${xfsPath}`
if [ ${type} == 'xfs' ]; then
echo "support xfs quota"
project=`cat /etc/projects | tail -n 1`
id=`echo ${project%:*}`
if [ ! ${project} ]; then
id=1
else
id=$[${id}+1]
fi
echo "${id}:${absolutePath}" >> /etc/projects
echo "${pvcName}:${id}" >> /etc/projid
xfs_quota -x -c "project -s ${pvcName}"
xfs_quota -x -c "limit -p bhard=${sizeInBytes} ${pvcName}" ${xfsPath}
xfs_quota -x -c "report -pbih" ${xfsPath}
fi
\ No newline at end of file
#!/bin/sh
while getopts "m:s:p:" opt
do
case $opt in
p)
absolutePath=$OPTARG
;;
s)
sizeInBytes=$OPTARG
;;
m)
volMode=$OPTARG
;;
esac
done
xfsPath=$(dirname "$absolutePath")
pvcName=$(basename "$absolutePath")
# support xfs quota
type=`stat -f -c %T ${xfsPath}`
if [ ${type} == 'xfs' ]; then
echo "support xfs quota"
xfs_quota -x -c "limit -p bhard=0 ${pvcName}" ${xfsPath}
fi
rm -rf ${absolutePath}
if [ ${type} == 'xfs' ]; then
echo "$(sed "/${pvcName}/d" /etc/projects)" > /etc/projects
echo "$(sed "/${pvcName}/d" /etc/projid)" > /etc/projid
xfs_quota -x -c "report -pbih" ${xfsPath}
fi
......@@ -18,4 +18,5 @@ require (
k8s.io/apimachinery v0.17.1
k8s.io/client-go v0.17.1
sigs.k8s.io/sig-storage-lib-external-provisioner v4.0.2-0.20200115000635-36885abbb2bd+incompatible
sigs.k8s.io/yaml v1.1.0
)
......@@ -20,7 +20,6 @@ import (
var (
VERSION = "0.0.1"
FlagConfigFile = "config"
FlagProvisionerName = "provisioner-name"
EnvProvisionerName = "PROVISIONER_NAME"
......@@ -38,6 +37,8 @@ var (
DefaultConfigFileKey = "config.json"
DefaultConfigMapName = "local-path-config"
FlagConfigMapName = "configmap-name"
FlagHelperPodFile = "helper-pod-file"
DefaultHelperPodFile = "helperPod.yaml"
)
func cmdNotFound(c *cli.Context, command string) {
......@@ -101,6 +102,11 @@ func StartCmd() cli.Command {
EnvVar: EnvServiceAccountName,
Value: DefaultServiceAccount,
},
cli.StringFlag{
Name: FlagHelperPodFile,
Usage: "Paths to the Helper pod yaml file",
Value: "",
},
},
Action: func(c *cli.Context) {
if err := startDaemon(c); err != nil {
......@@ -140,16 +146,16 @@ func loadConfig(kubeconfig string) (*rest.Config, error) {
return clientcmd.BuildConfigFromFlags("", kubeconfig)
}
func findConfigFileFromConfigMap(kubeClient clientset.Interface, namespace, configMapName string) (string, error) {
func findConfigFileFromConfigMap(kubeClient clientset.Interface, namespace, configMapName, key string) (string, error) {
cm, err := kubeClient.CoreV1().ConfigMaps(namespace).Get(configMapName, metav1.GetOptions{})
if err != nil {
return "", err
}
configFile, ok := cm.Data[DefaultConfigFileKey]
value, ok := cm.Data[key]
if !ok {
return "", fmt.Errorf("%v is not exist in local-path-config ConfigMap", DefaultConfigFileKey)
return "", fmt.Errorf("%v is not exist in local-path-config ConfigMap", key)
}
return configFile, nil
return value, nil
}
func startDaemon(c *cli.Context) error {
......@@ -185,7 +191,7 @@ func startDaemon(c *cli.Context) error {
}
configFile := c.String(FlagConfigFile)
if configFile == "" {
configFile, err = findConfigFileFromConfigMap(kubeClient, namespace, configMapName)
configFile, err = findConfigFileFromConfigMap(kubeClient, namespace, configMapName, DefaultConfigFileKey)
if err != nil {
return fmt.Errorf("invalid empty flag %v and it also does not exist at ConfigMap %v/%v with err: %v", FlagConfigFile, namespace, configMapName, err)
}
......@@ -200,7 +206,23 @@ func startDaemon(c *cli.Context) error {
return fmt.Errorf("invalid empty flag %v", FlagServiceAccountName)
}
provisioner, err := NewProvisioner(stopCh, kubeClient, configFile, namespace, helperImage, configMapName, serviceAccountName)
// If the helper pod file is not specified, look up the helper pod template in the config map under the key helperPod.yaml.
// If it is specified via the flag FlagHelperPodFile, load the template from that file.
helperPodFile := c.String(FlagHelperPodFile)
helperPodYaml := ""
if helperPodFile == "" {
helperPodYaml, err = findConfigFileFromConfigMap(kubeClient, namespace, configMapName, DefaultHelperPodFile)
if err != nil {
return fmt.Errorf("invalid empty flag %v and it also does not exist at ConfigMap %v/%v with err: %v", FlagConfigFile, namespace, configMapName, err)
}
} else {
helperPodYaml, err = loadFile(helperPodFile)
if err != nil {
return fmt.Errorf("could not open file %v with err: %v", helperPodFile, err)
}
}
provisioner, err := NewProvisioner(stopCh, kubeClient, configFile, namespace, helperImage, configMapName, serviceAccountName, helperPodYaml)
if err != nil {
return err
}
......
......@@ -6,6 +6,7 @@ import (
"os"
"path/filepath"
"reflect"
"strconv"
"strings"
"sync"
"time"
......@@ -50,6 +51,7 @@ type LocalPathProvisioner struct {
configFile string
configMapName string
configMutex *sync.RWMutex
helperPod *v1.Pod
}
type NodePathMapData struct {
......@@ -69,7 +71,8 @@ type Config struct {
NodePathMap map[string]*NodePathMap
}
func NewProvisioner(stopCh chan struct{}, kubeClient *clientset.Clientset, configFile, namespace, helperImage, configMapName string, serviceAccountName string) (*LocalPathProvisioner, error) {
func NewProvisioner(stopCh chan struct{}, kubeClient *clientset.Clientset,
configFile, namespace, helperImage, configMapName, serviceAccountName, helperPodYaml string) (*LocalPathProvisioner, error) {
p := &LocalPathProvisioner{
stopCh: stopCh,
......@@ -85,6 +88,11 @@ func NewProvisioner(stopCh chan struct{}, kubeClient *clientset.Clientset, confi
configMapName: configMapName,
configMutex: &sync.RWMutex{},
}
var err error
p.helperPod, err = loadHelperPodFile(helperPodYaml)
if err != nil {
return nil, err
}
if err := p.refreshConfig(); err != nil {
return nil, err
}
......@@ -193,11 +201,14 @@ func (p *LocalPathProvisioner) Provision(opts pvController.ProvisionOptions) (*v
path := filepath.Join(basePath, folderName)
logrus.Infof("Creating volume %v at %v:%v", name, node.Name, path)
storage := pvc.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
volMode := string(*pvc.Spec.VolumeMode)
createCmdsForPath := []string{
"/bin/sh",
"/script/setup",
}
if err := p.createHelperPod(ActionTypeCreate, createCmdsForPath, name, path, node.Name); err != nil {
if err := p.createHelperPod(ActionTypeCreate, createCmdsForPath, name, path, node.Name, volMode, storage.Value()); err != nil {
return nil, err
}
......@@ -251,8 +262,10 @@ func (p *LocalPathProvisioner) Delete(pv *v1.PersistentVolume) (err error) {
}
if pv.Spec.PersistentVolumeReclaimPolicy != v1.PersistentVolumeReclaimRetain {
logrus.Infof("Deleting volume %v at %v:%v", pv.Name, node, path)
storage := pv.Spec.Capacity[v1.ResourceName(v1.ResourceStorage)]
volMode := string(*pv.Spec.VolumeMode)
cleanupCmdsForPath := []string{"/bin/sh", "/script/teardown"}
if err := p.createHelperPod(ActionTypeDelete, cleanupCmdsForPath, pv.Name, path, node); err != nil {
if err := p.createHelperPod(ActionTypeDelete, cleanupCmdsForPath, pv.Name, path, node, volMode, storage.Value()); err != nil {
logrus.Infof("clean up volume %v failed: %v", pv.Name, err)
return err
}
......@@ -303,7 +316,7 @@ func (p *LocalPathProvisioner) getPathAndNodeForPV(pv *v1.PersistentVolume) (pat
return path, node, nil
}
func (p *LocalPathProvisioner) createHelperPod(action ActionType, cmdsForPath []string, name, path, node string) (err error) {
func (p *LocalPathProvisioner) createHelperPod(action ActionType, cmdsForPath []string, name, path, node, volumeMode string, sizeInBytes int64) (err error) {
defer func() {
err = errors.Wrapf(err, "failed to %v volume %v", action, name)
}()
......@@ -322,43 +335,8 @@ func (p *LocalPathProvisioner) createHelperPod(action ActionType, cmdsForPath []
// it covers the `/` case
return fmt.Errorf("invalid path %v for %v: cannot find parent dir or volume dir", action, path)
}
hostPathType := v1.HostPathDirectoryOrCreate
helperPod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: string(action) + "-" + name,
Namespace: p.namespace,
},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
NodeName: node,
ServiceAccountName: p.serviceAccountName,
Tolerations: []v1.Toleration{
{
Operator: v1.TolerationOpExists,
},
},
Containers: []v1.Container{
{
Name: "local-path-" + string(action),
Image: p.helperImage,
Command: append(cmdsForPath, filepath.Join("/data/", volumeDir)),
VolumeMounts: []v1.VolumeMount{
{
Name: "data",
ReadOnly: false,
MountPath: "/data/",
},
{
Name: "script",
ReadOnly: false,
MountPath: "/script",
},
},
ImagePullPolicy: v1.PullIfNotPresent,
},
},
Volumes: []v1.Volume{
lpvVolumes := []v1.Volume{
{
Name: "data",
VolumeSource: v1.VolumeSource{
......@@ -388,9 +366,37 @@ func (p *LocalPathProvisioner) createHelperPod(action ActionType, cmdsForPath []
},
},
},
}
lpvVolumeMounts := []v1.VolumeMount{
{
Name: "data",
ReadOnly: false,
MountPath: parentDir,
},
{
Name: "script",
ReadOnly: false,
MountPath: "/script",
},
}
lpvTolerations := []v1.Toleration{
{
Operator: v1.TolerationOpExists,
},
}
helperPod := p.helperPod.DeepCopy()
helperPod.Namespace = p.namespace
helperPod.Spec.NodeName = node
helperPod.Spec.ServiceAccountName = p.serviceAccountName
helperPod.Spec.RestartPolicy = v1.RestartPolicyNever
helperPod.Spec.Tolerations = append(helperPod.Spec.Tolerations, lpvTolerations...)
helperPod.Spec.Volumes = append(helperPod.Spec.Volumes, lpvVolumes...)
helperPod.Spec.Containers[0].VolumeMounts = append(helperPod.Spec.Containers[0].VolumeMounts, lpvVolumeMounts...)
helperPod.Spec.Containers[0].Command = cmdsForPath
helperPod.Spec.Containers[0].Args = []string{"-p", filepath.Join(parentDir, volumeDir),
"-s", strconv.FormatInt(sizeInBytes, 10),
"-m", volumeMode}
// If it already exists due to some previous errors, the pod will be cleaned up later automatically
// https://github.com/rancher/local-path-provisioner/issues/27
......
util.go 0 → 100644
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
v1 "k8s.io/api/core/v1"
"sigs.k8s.io/yaml"
)
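// loadFile reads the file at the given path and returns its contents as a string.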
func loadFile(filepath string) (string, error) {
f, err := os.Open(filepath)
if err != nil {
return "", err
}
defer f.Close()
helperPodYaml, err := ioutil.ReadAll(f)
if err != nil {
return "", err
}
return string(helperPodYaml), nil
}
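// loadHelperPodFile parses the helper pod YAML manifest into a core/v1 Pod object.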
func loadHelperPodFile(helperPodYaml string) (*v1.Pod, error) {
helperPodJSON, err := yaml.YAMLToJSON([]byte(helperPodYaml))
if err != nil {
return nil, fmt.Errorf("invalid YAMLToJSON the helper pod with helperPodYaml: %v", helperPodYaml)
}
p := v1.Pod{}
err = json.Unmarshal(helperPodJSON, &p)
if err != nil {
return nil, fmt.Errorf("invalid unmarshal the helper pod with helperPodJson: %v", string(helperPodJSON))
}
return &p, nil
}