
PVC directory cannot be deleted when I use NODE_HOST_PATH to set a custom directory, such as /data, as my hostPath mount point #9

Open
xautlmx opened this issue Aug 26, 2021 · 3 comments
Labels: bug (Something isn't working)

Comments

xautlmx commented Aug 26, 2021

When I use NODE_HOST_PATH to set a custom directory, such as /data, as my hostPath mount point, and create a PVC and pod from the YAML file below, the PVC directory is created in /data as expected. But when I delete the pod and PVC, the PVC directory still exists.

[root@easyk8s1 ~]# kubectl  get sc
NAME                            PROVISIONER      RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
hostpath                        hostpath         Delete          Immediate           false                  15m
managed-nfs-storage (default)   fuseim.pri/ifs   Delete          Immediate           false                  11d

[root@easyk8s1 ~]# cat /root/hostpath-pvc.yaml 
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-hostpath-pvc
spec:
  storageClassName: "hostpath"
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 100Mi

---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: test-deployment
  labels: 
    app: test-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: test-deployment
  template:
    metadata:
      labels:
        app: test-deployment
    spec:
      containers:
      - name: test-deployment
        image: nginx
        imagePullPolicy: IfNotPresent
        ports:
          - name: http
            containerPort: 80
            protocol: TCP
        volumeMounts:
          - name: hostpath-pvc
            mountPath: "/usr/share/nginx/html"
      volumes:
        - name: hostpath-pvc
          persistentVolumeClaim:
            claimName: test-hostpath-pvc

[root@easyk8s1 ~]# kubectl  get deployment -n hostpath-provisioner my-hostpath-provisioner  -o yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "1"
  creationTimestamp: "2021-08-26T12:49:01Z"
  generation: 1
  labels:
    app.kubernetes.io/instance: my-hostpath-provisioner
    app.kubernetes.io/managed-by: Tiller
    app.kubernetes.io/name: hostpath-provisioner
    helm.sh/chart: hostpath-provisioner-0.2.11
  managedFields:
  - apiVersion: apps/v1
    fieldsType: FieldsV1
    fieldsV1:
      f:metadata:
        f:labels:
          .: {}
          f:app.kubernetes.io/instance: {}
          f:app.kubernetes.io/managed-by: {}
          f:app.kubernetes.io/name: {}
          f:helm.sh/chart: {}
      f:spec:
        f:progressDeadlineSeconds: {}
        f:replicas: {}
        f:revisionHistoryLimit: {}
        f:selector: {}
        f:strategy:
          f:type: {}
        f:template:
          f:metadata:
            f:labels:
              .: {}
              f:app.kubernetes.io/instance: {}
              f:app.kubernetes.io/name: {}
          f:spec:
            f:containers:
              k:{"name":"hostpath-provisioner"}:
                .: {}
                f:env:
                  .: {}
                  k:{"name":"HOSTPATH_PROVISIONER_NAME"}:
                    .: {}
                    f:name: {}
                    f:value: {}
                  k:{"name":"NODE_HOST_PATH"}:
                    .: {}
                    f:name: {}
                    f:value: {}
                  k:{"name":"NODE_NAME"}:
                    .: {}
                    f:name: {}
                    f:valueFrom:
                      .: {}
                      f:fieldRef:
                        .: {}
                        f:apiVersion: {}
                        f:fieldPath: {}
                f:image: {}
                f:imagePullPolicy: {}
                f:name: {}
                f:resources:
                  .: {}
                  f:limits:
                    .: {}
                    f:cpu: {}
                    f:memory: {}
                  f:requests:
                    .: {}
                    f:cpu: {}
                    f:memory: {}
                f:terminationMessagePath: {}
                f:terminationMessagePolicy: {}
                f:volumeMounts:
                  .: {}
                  k:{"mountPath":"/mnt/hostpath"}:
                    .: {}
                    f:mountPath: {}
                    f:name: {}
            f:dnsPolicy: {}
            f:restartPolicy: {}
            f:schedulerName: {}
            f:securityContext: {}
            f:serviceAccount: {}
            f:serviceAccountName: {}
            f:terminationGracePeriodSeconds: {}
            f:volumes:
              .: {}
              k:{"name":"pv-volume"}:
                .: {}
                f:hostPath:
                  .: {}
                  f:path: {}
                  f:type: {}
                f:name: {}
    manager: Go-http-client
    operation: Update
    time: "2021-08-26T12:49:01Z"
  - apiVersion: apps/v1
    fieldsType: FieldsV1
    fieldsV1:
      f:metadata:
        f:annotations:
          .: {}
          f:deployment.kubernetes.io/revision: {}
      f:status:
        f:availableReplicas: {}
        f:conditions:
          .: {}
          k:{"type":"Available"}:
            .: {}
            f:lastTransitionTime: {}
            f:lastUpdateTime: {}
            f:message: {}
            f:reason: {}
            f:status: {}
            f:type: {}
          k:{"type":"Progressing"}:
            .: {}
            f:lastTransitionTime: {}
            f:lastUpdateTime: {}
            f:message: {}
            f:reason: {}
            f:status: {}
            f:type: {}
        f:observedGeneration: {}
        f:readyReplicas: {}
        f:replicas: {}
        f:updatedReplicas: {}
    manager: kube-controller-manager
    operation: Update
    time: "2021-08-26T12:49:02Z"
  name: my-hostpath-provisioner
  namespace: hostpath-provisioner
  resourceVersion: "143592"
  selfLink: /apis/apps/v1/namespaces/hostpath-provisioner/deployments/my-hostpath-provisioner
  uid: 8b07a84e-ceb3-48ca-8148-43e9077b1911
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app.kubernetes.io/instance: my-hostpath-provisioner
      app.kubernetes.io/name: hostpath-provisioner
  strategy:
    type: Recreate
  template:
    metadata:
      creationTimestamp: null
      labels:
        app.kubernetes.io/instance: my-hostpath-provisioner
        app.kubernetes.io/name: hostpath-provisioner
    spec:
      containers:
      - env:
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: spec.nodeName
        - name: NODE_HOST_PATH
          value: /data
        - name: HOSTPATH_PROVISIONER_NAME
          value: hostpath
        image: quay.io/rimusz/hostpath-provisioner:v0.2.3
        imagePullPolicy: IfNotPresent
        name: hostpath-provisioner
        resources:
          limits:
            cpu: 100m
            memory: 128Mi
          requests:
            cpu: 100m
            memory: 128Mi
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
        volumeMounts:
        - mountPath: /mnt/hostpath
          name: pv-volume
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      serviceAccount: my-hostpath-provisioner
      serviceAccountName: my-hostpath-provisioner
      terminationGracePeriodSeconds: 30
      volumes:
      - hostPath:
          path: /data
          type: ""
        name: pv-volume
status:
  availableReplicas: 1
  conditions:
  - lastTransitionTime: "2021-08-26T12:49:02Z"
    lastUpdateTime: "2021-08-26T12:49:02Z"
    message: Deployment has minimum availability.
    reason: MinimumReplicasAvailable
    status: "True"
    type: Available
  - lastTransitionTime: "2021-08-26T12:49:01Z"
    lastUpdateTime: "2021-08-26T12:49:02Z"
    message: ReplicaSet "my-hostpath-provisioner-75d6bb5868" has successfully progressed.
    reason: NewReplicaSetAvailable
    status: "True"
    type: Progressing
  observedGeneration: 1
  readyReplicas: 1
  replicas: 1
  updatedReplicas: 1

[root@easyk8s1 ~]# kubectl apply -f /root/hostpath-pvc.yaml 
persistentvolumeclaim/test-hostpath-pvc created
deployment.apps/test-deployment created

[root@easyk8s1 ~]# kubectl get pvc
NAME                STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
test-hostpath-pvc   Bound    pvc-51b85064-d158-44cc-a0d5-10f7ad286407   100Mi      RWO            hostpath       14s

[root@easyk8s1 ~]# ls /data/
pvc-51b85064-d158-44cc-a0d5-10f7ad286407

[root@easyk8s1 ~]# kubectl delete -f /root/hostpath-pvc.yaml 
persistentvolumeclaim "test-hostpath-pvc" deleted
deployment.apps "test-deployment" deleted

[root@easyk8s1 ~]# ls /data/
pvc-51b85064-d158-44cc-a0d5-10f7ad286407

[root@easyk8s1 ~]# kubectl  logs -n hostpath-provisioner my-hostpath-provisioner-75d6bb5868-rgqpl
I0826 13:08:36.341866       1 controller.go:926] provision "default/test-hostpath-pvc" class "hostpath": started
I0826 13:08:36.357548       1 controller.go:1026] provision "default/test-hostpath-pvc" class "hostpath": volume "pvc-51b85064-d158-44cc-a0d5-10f7ad286407" provisioned
I0826 13:08:36.357653       1 controller.go:1040] provision "default/test-hostpath-pvc" class "hostpath": trying to save persistentvolume "pvc-51b85064-d158-44cc-a0d5-10f7ad286407"
I0826 13:08:36.358289       1 event.go:221] Event(v1.ObjectReference{Kind:"PersistentVolumeClaim", Namespace:"default", Name:"test-hostpath-pvc", UID:"51b85064-d158-44cc-a0d5-10f7ad286407", APIVersion:"v1", ResourceVersion:"146892", FieldPath:""}): type: 'Normal' reason: 'Provisioning' External provisioner is provisioning volume for claim "default/test-hostpath-pvc"
I0826 13:08:36.374979       1 controller.go:1047] provision "default/test-hostpath-pvc" class "hostpath": persistentvolume "pvc-51b85064-d158-44cc-a0d5-10f7ad286407" saved
I0826 13:08:36.375018       1 controller.go:1088] provision "default/test-hostpath-pvc" class "hostpath": succeeded
I0826 13:08:36.375078       1 event.go:221] Event(v1.ObjectReference{Kind:"PersistentVolumeClaim", Namespace:"default", Name:"test-hostpath-pvc", UID:"51b85064-d158-44cc-a0d5-10f7ad286407", APIVersion:"v1", ResourceVersion:"146892", FieldPath:""}): type: 'Normal' reason: 'ProvisioningSucceeded' Successfully provisioned volume pvc-51b85064-d158-44cc-a0d5-10f7ad286407
I0826 13:09:22.030954       1 controller.go:1097] delete "pvc-51b85064-d158-44cc-a0d5-10f7ad286407": started
I0826 13:09:22.035694       1 controller.go:1125] delete "pvc-51b85064-d158-44cc-a0d5-10f7ad286407": volume deleted
I0826 13:09:22.041431       1 controller.go:1135] delete "pvc-51b85064-d158-44cc-a0d5-10f7ad286407": persistentvolume deleted
I0826 13:09:22.041447       1 controller.go:1137] delete "pvc-51b85064-d158-44cc-a0d5-10f7ad286407": succeeded
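
Note that the log reports "volume deleted" and "succeeded" even though the directory survives on the host. A likely explanation: the PV records its directory as /data/pvc-..., but inside the provisioner container the host's /data is mounted at /mnt/hostpath, so the recorded path does not exist from the container's point of view, and removing a nonexistent path is a silent no-op. A probe along these lines (hypothetical output, reusing the pod name from the logs above) would show the mismatch:

# the PV's recorded path is not visible inside the container...
$ kubectl exec -n hostpath-provisioner my-hostpath-provisioner-75d6bb5868-rgqpl -- ls /data
ls: cannot access '/data': No such file or directory

# ...because the host's /data is mounted at /mnt/hostpath instead
$ kubectl exec -n hostpath-provisioner my-hostpath-provisioner-75d6bb5868-rgqpl -- ls /mnt/hostpath
pvc-51b85064-d158-44cc-a0d5-10f7ad286407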
rimusz added the bug label on Sep 8, 2021
tbnguyen1407 commented Oct 4, 2021

Hello, any update on this? I ran into the same issue using a custom NODE_HOST_PATH. For some reason, only /mnt/hostpath produces the correct permissions (0777); using any other path, e.g. /mnt/mypath, produces the wrong permissions (0755).

An observation: when using the default /mnt/hostpath, creating a PVC creates the folder automatically, while with a custom path the folder is not created until a pod mounts it. This explains the different permissions: in the default case the provisioner creates the folder with umask 0, while in the custom case the pod creates the folder with the default umask 0022.
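
The umask difference is easy to reproduce outside Kubernetes. A minimal demonstration on any Linux host (the /tmp paths are arbitrary):

# a directory created with umask 0 gets mode 0777
$ (umask 0 && mkdir /tmp/demo-umask0) && stat -c '%a' /tmp/demo-umask0
777

# a directory created with the common default umask 0022 gets mode 0755
$ (umask 022 && mkdir /tmp/demo-umask022) && stat -c '%a' /tmp/demo-umask022
755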

Reproduction

(1) With default value

# deploy provisioner
$ helm upgrade hostpath-provisioner rimusz/hostpath-provisioner --install

# deploy pvc
$ kubectl create -f https://raw.githubusercontent.com/rimusz/hostpath-provisioner/master/deploy/test-claim.yaml

# pv folder is created with correct permissions
$ ll /mnt/hostpath
drwxrwxrwx 2 root root 4096 Oct  4 05:27 pvc-df22fb40-d0b8-45fc-a373-7f7841f32ac3/

(2) With custom value

# deploy provisioner
$ helm upgrade hostpath-provisioner rimusz/hostpath-provisioner --install --set nodeHostPath=/mnt/mypath

# deploy pvc
$ kubectl create -f https://raw.githubusercontent.com/rimusz/hostpath-provisioner/master/deploy/test-claim.yaml

# pv folder is not created
$ ll /mnt/mypath
<empty>

# deploy test-pod
$ kubectl create -f https://raw.githubusercontent.com/rimusz/hostpath-provisioner/master/deploy/test-pod.yaml

# pv folder is created with wrong permissions
$ ll /mnt/mypath
drwxr-xr-x  2 root   root   4096 Oct  4 05:18 pvc-f8fe8d17-0593-474d-b0b3-4985d206e124/

Expectation

The folder should always be created by the provisioner with relaxed permissions, even when a custom node hostPath is used.
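
For illustration: an explicit chmod after mkdir is not subject to the umask, which is one way the directory could end up 0777 regardless of who creates it. A sketch in shell (the real provisioner is written in Go; the variable names here are hypothetical):

# create the PV directory, then force 0777 regardless of the caller's umask
mkdir -p "$NODE_HOST_PATH/$PV_NAME"
chmod 0777 "$NODE_HOST_PATH/$PV_NAME"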

seleem-aeyelabs commented Nov 2, 2021

Hello,
I have faced the same problem, and I found a solution: the mount path inside the provisioner pod must be the same as the hostPath outside the pod.
Here is the complete YAML:

$ helm template hostpath .
---
# Source: hostpath-provisioner/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: hostpath-hostpath-provisioner
  labels:
    app.kubernetes.io/name: hostpath-provisioner
    helm.sh/chart: hostpath-provisioner-0.2.13
    app.kubernetes.io/instance: hostpath
    app.kubernetes.io/managed-by: Helm
---
# Source: hostpath-provisioner/templates/storageclass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: hostpath
  labels:
    app.kubernetes.io/name: hostpath-provisioner
    helm.sh/chart: hostpath-provisioner-0.2.13
    app.kubernetes.io/instance: hostpath
    app.kubernetes.io/managed-by: Helm
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: hostpath
reclaimPolicy: Delete
---
# Source: hostpath-provisioner/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: hostpath-hostpath-provisioner
  labels:
    app.kubernetes.io/name: hostpath-provisioner
    helm.sh/chart: hostpath-provisioner-0.2.13
    app.kubernetes.io/instance: hostpath
    app.kubernetes.io/managed-by: Helm
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
# Source: hostpath-provisioner/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: hostpath-hostpath-provisioner
  labels:
    app.kubernetes.io/name: hostpath-provisioner
    helm.sh/chart: hostpath-provisioner-0.2.13
    app.kubernetes.io/instance: hostpath
    app.kubernetes.io/managed-by: Helm
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: hostpath-hostpath-provisioner
subjects:
  - kind: ServiceAccount
    name: hostpath-hostpath-provisioner
    namespace: default
---
# Source: hostpath-provisioner/templates/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: hostpath-hostpath-provisioner-leader-locking
  labels:
    app.kubernetes.io/name: hostpath-provisioner
    helm.sh/chart: hostpath-provisioner-0.2.13
    app.kubernetes.io/instance: hostpath
    app.kubernetes.io/managed-by: Helm
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "update", "patch"]
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["list", "watch", "create"]
---
# Source: hostpath-provisioner/templates/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: hostpath-hostpath-provisioner-leader-locking
  labels:
    app.kubernetes.io/name: hostpath-provisioner
    helm.sh/chart: hostpath-provisioner-0.2.13
    app.kubernetes.io/instance: hostpath
    app.kubernetes.io/managed-by: Helm
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: hostpath-hostpath-provisioner-leader-locking
subjects:
  - kind: ServiceAccount
    name: hostpath-hostpath-provisioner
    namespace: default
---
# Source: hostpath-provisioner/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hostpath-hostpath-provisioner
  labels:
    app.kubernetes.io/name: hostpath-provisioner
    helm.sh/chart: hostpath-provisioner-0.2.13
    app.kubernetes.io/instance: hostpath
    app.kubernetes.io/managed-by: Helm
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app.kubernetes.io/name: hostpath-provisioner
      app.kubernetes.io/instance: hostpath
  template:
    metadata:
      labels:
        app.kubernetes.io/name: hostpath-provisioner
        app.kubernetes.io/instance: hostpath
    spec:
      serviceAccountName: hostpath-hostpath-provisioner
      containers:
        - name: hostpath-provisioner
          image: "quay.io/rimusz/hostpath-provisioner:v0.2.5"
          imagePullPolicy: IfNotPresent
          env:
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: NODE_HOST_PATH
              value: "/Data/Volumes"
            - name: HOSTPATH_PROVISIONER_NAME
              value: "hostpath"
          volumeMounts:
            - name: pv-volume
              mountPath: /Data/Volumes
          resources:
            limits:
              cpu: 100m
              memory: 128Mi
            requests:
              cpu: 100m
              memory: 128Mi
      volumes:
        - name: pv-volume
          hostPath:
            path: /Data/Volumes

To wrap up: in hostpath-provisioner/templates/deployment.yaml, change the pv-volume mountPath under volumeMounts to be the same as the pv-volume hostPath.path under volumes (e.g. /Data/Volumes).
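
If you render the chart rather than editing it, a hypothetical one-liner that makes the same substitution on the rendered output (nodeHostPath is the chart value used earlier in this thread; the sed pattern assumes the default mountPath of /mnt/hostpath):

$ helm template hostpath . --set nodeHostPath=/Data/Volumes \
    | sed 's|mountPath: /mnt/hostpath|mountPath: /Data/Volumes|' \
    | kubectl apply -f -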

@nikashitsa
The bug still exists.
My workaround for the custom directory /media/default-storage:

kubectl patch deployment hostpath-provisioner -n kube-system --patch-file hostpath-provisioner.patch.yaml

# hostpath-provisioner.patch.yaml
apiVersion: apps/v1
kind: Deployment
spec:
  template:
    spec:
      containers:
      - env:
        - name: PV_DIR
          value: /media/default-storage
        name: hostpath-provisioner
        volumeMounts:
        - mountPath: /media/default-storage
          name: pv-volume
      volumes:
      - hostPath:
          path: /media/default-storage
        name: pv-volume
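
To verify either workaround, re-running the repro from the top of this issue should now leave the host directory clean (expected behaviour, assuming the claim binds to the patched provisioner's storage class):

$ kubectl apply -f /root/hostpath-pvc.yaml
$ kubectl delete -f /root/hostpath-pvc.yaml
$ ls /media/default-storage
# the pvc-... directory should now be gone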
