Commit feac941e by tingweiwang

nfs storage class

# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof
nfs-client-provisioner
# v2.0.1
- Add support for ARM (Raspberry PI). Image at `quay.io/external_storage/nfs-client-provisioner-arm`. (https://github.com/kubernetes-incubator/external-storage/pull/275)
# v2.0.0
- Fix issue 149 - nfs-client-provisioner create folder with 755, not 777 (https://github.com/kubernetes-incubator/external-storage/pull/150)
# v1
- Initial release
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ifeq ($(REGISTRY),)
REGISTRY = quay.io/external_storage/
endif
ifeq ($(VERSION),)
VERSION = latest
endif
IMAGE = $(REGISTRY)nfs-client-provisioner:$(VERSION)
IMAGE_ARM = $(REGISTRY)nfs-client-provisioner-arm:$(VERSION)
MUTABLE_IMAGE = $(REGISTRY)nfs-client-provisioner:latest
MUTABLE_IMAGE_ARM = $(REGISTRY)nfs-client-provisioner-arm:latest
all: build image build_arm image_arm
container: build image build_arm image_arm
build:
	CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o docker/x86_64/nfs-client-provisioner ./cmd/nfs-client-provisioner

build_arm:
	CGO_ENABLED=0 GOOS=linux GOARCH=arm GOARM=7 go build -a -ldflags '-extldflags "-static"' -o docker/arm/nfs-client-provisioner ./cmd/nfs-client-provisioner

image:
	docker build -t $(MUTABLE_IMAGE) docker/x86_64
	docker tag $(MUTABLE_IMAGE) $(IMAGE)

image_arm:
	docker run --rm --privileged multiarch/qemu-user-static:register --reset
	docker build -t $(MUTABLE_IMAGE_ARM) docker/arm
	docker tag $(MUTABLE_IMAGE_ARM) $(IMAGE_ARM)

push:
	docker push $(IMAGE)
	docker push $(MUTABLE_IMAGE)
	docker push $(IMAGE_ARM)
	docker push $(MUTABLE_IMAGE_ARM)
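For reference, a typical invocation of this Makefile might look like the following (a sketch; REGISTRY and VERSION fall back to the defaults above when unset):

```sh
$ make all VERSION=v2.0.1    # build x86_64 and ARM binaries and images
$ make push VERSION=v2.0.1   # push the versioned and :latest tags
```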
approvers:
- jackielii
# Kubernetes NFS-Client Provisioner
[![Docker Repository on Quay](https://quay.io/repository/external_storage/nfs-client-provisioner/status "Docker Repository on Quay")](https://quay.io/repository/external_storage/nfs-client-provisioner)
**nfs-client** is an automatic provisioner that uses your *existing and already configured* NFS server to support dynamic provisioning of Kubernetes Persistent Volumes via Persistent Volume Claims. Persistent volumes are provisioned as ``${namespace}-${pvcName}-${pvName}``.
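For example, a claim named `test-claim` in the `default` namespace would show up on the export as a directory like the one below (the PV name suffix is generated by Kubernetes; this listing is illustrative):

```sh
$ ls /exported/path
default-test-claim-pvc-dce84888-7a9d-11e6-b1ee-5254001e0c1b
```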
# How to deploy nfs-client to your cluster.
To note again, you must *already* have an NFS Server.
## With Helm
Follow the instructions for the stable helm chart maintained at https://github.com/helm/charts/tree/master/stable/nfs-client-provisioner
The tl;dr is
```bash
$ helm install stable/nfs-client-provisioner --set nfs.server=x.x.x.x --set nfs.path=/exported/path
```
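Afterwards you can verify the release came up (a quick check; the pod label may vary with the chart version):

```sh
$ helm ls
$ kubectl get pods -l app=nfs-client-provisioner
```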
## Without Helm
**Step 1: Get connection information for your NFS server**. Make sure your NFS server is accessible from your Kubernetes cluster and get the information you need to connect to it. At a minimum you will need its hostname.
**Step 2: Get the NFS-Client Provisioner files**. To set up the provisioner you will download a set of YAML files, edit them to add your NFS server's connection information, and then apply each with the ``kubectl`` / ``oc`` command.
Get all of the files in the [deploy](https://github.com/kubernetes-incubator/external-storage/tree/master/nfs-client/deploy) directory of this repository. These instructions assume that you have cloned the [external-storage](https://github.com/kubernetes-incubator/external-storage) repository and have a bash-shell open in the ``nfs-client`` directory.
**Step 3: Setup authorization**. If your cluster has RBAC enabled or you are running OpenShift you must authorize the provisioner. If you are in a namespace/project other than "default" edit `deploy/rbac.yaml`.
Kubernetes:
```sh
# Set the subject of the RBAC objects to the current namespace where the provisioner is being deployed
$ NS=$(kubectl config get-contexts|grep -e "^\*" |awk '{print $5}')
$ NAMESPACE=${NS:-default}
$ sed -i'' "s/namespace:.*/namespace: $NAMESPACE/g" ./deploy/rbac.yaml
$ kubectl create -f deploy/rbac.yaml
```
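To sanity-check that the objects were created (the names come from `deploy/rbac.yaml`):

```sh
$ kubectl get clusterrole nfs-client-provisioner-runner
$ kubectl get clusterrolebinding run-nfs-client-provisioner
$ kubectl get role,rolebinding -n $NAMESPACE | grep leader-locking-nfs-client-provisioner
```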
OpenShift:
On some installations of OpenShift the default admin user does not have cluster-admin permissions. If these commands fail refer to the OpenShift documentation for **User and Role Management** or contact your OpenShift provider to help you grant the right permissions to your admin user.
```sh
# Set the subject of the RBAC objects to the current namespace where the provisioner is being deployed
$ NAMESPACE=`oc project -q`
$ sed -i'' "s/namespace:.*/namespace: $NAMESPACE/g" ./deploy/rbac.yaml
$ oc create -f deploy/rbac.yaml
$ oadm policy add-scc-to-user hostmount-anyuid system:serviceaccount:$NAMESPACE:nfs-client-provisioner
```
**Step 4: Configure the NFS-Client provisioner**
Note: To deploy to an ARM-based environment, use `deploy/deployment-arm.yaml` instead; otherwise use `deploy/deployment.yaml`.
Next you must edit the provisioner's deployment file to add connection information for your NFS server. Edit `deploy/deployment.yaml` and replace the two occurrences of `<YOUR NFS SERVER HOSTNAME>` with your server's hostname.
```yaml
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-client-provisioner
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nfs-client-provisioner
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: quay.io/external_storage/nfs-client-provisioner:latest
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: fuseim.pri/ifs
            - name: NFS_SERVER
              value: <YOUR NFS SERVER HOSTNAME>
            - name: NFS_PATH
              value: /var/nfs
      volumes:
        - name: nfs-client-root
          nfs:
            server: <YOUR NFS SERVER HOSTNAME>
            path: /var/nfs
```
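Once edited, deploy it and confirm the pod comes up (the label is set in the template above):

```sh
$ kubectl create -f deploy/deployment.yaml
$ kubectl get pods -l app=nfs-client-provisioner
```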
You may also want to change the PROVISIONER_NAME above from ``fuseim.pri/ifs`` to something more descriptive like ``nfs-storage``, but if you do, remember to also change the PROVISIONER_NAME in the storage class definition below:
This is `deploy/class.yaml` which defines the NFS-Client's Kubernetes Storage Class:
```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
provisioner: fuseim.pri/ifs # or choose another name, must match deployment's env PROVISIONER_NAME
parameters:
  archiveOnDelete: "false" # When set to "false" your PVs will not be archived
                           # by the provisioner upon deletion of the PVC.
```
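Apply it and confirm the class is registered:

```sh
$ kubectl create -f deploy/class.yaml
$ kubectl get storageclass managed-nfs-storage
```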
**Step 5: Finally, test your environment!**
Now we'll test your NFS provisioner.
Deploy:
```sh
$ kubectl create -f deploy/test-claim.yaml -f deploy/test-pod.yaml
```
Now check your NFS Server for the file `SUCCESS`.
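For example, on the NFS server itself (export path from the deployment example above; adjust to your export, and recall the `${namespace}-${pvcName}-${pvName}` directory naming):

```sh
$ ls /var/nfs/*/SUCCESS
```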
Then delete the test pod and claim:
```sh
$ kubectl delete -f deploy/test-pod.yaml -f deploy/test-claim.yaml
```
Now check that the folder has been deleted.
**Step 6: Deploying your own PersistentVolumeClaims**. To deploy your own PVC, make sure that you have the correct `storage-class` as indicated by your `deploy/class.yaml` file.
For example:
```yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-claim
  annotations:
    volume.beta.kubernetes.io/storage-class: "managed-nfs-storage"
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Mi
```
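You can apply the example above and watch the claim bind (binding may take a few seconds):

```sh
$ kubectl create -f deploy/test-claim.yaml
$ kubectl get pvc test-claim    # STATUS should become Bound
```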
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
provisioner: fuseim.pri/ifs # or choose another name, must match deployment's env PROVISIONER_NAME
parameters:
  archiveOnDelete: "false"
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-client-provisioner
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nfs-client-provisioner
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: quay.io/external_storage/nfs-client-provisioner-arm:latest
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: fuseim.pri/ifs
            - name: NFS_SERVER
              value: 10.10.10.60
            - name: NFS_PATH
              value: /ifs/kubernetes
      volumes:
        - name: nfs-client-root
          nfs:
            server: 10.10.10.60
            path: /ifs/kubernetes
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-client-provisioner
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nfs-client-provisioner
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: quay.io/external_storage/nfs-client-provisioner:latest
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: fuseim.pri/ifs
            - name: NFS_SERVER
              value: 10.10.10.60
            - name: NFS_PATH
              value: /ifs/kubernetes
      volumes:
        - name: nfs-client-root
          nfs:
            server: 10.10.10.60
            path: /ifs/kubernetes
The objects in this directory are the same as in the parent except split up into one file per object for certain users' convenience.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
provisioner: fuseim.pri/ifs # or choose another name, must match deployment's env PROVISIONER_NAME
parameters:
  archiveOnDelete: "false"
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
  name: nfs-client-provisioner
spec:
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: quay.io/external_storage/nfs-client-provisioner-arm:latest
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: fuseim.pri/ifs
            - name: NFS_SERVER
              value: 10.10.10.60
            - name: NFS_PATH
              value: /ifs/kubernetes
      volumes:
        - name: nfs-client-root
          nfs:
            server: 10.10.10.60
            path: /ifs/kubernetes
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
  name: nfs-client-provisioner
spec:
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: quay.io/external_storage/nfs-client-provisioner:latest
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: fuseim.pri/ifs
            - name: NFS_SERVER
              value: 10.10.10.60
            - name: NFS_PATH
              value: /ifs/kubernetes
      volumes:
        - name: nfs-client-root
          nfs:
            server: 10.10.10.60
            path: /ifs/kubernetes
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
kind: ServiceAccount
apiVersion: v1
metadata:
  name: nfs-client-provisioner
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-claim
  annotations:
    volume.beta.kubernetes.io/storage-class: "managed-nfs-storage"
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Mi
kind: Pod
apiVersion: v1
metadata:
  name: test-pod
spec:
  containers:
    - name: test-pod
      image: gcr.io/google_containers/busybox:1.24
      command:
        - "/bin/sh"
      args:
        - "-c"
        - "touch /mnt/SUCCESS && exit 0 || exit 1"
      volumeMounts:
        - name: nfs-pvc
          mountPath: "/mnt"
  restartPolicy: "Never"
  volumes:
    - name: nfs-pvc
      persistentVolumeClaim:
        claimName: test-claim
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM hypriot/rpi-alpine:3.6
RUN apk update --no-cache && apk add ca-certificates
COPY nfs-client-provisioner /nfs-client-provisioner
ENTRYPOINT ["/nfs-client-provisioner"]
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM alpine:3.6
RUN apk update --no-cache && apk add ca-certificates
COPY nfs-client-provisioner /nfs-client-provisioner
ENTRYPOINT ["/nfs-client-provisioner"]
# v1.0.8
- Add mountOptions StorageClass parameter (#84) (see [Usage](./docs/usage.md) for complete SC parameter info)
- Replace root-squash argument with a rootSquash SC parameter (#86) (see [Usage](./docs/usage.md) for complete SC parameter info)
- If the root-squash argument is specified, the provisioner will fail to start; if you're using it, please convert to the SC parameter before updating (see the sketch after this list)!
- Watch for unexpected stop of ganesha.nfsd and restart if seen (#98). This is a simple health check that mitigates NFS ganesha crashes which are under investigation (but probably out of the provisioner's control to prevent at the moment).
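For reference, a minimal sketch of a StorageClass carrying the new parameter (the parameter name comes from the entry above; the authoritative syntax is in [Usage](./docs/usage.md)):

```sh
kubectl apply -f - <<EOF
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: example-nfs
provisioner: example.com/nfs
parameters:
  rootSquash: "true"
EOF
```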
# v1.0.7
- Set a high limit for maximum number of files Ganesha may have open (setrlimit RLIMIT_NOFILE) -- this requires the additional SYS_RESOURCE capability, if not available the provisioner will still start but with a warning
# v1.0.6
- Reduce image size by a lot
# v1.0.5
- Add compatibility with kubernetes v1.6.x (using lib v2.0.x)
# v1.0.4
- Add `server-hostname` flag
# Rename kubernetes-incubator/nfs-provisioner to kubernetes-incubator/external-storage
- The previous releases were done when the repo was named nfs-provisioner: http://github.com/kubernetes-incubator/nfs-provisioner/releases. Newer releases done here in external-storage will *not* have corresponding git tags (external-storage's git tags are reserved for versioning the library), so to keep track of releases check this changelog, the [README](README.md), or [Quay](https://quay.io/repository/kubernetes_incubator/nfs-provisioner)
# v1.0.3
- Fix inability to export NFS shares ("1 validation errors in block FSAL") when using Docker's overlay storage driver (CoreOS/container linux, GCE) by patching Ganesha to use device number as fsid. (#63)
- Adds configurable number of retries on failed Provisioner operations. Configurable as an argument to `NewProvisionController`. nfs-provisioner defaults to 10 retries unless the new flag/argument is used. (#65)
# v1.0.2
- Usage demo & how-to for writing your own external PV provisioner added here https://github.com/kubernetes-incubator/nfs-provisioner/tree/master/demo
- Change behaviour for getting the NFS server IP from env vars (node, service) in case the POD_IP env var is not set when needed. Use `hostname -i` as a fallback only when running out-of-cluster (#52)
- Pass whole PVC object from controller to `Provision` as part of `VolumeOptions`, like upstream (#48)
- Filter out controller's self-generated race-to-lock leader election PVC updates from being seen as forced resync PVC updates (#58)
- Fix controller's event watching for ending race-to-lock leader elections early. Now correctly discover the latest `ProvisionFailed`/`ProvisionSucceeded` events on a claim (#59)
# v1.0.1
- Add rootsquash flag for enabling/disabling rootsquash https://github.com/kubernetes-incubator/nfs-provisioner/pull/40
# v1.0.0
- Automatically create NFS PVs for any user-defined Storage Class of PVCs, backed by a containerized NFS server that creates & exports shares from some user-defined mounted storage
- Support multiple ways to run:
- standalone Pod, e.g. for easy dynamically provisioned scratch space
- stateful app, either as a StatefulSet or Deployment of 1 replica: the NFS server will survive restarts and its provisioned PVs can be backed by some mounted persistent storage e.g. a hostPath or one big PV
- DaemonSet, where each node runs the NFS server to expose its hostPath storage
- Docker container or binary outside of Kube
- Race-to-lock PVCs: when multiple instances are running & serving the same PVCs, only one attempts to provision for a PVC at a time
- Optionally exponentially backoff from calls to Provision() and Delete()
- Optionally set per-PV filesystem quotas: based on XFS project-level quotas and available only when running outside of Kubernetes (pending mount option support in Kube)
Docker image:
`quay.io/kubernetes_incubator/nfs-provisioner:v1.0.0`
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ifeq ($(REGISTRY),)
REGISTRY = quay.io/kubernetes_incubator/
endif
ifeq ($(VERSION),)
VERSION = latest
endif
IMAGE = $(REGISTRY)nfs-provisioner:$(VERSION)
MUTABLE_IMAGE = $(REGISTRY)nfs-provisioner:latest
all build:
	GO111MODULE=on GOOS=linux go build ./cmd/nfs-provisioner
.PHONY: all build

container: build quick-container
.PHONY: container

quick-container:
	cp nfs-provisioner deploy/docker/nfs-provisioner
	docker build -t $(MUTABLE_IMAGE) deploy/docker
	docker tag $(MUTABLE_IMAGE) $(IMAGE)
.PHONY: quick-container

push: container
	docker push $(IMAGE)
	docker push $(MUTABLE_IMAGE)
.PHONY: push

test-all: test test-e2e

test:
	go test `go list ./... | grep -v 'vendor\|test\|demo'`
.PHONY: test

test-e2e:
	cd ./test/e2e; ./test.sh
.PHONY: test-e2e

clean:
	rm -f nfs-provisioner
	rm -f deploy/docker/nfs-provisioner
	rm -rf test/e2e/vendor
.PHONY: clean
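A typical flow with this Makefile (a sketch; note that `push` depends on `container`, which builds the binary and image first):

```sh
$ make test                  # run unit tests
$ make push VERSION=v1.0.8   # build, then push the versioned and :latest images
```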
approvers:
- wongma7
- jsafrane
# nfs-provisioner
[![Docker Repository on Quay](https://quay.io/repository/kubernetes_incubator/nfs-provisioner/status "Docker Repository on Quay")](https://quay.io/repository/kubernetes_incubator/nfs-provisioner)
```
quay.io/kubernetes_incubator/nfs-provisioner
```
nfs-provisioner is an out-of-tree dynamic provisioner for Kubernetes 1.4+. You can use it to quickly & easily deploy shared storage that works almost anywhere. Or it can help you write your own out-of-tree dynamic provisioner by serving as an example implementation of the requirements detailed in [the proposal](https://github.com/kubernetes/kubernetes/pull/30285).
It works just like in-tree dynamic provisioners: a `StorageClass` object can specify an instance of nfs-provisioner to be its `provisioner` like it specifies in-tree provisioners such as GCE or AWS. Then, the instance of nfs-provisioner will watch for `PersistentVolumeClaims` that ask for the `StorageClass` and automatically create NFS-backed `PersistentVolumes` for them. For more information on how dynamic provisioning works, see [the docs](http://kubernetes.io/docs/user-guide/persistent-volumes/) or [this blog post](http://blog.kubernetes.io/2016/10/dynamic-provisioning-and-storage-in-kubernetes.html).
## Quickstart
Choose some volume for your nfs-provisioner instance to store its state & data in and mount the volume at `/export` in `deploy/kubernetes/deployment.yaml`. It doesn't have to be a `hostPath` volume, it can e.g. be a PVC. Note that the volume must have a [supported file system](https://github.com/nfs-ganesha/nfs-ganesha/wiki/Fsalsupport#vfs) on it: any local filesystem on Linux is supported, but NFS itself is not.
```yaml
...
      volumeMounts:
        - name: export-volume
          mountPath: /export
  volumes:
    - name: export-volume
      hostPath:
        path: /tmp/nfs-provisioner
...
```
Choose a `provisioner` name for a `StorageClass` to specify and set it in `deploy/kubernetes/deployment.yaml`
```yaml
...
      args:
        - "-provisioner=example.com/nfs"
...
```
Create the deployment.
```console
$ kubectl create -f deploy/kubernetes/deployment.yaml
serviceaccount/nfs-provisioner created
service "nfs-provisioner" created
deployment "nfs-provisioner" created
```
Create the `ClusterRole`, `ClusterRoleBinding`, `Role` and `RoleBinding` (this is necessary if you use RBAC authorization on your cluster, which is the default for newer Kubernetes versions).
```console
$ kubectl create -f deploy/kubernetes/rbac.yaml
clusterrole.rbac.authorization.k8s.io/nfs-provisioner-runner created
clusterrolebinding.rbac.authorization.k8s.io/run-nfs-provisioner created
role.rbac.authorization.k8s.io/leader-locking-nfs-provisioner created
rolebinding.rbac.authorization.k8s.io/leader-locking-nfs-provisioner created
```
Create a `StorageClass` named "example-nfs" with `provisioner: example.com/nfs`.
```console
$ kubectl create -f deploy/kubernetes/class.yaml
storageclass "example-nfs" created
```
Create a `PersistentVolumeClaim` with annotation `volume.beta.kubernetes.io/storage-class: "example-nfs"`
```console
$ kubectl create -f deploy/kubernetes/claim.yaml
persistentvolumeclaim "nfs" created
```
A `PersistentVolume` is provisioned for the `PersistentVolumeClaim`. Now the claim can be consumed by some pod(s) and the backing NFS storage read from or written to.
```console
$ kubectl get pv
NAME CAPACITY ACCESSMODES RECLAIMPOLICY STATUS CLAIM REASON AGE
pvc-dce84888-7a9d-11e6-b1ee-5254001e0c1b 1Mi RWX Delete Bound default/nfs 23s
```
Deleting the `PersistentVolumeClaim` will cause the provisioner to delete the `PersistentVolume` and its data.
Deleting the provisioner deployment will cause any outstanding `PersistentVolumes` to become unusable for as long as the provisioner is gone.
## Running
To deploy nfs-provisioner on a Kubernetes cluster see [Deployment](docs/deployment.md).
To use nfs-provisioner once it is deployed see [Usage](docs/usage.md).
## [Changelog](CHANGELOG.md)
Releases done here in external-storage will not have corresponding git tags (external-storage's git tags are reserved for versioning the library), so to keep track of releases check this README, the [changelog](CHANGELOG.md), or [Quay](https://quay.io/repository/kubernetes_incubator/nfs-provisioner)
## Writing your own
Go [here](../docs/demo/hostpath-provisioner) for an example of how to write your own out-of-tree dynamic provisioner.
## Roadmap
This is still alpha/experimental and will change to reflect the [out-of-tree dynamic provisioner proposal](https://github.com/kubernetes/kubernetes/pull/30285)
## Community, discussion, contribution, and support
Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/).
You can reach the maintainers of this project at:
- Slack: #sig-storage
## Kubernetes Incubator
This is a [Kubernetes Incubator project](https://github.com/kubernetes/community/blob/master/incubator.md). The project was established 2016-11-15. The incubator team for the project is:
- Sponsor: Clayton (@smarterclayton)
- Champion: Brad (@childsb)
- SIG: sig-storage
### Code of conduct
Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md).
# Release Process
nfs-provisioner is released on an as-needed basis. The process is as follows:
1. An issue is proposing a new release with a changelog since the last release
1. An OWNER runs `make test` to make sure tests pass
1. An OWNER runs `git tag -a $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION`
1. An OWNER runs `make push` to build and push the image
1. The release issue is closed
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main

import (
	"flag"
	"strings"
	"time"

	"github.com/golang/glog"
	"github.com/kubernetes-incubator/external-storage/nfs/pkg/server"
	vol "github.com/kubernetes-incubator/external-storage/nfs/pkg/volume"
	"github.com/kubernetes-sigs/sig-storage-lib-external-provisioner/controller"
	"k8s.io/apimachinery/pkg/util/validation"
	"k8s.io/apimachinery/pkg/util/validation/field"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

var (
	provisioner    = flag.String("provisioner", "example.com/nfs", "Name of the provisioner. The provisioner will only provision volumes for claims that request a StorageClass with a provisioner field set equal to this name.")
	master         = flag.String("master", "", "Master URL to build a client config from. Either this or kubeconfig needs to be set if the provisioner is being run out of cluster.")
	kubeconfig     = flag.String("kubeconfig", "", "Absolute path to the kubeconfig file. Either this or master needs to be set if the provisioner is being run out of cluster.")
	runServer      = flag.Bool("run-server", true, "If the provisioner is responsible for running the NFS server, i.e. starting and stopping NFS Ganesha. Default true.")
	useGanesha     = flag.Bool("use-ganesha", true, "If the provisioner will create volumes using NFS Ganesha (D-Bus method calls) as opposed to using the kernel NFS server ('exportfs'). If run-server is true, this must be true. Default true.")
	gracePeriod    = flag.Uint("grace-period", 90, "NFS Ganesha grace period to use in seconds, from 0-180. If the server is not expected to survive restarts, i.e. it is running as a pod & its export directory is not persisted, this can be set to 0. Can only be set if both run-server and use-ganesha are true. Default 90.")
	enableXfsQuota = flag.Bool("enable-xfs-quota", false, "If the provisioner will set xfs quotas for each volume it provisions. Requires that the directory it creates volumes in ('/export') is xfs mounted with option prjquota/pquota, and that it has the privilege to run xfs_quota. Default false.")
	serverHostname = flag.String("server-hostname", "", "The hostname for the NFS server to export from. Only applicable when running out-of-cluster i.e. it can only be set if either master or kubeconfig are set. If unset, the first IP output by `hostname -i` is used.")
	exportSubnet   = flag.String("export-subnet", "*", "Subnet for NFS export to allow mount only from")
	maxExports     = flag.Int("max-exports", -1, "The maximum number of volumes to be exported by this provisioner. New claims will be ignored once this limit has been reached. A negative value is interpreted as 'unlimited'. Default -1.")
	fsidDevice     = flag.Bool("device-based-fsids", true, "If file system handles created by NFS Ganesha should be based on major/minor device IDs of the backing storage volume ('/export'). Default true.")
)

const (
	exportDir     = "/export"
	ganeshaLog    = "/export/ganesha.log"
	ganeshaPid    = "/var/run/ganesha.pid"
	ganeshaConfig = "/export/vfs.conf"
)

func main() {
	flag.Set("logtostderr", "true")
	flag.Parse()

	if errs := validateProvisioner(*provisioner, field.NewPath("provisioner")); len(errs) != 0 {
		glog.Fatalf("Invalid provisioner specified: %v", errs)
	}
	glog.Infof("Provisioner %s specified", *provisioner)

	if *runServer && !*useGanesha {
		glog.Fatalf("Invalid flags specified: if run-server is true, use-ganesha must also be true.")
	}
	if *useGanesha && *exportSubnet != "*" {
		glog.Warningf("If use-ganesha is true, there is no effect on export-subnet.")
	}

	if *gracePeriod != 90 && (!*runServer || !*useGanesha) {
		glog.Fatalf("Invalid flags specified: custom grace period can only be set if both run-server and use-ganesha are true.")
	} else if *gracePeriod > 180 && *runServer && *useGanesha {
		glog.Fatalf("Invalid flags specified: custom grace period must be in the range 0-180")
	}

	// Create the client according to whether we are running in or out-of-cluster
	outOfCluster := *master != "" || *kubeconfig != ""

	if !outOfCluster && *serverHostname != "" {
		glog.Fatalf("Invalid flags specified: if server-hostname is set, either master or kube-config must also be set.")
	}

	if *runServer {
		glog.Infof("Setting up NFS server!")
		err := server.Setup(ganeshaConfig, *gracePeriod, *fsidDevice)
		if err != nil {
			glog.Fatalf("Error setting up NFS server: %v", err)
		}
		go func() {
			for {
				// This blocks until server exits (presumably due to an error)
				err = server.Run(ganeshaLog, ganeshaPid, ganeshaConfig)
				if err != nil {
					glog.Errorf("NFS server Exited Unexpectedly with err: %v", err)
				}
				// take a moment before trying to restart
				time.Sleep(time.Second)
			}
		}()
		// Wait for NFS server to come up before continuing provisioner process
		time.Sleep(5 * time.Second)
	}

	var config *rest.Config
	var err error
	if outOfCluster {
		config, err = clientcmd.BuildConfigFromFlags(*master, *kubeconfig)
	} else {
		config, err = rest.InClusterConfig()
	}
	if err != nil {
		glog.Fatalf("Failed to create config: %v", err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		glog.Fatalf("Failed to create client: %v", err)
	}

	// The controller needs to know what the server version is because out-of-tree
	// provisioners aren't officially supported until 1.5
	serverVersion, err := clientset.Discovery().ServerVersion()
	if err != nil {
		glog.Fatalf("Error getting server version: %v", err)
	}

	// Create the provisioner: it implements the Provisioner interface expected by
	// the controller
	nfsProvisioner := vol.NewNFSProvisioner(exportDir, clientset, outOfCluster, *useGanesha, ganeshaConfig, *enableXfsQuota, *serverHostname, *maxExports, *exportSubnet)

	// Start the provision controller which will dynamically provision NFS PVs
	pc := controller.NewProvisionController(
		clientset,
		*provisioner,
		nfsProvisioner,
		serverVersion.GitVersion,
	)
	pc.Run(wait.NeverStop)
}

// validateProvisioner tests if provisioner is a valid qualified name.
// https://github.com/kubernetes/kubernetes/blob/release-1.4/pkg/apis/storage/validation/validation.go
func validateProvisioner(provisioner string, fldPath *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}
	if len(provisioner) == 0 {
		allErrs = append(allErrs, field.Required(fldPath, provisioner))
	}
	if len(provisioner) > 0 {
		for _, msg := range validation.IsQualifiedName(strings.ToLower(provisioner)) {
			allErrs = append(allErrs, field.Invalid(fldPath, provisioner, msg))
		}
	}
	return allErrs
}
Please see [Deployment](../docs/deployment.md) for how to deploy nfs-provisioner on a Kubernetes cluster using these files.
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified from https://github.com/rootfs/nfs-ganesha-docker by Huamin Chen
FROM fedora:30 AS build
# Build ganesha from source, install it to /usr/local, and use a multi-stage build to have a smaller image
RUN dnf install -y tar gcc cmake autoconf libtool bison flex make gcc-c++ krb5-devel dbus-devel jemalloc-devel libnfsidmap-devel libnsl2-devel userspace-rcu-devel patch libblkid-devel
RUN curl -L https://github.com/nfs-ganesha/nfs-ganesha/archive/V2.8.2.tar.gz | tar zx \
&& curl -L https://github.com/nfs-ganesha/ntirpc/archive/v1.8.0.tar.gz | tar zx \
&& rm -r nfs-ganesha-2.8.2/src/libntirpc \
&& mv ntirpc-1.8.0 nfs-ganesha-2.8.2/src/libntirpc
WORKDIR /nfs-ganesha-2.8.2
RUN mkdir -p /usr/local \
&& cmake -DCMAKE_BUILD_TYPE=Release -DBUILD_CONFIG=vfs_only -DCMAKE_INSTALL_PREFIX=/usr/local src/ \
&& make \
&& make install
RUN mkdir -p /ganesha-extra \
&& mkdir -p /ganesha-extra/etc/dbus-1/system.d \
&& cp src/scripts/ganeshactl/org.ganesha.nfsd.conf /ganesha-extra/etc/dbus-1/system.d/
FROM registry.fedoraproject.org/fedora-minimal:30 AS run
RUN microdnf install -y libblkid userspace-rcu dbus-x11 rpcbind hostname nfs-utils xfsprogs jemalloc libnfsidmap && microdnf clean all
RUN mkdir -p /var/run/dbus \
&& mkdir -p /export
# add libs from /usr/local/lib64
RUN echo /usr/local/lib64 > /etc/ld.so.conf.d/local_libs.conf
# do not ask systemd for user IDs or groups (slows down dbus-daemon start)
RUN sed -i s/systemd// /etc/nsswitch.conf
COPY --from=build /usr/local /usr/local/
COPY --from=build /ganesha-extra /
COPY nfs-provisioner /nfs-provisioner
# run ldconfig after libs have been copied
RUN ldconfig
# expose mountd 20048/tcp and nfsd 2049/tcp and rpcbind 111/tcp 111/udp
EXPOSE 2049/tcp 20048/tcp 111/tcp 111/udp
ENTRYPOINT ["/nfs-provisioner"]
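Outside of Kubernetes the image can also be run directly (a sketch: the capabilities mirror deploy/kubernetes/deployment.yaml, and the provisioner must be pointed at an API server with `-kubeconfig` or `-master`; the mounted paths are illustrative):

```sh
$ make container
$ docker run --cap-add DAC_READ_SEARCH --cap-add SYS_RESOURCE \
    -v /srv:/export -v $HOME/.kube:/kube \
    quay.io/kubernetes_incubator/nfs-provisioner:latest \
    -provisioner=example.com/nfs -kubeconfig=/kube/config
```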
apiVersion: v1
clusters:
  - cluster:
      insecure-skip-tls-verify: true
      server: http://0.0.0.0:8080
    name: local
contexts:
  - context:
      cluster: local
      user: ""
    name: local
current-context: local
kind: Config
preferences: {}
users: []
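This kubeconfig targets an insecure local API server on port 8080; with it, the provisioner binary can be run out-of-cluster, e.g. (file name is illustrative; the flags are described in main.go above):

```sh
$ ./nfs-provisioner -provisioner=example.com/nfs \
    -kubeconfig=$PWD/kubeconfig.yaml
```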
{
"defaultAction": "SCMP_ACT_ERRNO",
"architectures": [
"SCMP_ARCH_X86_64",
"SCMP_ARCH_X86",
"SCMP_ARCH_X32"
],
"syscalls": [
{
"name": "accept",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "accept4",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "access",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "alarm",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "bind",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "brk",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "capget",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "capset",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "chdir",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "chmod",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "chown",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "chown32",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "clock_getres",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "clock_gettime",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "clock_nanosleep",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "close",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "connect",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "copy_file_range",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "creat",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "dup",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "dup2",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "dup3",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "epoll_create",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "epoll_create1",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "epoll_ctl",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "epoll_ctl_old",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "epoll_pwait",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "epoll_wait",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "epoll_wait_old",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "eventfd",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "eventfd2",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "execve",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "execveat",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "exit",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "exit_group",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "faccessat",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fadvise64",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fadvise64_64",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fallocate",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fanotify_mark",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fchdir",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fchmod",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fchmodat",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fchown",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fchown32",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fchownat",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fcntl",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fcntl64",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fdatasync",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fgetxattr",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "flistxattr",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "flock",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fork",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fremovexattr",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fsetxattr",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fstat",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fstat64",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fstatat64",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fstatfs",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fstatfs64",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fsync",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "ftruncate",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "ftruncate64",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "futex",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "futimesat",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getcpu",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getcwd",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getdents",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getdents64",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getegid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getegid32",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "geteuid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "geteuid32",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getgid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getgid32",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getgroups",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getgroups32",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getitimer",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getpeername",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getpgid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getpgrp",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getpid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getppid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getpriority",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getrandom",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getresgid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getresgid32",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getresuid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getresuid32",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getrlimit",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "get_robust_list",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getrusage",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getsid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getsockname",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getsockopt",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "get_thread_area",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "gettid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "gettimeofday",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getuid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getuid32",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getxattr",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "inotify_add_watch",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "inotify_init",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "inotify_init1",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "inotify_rm_watch",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "io_cancel",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "ioctl",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "io_destroy",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "io_getevents",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "ioprio_get",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "ioprio_set",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "io_setup",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "io_submit",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "ipc",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "kill",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "lchown",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "lchown32",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "lgetxattr",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "link",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "linkat",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "listen",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "listxattr",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "llistxattr",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "_llseek",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "lremovexattr",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "lseek",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "lsetxattr",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "lstat",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "lstat64",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "madvise",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "memfd_create",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "mincore",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "mkdir",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "mkdirat",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "mknod",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "mknodat",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "mlock",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "mlock2",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "mlockall",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "mmap",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "mmap2",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "mprotect",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "mq_getsetattr",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "mq_notify",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "mq_open",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "mq_timedreceive",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "mq_timedsend",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "mq_unlink",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "mremap",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "msgctl",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "msgget",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "msgrcv",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "msgsnd",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "msync",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "munlock",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "munlockall",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "munmap",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "nanosleep",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "newfstatat",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "_newselect",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "open",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "openat",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "pause",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "personality",
"action": "SCMP_ACT_ALLOW",
"args": [
{
"index": 0,
"value": 0,
"valueTwo": 0,
"op": "SCMP_CMP_EQ"
}
]
},
{
"name": "personality",
"action": "SCMP_ACT_ALLOW",
"args": [
{
"index": 0,
"value": 8,
"valueTwo": 0,
"op": "SCMP_CMP_EQ"
}
]
},
{
"name": "personality",
"action": "SCMP_ACT_ALLOW",
"args": [
{
"index": 0,
"value": 4294967295,
"valueTwo": 0,
"op": "SCMP_CMP_EQ"
}
]
},
{
"name": "pipe",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "pipe2",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "poll",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "ppoll",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "prctl",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "pread64",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "preadv",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "prlimit64",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "pselect6",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "pwrite64",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "pwritev",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "read",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "readahead",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "readlink",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "readlinkat",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "readv",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "recv",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "recvfrom",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "recvmmsg",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "recvmsg",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "remap_file_pages",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "removexattr",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "rename",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "renameat",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "renameat2",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "restart_syscall",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "rmdir",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "rt_sigaction",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "rt_sigpending",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "rt_sigprocmask",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "rt_sigqueueinfo",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "rt_sigreturn",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "rt_sigsuspend",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "rt_sigtimedwait",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "rt_tgsigqueueinfo",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "sched_getaffinity",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "sched_getattr",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "sched_getparam",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "sched_get_priority_max",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "sched_get_priority_min",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "sched_getscheduler",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "sched_rr_get_interval",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "sched_setaffinity",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "sched_setattr",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "sched_setparam",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "sched_setscheduler",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "sched_yield",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "seccomp",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "select",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "semctl",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "semget",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "semop",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "semtimedop",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "send",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "sendfile",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "sendfile64",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "sendmmsg",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "sendmsg",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "sendto",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setfsgid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setfsgid32",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setfsuid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setfsuid32",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setgid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setgid32",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setgroups",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setgroups32",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setitimer",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setpgid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setpriority",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setregid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setregid32",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setresgid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setresgid32",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setresuid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setresuid32",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setreuid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setreuid32",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setrlimit",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "set_robust_list",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setsid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setsockopt",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "set_thread_area",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "set_tid_address",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setuid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setuid32",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setxattr",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "shmat",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "shmctl",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "shmdt",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "shmget",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "shutdown",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "sigaltstack",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "signalfd",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "signalfd4",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "sigreturn",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "socket",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "socketcall",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "socketpair",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "splice",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "stat",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "stat64",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "statfs",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "statfs64",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "symlink",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "symlinkat",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "sync",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "sync_file_range",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "syncfs",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "sysinfo",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "syslog",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "tee",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "tgkill",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "time",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "timer_create",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "timer_delete",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "timerfd_create",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "timerfd_gettime",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "timerfd_settime",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "timer_getoverrun",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "timer_gettime",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "timer_settime",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "times",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "tkill",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "truncate",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "truncate64",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "ugetrlimit",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "umask",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "uname",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "unlink",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "unlinkat",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "utime",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "utimensat",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "utimes",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "vfork",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "vmsplice",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "wait4",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "waitid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "waitpid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "write",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "writev",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "arch_prctl",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "modify_ldt",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "chroot",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "clone",
"action": "SCMP_ACT_ALLOW",
"args": [
{
"index": 0,
"value": 2080505856,
"valueTwo": 0,
"op": "SCMP_CMP_MASKED_EQ"
}
]
},
{
"name": "name_to_handle_at",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "open_by_handle_at",
"action": "SCMP_ACT_ALLOW",
"args": []
}
]
}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: nfs
annotations:
volume.beta.kubernetes.io/storage-class: "example-nfs"
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Mi
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: example-nfs
provisioner: example.com/nfs
mountOptions:
- vers=4.1
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfs-provisioner
---
kind: Service
apiVersion: v1
metadata:
name: nfs-provisioner
labels:
app: nfs-provisioner
spec:
ports:
- name: nfs
port: 2049
- name: mountd
port: 20048
- name: rpcbind
port: 111
- name: rpcbind-udp
port: 111
protocol: UDP
selector:
app: nfs-provisioner
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: nfs-provisioner
spec:
selector:
matchLabels:
app: nfs-provisioner
replicas: 1
strategy:
type: Recreate
template:
metadata:
labels:
app: nfs-provisioner
spec:
serviceAccount: nfs-provisioner
containers:
- name: nfs-provisioner
image: quay.io/kubernetes_incubator/nfs-provisioner:latest
ports:
- name: nfs
containerPort: 2049
- name: mountd
containerPort: 20048
- name: rpcbind
containerPort: 111
- name: rpcbind-udp
containerPort: 111
protocol: UDP
securityContext:
capabilities:
add:
- DAC_READ_SEARCH
- SYS_RESOURCE
args:
- "-provisioner=example.com/nfs"
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: SERVICE_NAME
value: nfs-provisioner
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: export-volume
mountPath: /export
volumes:
- name: export-volume
hostPath:
path: /srv
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfs-provisioner
---
kind: Pod
apiVersion: v1
metadata:
name: nfs-provisioner
spec:
serviceAccount: nfs-provisioner
containers:
- name: nfs-provisioner
image: quay.io/kubernetes_incubator/nfs-provisioner:latest
ports:
- name: nfs
containerPort: 2049
- name: mountd
containerPort: 20048
- name: rpcbind
containerPort: 111
- name: rpcbind-udp
containerPort: 111
protocol: UDP
securityContext:
capabilities:
add:
- DAC_READ_SEARCH
args:
- "-provisioner=example.com/nfs"
- "-grace-period=0"
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: export-volume
mountPath: /export
volumes:
- name: export-volume
emptyDir: {}
apiVersion: extensions/v1beta1
kind: PodSecurityPolicy
metadata:
name: nfs-provisioner
spec:
fsGroup:
rule: RunAsAny
allowedCapabilities:
- DAC_READ_SEARCH
- SYS_RESOURCE
runAsUser:
rule: RunAsAny
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes:
- configMap
- downwardAPI
- emptyDir
- persistentVolumeClaim
- secret
- hostPath
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-provisioner-runner
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "update", "patch"]
- apiGroups: [""]
resources: ["services", "endpoints"]
verbs: ["get"]
- apiGroups: ["extensions"]
resources: ["podsecuritypolicies"]
resourceNames: ["nfs-provisioner"]
verbs: ["use"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: run-nfs-provisioner
subjects:
- kind: ServiceAccount
name: nfs-provisioner
# replace with namespace where provisioner is deployed
namespace: default
roleRef:
kind: ClusterRole
name: nfs-provisioner-runner
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-provisioner
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-provisioner
subjects:
- kind: ServiceAccount
name: nfs-provisioner
# replace with namespace where provisioner is deployed
namespace: default
roleRef:
kind: Role
name: leader-locking-nfs-provisioner
apiGroup: rbac.authorization.k8s.io
kind: Pod
apiVersion: v1
metadata:
name: read-pod
spec:
containers:
- name: read-pod
image: gcr.io/google_containers/busybox:1.24
command:
- "/bin/sh"
args:
- "-c"
- "test -f /mnt/SUCCESS && exit 0 || exit 1"
volumeMounts:
- name: nfs-pvc
mountPath: "/mnt"
restartPolicy: "Never"
volumes:
- name: nfs-pvc
persistentVolumeClaim:
claimName: nfs
allowHostDirVolumePlugin: true
allowHostIPC: false
allowHostNetwork: false
allowHostPID: false
allowHostPorts: false
allowPrivilegedContainer: false
allowedCapabilities:
- DAC_READ_SEARCH
- SYS_RESOURCE
apiVersion: v1
defaultAddCapabilities: null
fsGroup:
type: MustRunAs
kind: SecurityContextConstraints
metadata:
annotations: null
name: nfs-provisioner
priority: null
readOnlyRootFilesystem: false
requiredDropCapabilities:
- KILL
- MKNOD
- SYS_CHROOT
runAsUser:
type: RunAsAny
seLinuxContext:
type: MustRunAs
supplementalGroups:
type: RunAsAny
volumes:
- configMap
- downwardAPI
- emptyDir
- persistentVolumeClaim
- secret
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfs-provisioner
---
kind: Service
apiVersion: v1
metadata:
name: nfs-provisioner
labels:
app: nfs-provisioner
spec:
ports:
- name: nfs
port: 2049
- name: mountd
port: 20048
- name: rpcbind
port: 111
- name: rpcbind-udp
port: 111
protocol: UDP
selector:
app: nfs-provisioner
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: nfs-provisioner
spec:
selector:
matchLabels:
app: nfs-provisioner
serviceName: "nfs-provisioner"
replicas: 1
template:
metadata:
labels:
app: nfs-provisioner
spec:
serviceAccount: nfs-provisioner
terminationGracePeriodSeconds: 10
containers:
- name: nfs-provisioner
image: quay.io/kubernetes_incubator/nfs-provisioner:latest
ports:
- name: nfs
containerPort: 2049
- name: mountd
containerPort: 20048
- name: rpcbind
containerPort: 111
- name: rpcbind-udp
containerPort: 111
protocol: UDP
securityContext:
capabilities:
add:
- DAC_READ_SEARCH
- SYS_RESOURCE
args:
- "-provisioner=example.com/nfs"
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: SERVICE_NAME
value: nfs-provisioner
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: export-volume
mountPath: /export
volumes:
- name: export-volume
hostPath:
path: /srv
kind: Pod
apiVersion: v1
metadata:
name: write-pod
spec:
containers:
- name: write-pod
image: gcr.io/google_containers/busybox:1.24
command:
- "/bin/sh"
args:
- "-c"
- "touch /mnt/SUCCESS && exit 0 || exit 1"
volumeMounts:
- name: nfs-pvc
mountPath: "/mnt"
restartPolicy: "Never"
volumes:
- name: nfs-pvc
persistentVolumeClaim:
claimName: nfs
# Deployment
## Getting the provisioner image
To get the Docker image onto the machine where you want to run nfs-provisioner, you can either build it or pull the newest release from Quay. You may use the unstable `latest` tag if you wish, but all the example yamls reference the newest versioned release tag.
### Building
Building the project will only work if the project is in your `GOPATH`. Download the project into your `GOPATH` directory by using `go get` or cloning it manually.
```
$ go get github.com/kubernetes-incubator/external-storage
```
Now build the project and the Docker image by checking out the latest release and running `make container` in the project directory.
```
$ cd $GOPATH/src/github.com/kubernetes-incubator/external-storage/nfs
$ make container
```
### Pulling
If you are running in Kubernetes, it will pull the image from Quay for you. Or you can do it yourself.
```
$ docker pull quay.io/kubernetes_incubator/nfs-provisioner:latest
```
## Deploying the provisioner
Now the Docker image is on your machine. Bring up a 1.4+ cluster if you don't have one up already.
```
$ ALLOW_SECURITY_CONTEXT=true API_HOST_IP=0.0.0.0 $GOPATH/src/k8s.io/kubernetes/hack/local-up-cluster.sh
```
Decide on a unique name to give the provisioner that follows the naming scheme `<vendor name>/<provisioner name>`, where `<vendor name>` cannot be "kubernetes.io". The provisioner will only provision volumes for claims that request a `StorageClass` with a `provisioner` field set equal to this name. For example, the names of the in-tree GCE and AWS provisioners are `kubernetes.io/gce-pd` and `kubernetes.io/aws-ebs`.
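For instance, a claim will only be matched to a provisioner started with `-provisioner=example.com/nfs` if it requests a class like this minimal sketch (the class name is illustrative):
```
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: example-nfs
provisioner: example.com/nfs
```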
Decide how to run nfs-provisioner and follow one of the below sections. The recommended way is running it as a [single-instance stateful app](http://kubernetes.io/docs/tutorials/stateful-application/run-stateful-application/), where you create a `Deployment`/`StatefulSet` and back it with some persistent storage like a `hostPath` volume. Running outside of Kubernetes as a standalone container or binary is for when you want greater control over the app's lifecycle and/or the ability to set per-PV quotas.
* [In Kubernetes - Deployment](#in-kubernetes---deployment-of-1-replica)
* [In Kubernetes - StatefulSet](#in-kubernetes---statefulset-of-1-replica)
* [Outside of Kubernetes - container](#outside-of-kubernetes---container)
* [Outside of Kubernetes - binary](#outside-of-kubernetes---binary)
### In Kubernetes - Deployment of 1 replica
Edit the `provisioner` argument in the `args` field in `deploy/kubernetes/deployment.yaml` to be the provisioner's name you decided on.
`deploy/kubernetes/deployment.yaml` specifies a `hostPath` volume `/srv` mounted at `/export`. The `/export` directory is where the provisioner stores its state and the data of the `PersistentVolumes` it provisions, so by mounting a volume there you designate it as the backing storage for provisioned PVs. You may edit the `hostPath`, or mount some other type of volume at `/export`, such as a `PersistentVolumeClaim` (see the sketch below). Note that the volume mounted there must have a [supported file system](https://github.com/nfs-ganesha/nfs-ganesha/wiki/Fsalsupport#vfs) on it: any local filesystem on Linux is supported, but NFS itself is not.
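A minimal sketch of swapping the `hostPath` volume for a `PersistentVolumeClaim` in the deployment's pod template, assuming a claim named `nfs-provisioner-export` already exists (the claim name is illustrative):
```
      volumes:
      - name: export-volume
        persistentVolumeClaim:
          claimName: nfs-provisioner-export
```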
Note that if you continue with the `hostPath` volume, its path must exist on the node the provisioner is scheduled to, so you may want to use a `nodeSelector` to choose a particular node and ensure the directory exists there (`mkdir -p /srv`); a sketch follows. If SELinux is enforcing on the node, you may need to make the container [privileged](http://kubernetes.io/docs/user-guide/security-context/) or change the security context of the directory on the node: `sudo chcon -Rt svirt_sandbox_file_t /srv`.
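A minimal sketch of pinning the pod in the deployment's pod template to the node where the directory exists (the hostname value `node-1` is illustrative):
```
    spec:
      nodeSelector:
        kubernetes.io/hostname: node-1
      serviceAccount: nfs-provisioner
```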
`deploy/kubernetes/deployment.yaml` also configures a service. The deployment's pod will use the service's cluster IP as the NFS server IP to put on its `PersistentVolumes`, instead of its own unstable pod IP, because the service's name is passed in via the `SERVICE_NAME` env variable. There must always be exactly one service per pod, or provisioning will fail, which means the deployment cannot be scaled beyond 1 replica. To scale out (with multiple instances doing leader election), create new deployment and service pairs with new names, matching labels/selectors, and `SERVICE_NAME` variables, as sketched below.
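A sketch of what a second, independent instance's pair might look like; every name below is illustrative, and the service's labels/selector and the deployment's `SERVICE_NAME` value must all agree:
```
kind: Service
apiVersion: v1
metadata:
  name: nfs-provisioner-2
  labels:
    app: nfs-provisioner-2
spec:
  selector:
    app: nfs-provisioner-2
  ports:
  - name: nfs
    port: 2049
---
# ...plus a matching Deployment named nfs-provisioner-2 with
# labels app: nfs-provisioner-2 and env SERVICE_NAME=nfs-provisioner-2
```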
Create the deployment and its service.
```
$ kubectl create -f deploy/kubernetes/psp.yaml # or if openshift: oc create -f deploy/kubernetes/scc.yaml
# Set the subject of the RBAC objects to the current namespace where the provisioner is being deployed
$ NAMESPACE=`kubectl config get-contexts | grep '^*' | tr -s ' ' | cut -d' ' -f5`
$ sed -i'' "s/namespace:.*/namespace: $NAMESPACE/g" ./deploy/kubernetes/rbac.yaml
$ kubectl create -f deploy/kubernetes/rbac.yaml
$ kubectl create -f deploy/kubernetes/deployment.yaml
```
### In Kubernetes - StatefulSet of 1 replica
The procedure for running a stateful set is identical to [that for a deployment, above](#in-kubernetes---deployment-of-1-replica), so wherever you see `deployment` there, replace it with `statefulset`. The benefit is that you get a stable hostname. Note that, unlike in most examples of stateful sets, the service cannot be headless; see the sketch below.
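In other words, the accompanying service must keep a cluster IP, since that IP is what gets written into provisioned PVs as the NFS server address. A sketch of the relevant part:
```
kind: Service
apiVersion: v1
metadata:
  name: nfs-provisioner
spec:
  # clusterIP: None   <- must NOT be set; a headless service has no
  #                      cluster IP for PVs to use as the server address
  selector:
    app: nfs-provisioner
```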
### Outside of Kubernetes - container
The container needs to run with one of `master` or `kubeconfig` set. For the `kubeconfig` argument to work, the config file, and any certificate files it references by path like `certificate-authority: /var/run/kubernetes/apiserver.crt`, need to be inside the container somehow. This can be done by creating Docker volumes, or copying the files into the folder where the Dockerfile is and adding lines like `COPY config /.kube/config` to the Dockerfile before building the image.
Run nfs-provisioner with `provisioner` equal to the name you decided on and one of `master` or `kubeconfig` set. It needs to be run with capability `DAC_READ_SEARCH` in order for Ganesha to work. Optionally, run it with capability `SYS_RESOURCE` as well, so that it can raise the limit on the number of open files Ganesha may have. If you are using Docker 1.10 or newer, it also needs a more permissive seccomp profile: `unconfined` or `deploy/docker/nfs-provisioner-seccomp.json`.
You may want to specify the hostname the NFS server exports from, i.e. the server IP to put on PVs, by setting the `server-hostname` flag.
```
$ docker run --cap-add DAC_READ_SEARCH --cap-add SYS_RESOURCE \
--security-opt seccomp:deploy/docker/nfs-provisioner-seccomp.json \
-v $HOME/.kube:/.kube:Z \
quay.io/kubernetes_incubator/nfs-provisioner:latest \
-provisioner=example.com/nfs \
-kubeconfig=/.kube/config
```
or
```
$ docker run --cap-add DAC_READ_SEARCH --cap-add SYS_RESOURCE \
--security-opt seccomp:deploy/docker/nfs-provisioner-seccomp.json \
quay.io/kubernetes_incubator/nfs-provisioner:latest \
-provisioner=example.com/nfs \
-master=http://172.17.0.1:8080
```
You may want to create & mount a Docker volume at `/export` in the container. The `/export` directory is where the provisioner stores its provisioned `PersistentVolumes'` data, so by mounting a volume there, you specify it as the backing storage for provisioned PVs. The volume can then be reused by another container if the original container stops. Without Kubernetes you will have to manage the lifecycle yourself. You should give the container a stable IP somehow so that it can survive a restart to continue serving the shares in the volume.
You may also want to enable per-PV quota enforcement. It is based on xfs project level quotas and so requires that the volume mounted at `/export` be xfs mounted with the prjquota/pquota option. It also requires that it has the privilege to run `xfs_quota`.
With the two above options, the run command will look something like this.
```
$ docker run --privileged \
-v $HOME/.kube:/.kube:Z \
-v /xfs:/export:Z \
quay.io/kubernetes_incubator/nfs-provisioner:latest \
-provisioner=example.com/nfs \
-kubeconfig=/.kube/config \
-enable-xfs-quota=true
```
### Outside of Kubernetes - binary
Running nfs-provisioner in this way allows it to manipulate exports directly on the host machine. It will create & store all its data at `/export`, so ensure the directory exists and is available for use. It runs assuming the host is already running either NFS Ganesha or a kernel NFS server, depending on how the `use-ganesha` flag is set. Use with caution.
Run nfs-provisioner with `provisioner` equal to the name you decided on, one of `master` or `kubeconfig` set, `run-server` set false, and `use-ganesha` set according to how the NFS server is running on the host. It probably needs to be run as root.
You may want to specify the hostname the NFS server exports from, i.e. the server IP to put on PVs, by setting the `server-hostname` flag.
```
$ sudo ./nfs-provisioner -provisioner=example.com/nfs \
-kubeconfig=$HOME/.kube/config \
-run-server=false \
-use-ganesha=false
```
or
```
$ sudo ./nfs-provisioner -provisioner=example.com/nfs \
-master=http://0.0.0.0:8080 \
-run-server=false \
-use-ganesha=false
```
You may want to enable per-PV quota enforcement. It is based on xfs project level quotas and so requires that the volume mounted at `/export` be xfs mounted with the prjquota/pquota option. Add the `-enable-xfs-quota=true` argument to enable it.
```
$ sudo ./nfs-provisioner -provisioner=example.com/nfs \
-kubeconfig=$HOME/.kube/config \
-run-server=false \
-use-ganesha=false \
-enable-xfs-quota=true
```
---
Now that you have finished deploying the provisioner, go to [Usage](usage.md) for info on how to use it.
---
#### Arguments
* `provisioner` - Name of the provisioner. The provisioner will only provision volumes for claims that request a StorageClass with a provisioner field set equal to this name.
* `master` - Master URL to build a client config from. Either this or kubeconfig needs to be set if the provisioner is being run out of cluster.
* `kubeconfig` - Absolute path to the kubeconfig file. Either this or master needs to be set if the provisioner is being run out of cluster.
* `run-server` - If the provisioner is responsible for running the NFS server, i.e. starting and stopping NFS Ganesha. Default true.
* `use-ganesha` - If the provisioner will create volumes using NFS Ganesha (D-Bus method calls) as opposed to using the kernel NFS server ('exportfs'). If run-server is true, this must be true. Default true.
* `grace-period` - NFS Ganesha grace period to use in seconds, from 0-180. If the server is not expected to survive restarts, i.e. it is running as a pod & its export directory is not persisted, this can be set to 0. Can only be set if both run-server and use-ganesha are true. Default 90.
* `enable-xfs-quota` - If the provisioner will set xfs quotas for each volume it provisions. Requires that the directory it creates volumes in ('/export') is xfs mounted with option prjquota/pquota, and that it has the privilege to run xfs_quota. Default false.
* `failed-retry-threshold` - The number of times to retry provisioning a volume after failure before giving up. Default 10.
* `server-hostname` - The hostname for the NFS server to export from. Only applicable when running out-of-cluster i.e. it can only be set if either master or kubeconfig are set. If unset, the first IP output by `hostname -i` is used.
* `device-based-fsids` - If file system handles created by NFS Ganesha should be based on major/minor device IDs of the backing storage volume ('/export'). When running on a cloud-based Kubernetes service (like Google's GKE), set this to `false`, as it might affect client connections across restarts of the nfs-provisioner pod. Default true.
## Usage
The nfs-provisioner has been deployed and is now watching for claims it should provision volumes for. No such claims can exist until a properly configured `StorageClass` for them to request has been created.
Edit the `provisioner` field in `deploy/kubernetes/class.yaml` to be the provisioner's name. Configure the `parameters`; a combined sketch follows the list below.
### Parameters
* `gid`: `"none"` or a [supplemental group](http://kubernetes.io/docs/user-guide/security-context/) like `"1001"`. NFS shares will be created with permissions such that pods running with the supplemental group can read & write to the share, but non-root pods without the supplemental group cannot. Pods running as root can read & write to shares regardless of the setting here, unless the `rootSquash` parameter is set true. If set to `"none"`, anybody root or non-root can write to the share. Default (if omitted) `"none"`.
* `rootSquash`: `"true"` or `"false"`. Whether to squash root users by adding the NFS Ganesha root_id_squash or kernel root_squash option to each export. Default `"false"`.
* **Deprecated. Use StorageClass.mountOptions instead.** `mountOptions`: a comma separated list of [mount options](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options) for every PV of this class to be mounted with. The list is inserted directly into every PV's mount options annotation/field without any validation. Default blank `""`.
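A sketch combining the parameters above into one class; the values are illustrative:
```
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: example-nfs
provisioner: example.com/nfs
parameters:
  gid: "1001"
  rootSquash: "true"
```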
Name the `StorageClass` however you like; the name is how claims will request this class. Create the class.
```
$ kubectl create -f deploy/kubernetes/class.yaml
storageclass "example-nfs" created
```
Now if everything is working correctly, when you create a claim requesting the class you just created, the provisioner will automatically create a volume.
Edit the `volume.beta.kubernetes.io/storage-class` annotation in `deploy/kubernetes/claim.yaml` to be the name of the class. Create the claim.
```
$ kubectl create -f deploy/kubernetes/claim.yaml
persistentvolumeclaim "nfs" created
```
The nfs-provisioner provisions a PV for the PVC you just created. Its reclaim policy is Delete, so it and its backing storage will be deleted by the provisioner when the PVC is deleted.
```
$ kubectl get pv
NAME CAPACITY ACCESSMODES RECLAIMPOLICY STATUS CLAIM REASON AGE
pvc-dce84888-7a9d-11e6-b1ee-5254001e0c1b 1Mi RWX Delete Bound default/nfs 23s
```
A pod can consume the PVC and write to the backing NFS share. Create a pod to test this.
```
$ kubectl create -f deploy/kubernetes/write-pod.yaml
pod "write-pod" created
$ kubectl get pod --show-all
nfs-provisioner 1/1 Running 0 31s
write-pod 0/1 Completed 0 41s
```
Once you are done with the PVC, delete it and the provisioner will delete the PV and its backing storage.
```
$ kubectl delete pod write-pod
pod "write-pod" deleted
$ kubectl delete pvc nfs
persistentvolumeclaim "nfs" deleted
$ kubectl get pv
```
Note that deleting or stopping a provisioner won't delete the `PersistentVolume` objects it created.
If at any point things don't work correctly, check the provisioner's logs using `kubectl logs` and look for events in the PVs and PVCs using `kubectl describe`.
### Using as default
The provisioner can be used as the default storage provider, meaning claims that don't request a `StorageClass` get volumes provisioned for them by the provisioner by default. To set as the default a `StorageClass` that specifies the provisioner, turn on the `DefaultStorageClass` admission-plugin and add the `storageclass.beta.kubernetes.io/is-default-class` annotation to the class. See http://kubernetes.io/docs/user-guide/persistent-volumes/#class-1 for more information.
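A sketch of a class annotated as the default; it assumes the `DefaultStorageClass` admission-plugin is enabled:
```
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: example-nfs
  annotations:
    storageclass.beta.kubernetes.io/is-default-class: "true"
provisioner: example.com/nfs
```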
module github.com/kubernetes-incubator/external-storage/nfs
go 1.12
require (
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
github.com/guelfey/go.dbus v0.0.0-20131113121618-f6a3a2366cc3
github.com/imdario/mergo v0.3.7 // indirect
github.com/kubernetes-sigs/sig-storage-lib-external-provisioner v4.0.0+incompatible
github.com/miekg/dns v1.1.15 // indirect
github.com/prometheus/client_golang v1.1.0 // indirect
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 // indirect
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 // indirect
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 // indirect
k8s.io/api v0.0.0-20190814101207-0772a1bdf941
k8s.io/apimachinery v0.0.0-20190814100815-533d101be9a6
k8s.io/client-go v0.0.0-20190816061517-44c2c549a534
k8s.io/utils v0.0.0-20190809000727-6c36bc71fc4a // indirect
sigs.k8s.io/sig-storage-lib-external-provisioner v4.0.0+incompatible // indirect
)
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/Azure/go-autorest v11.1.2+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v0.0.0-20160705203006-01aeca54ebda/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I=
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I=
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903 h1:LbsanbbD6LieFkXbj9YNNBupiGHJgFeLpO0j0Fza1h8=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gnostic v0.0.0-20170426233943-68f4ded48ba9/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d h1:7XGaL1e6bYS1yIonGp9761ExpPPV1ui0SAC59Yube9k=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/gophercloud/gophercloud v0.0.0-20190126172459-c818fa66e4c8/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4=
github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/guelfey/go.dbus v0.0.0-20131113121618-f6a3a2366cc3 h1:fngCxKbvZdctIsWj2hYijhAt4iK0JXSSA78B36xP0yI=
github.com/guelfey/go.dbus v0.0.0-20131113121618-f6a3a2366cc3/go.mod h1:0CNX5Cvi77WEH8llpfZ/ieuqyceb1cnO5//b5zzsnF8=
github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI=
github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kubernetes-sigs/sig-storage-lib-external-provisioner v4.0.0+incompatible h1:dlX8ibVbRDH/HJ7jEWksBXtemdj2tF6vgA4zJzNrDyg=
github.com/kubernetes-sigs/sig-storage-lib-external-provisioner v4.0.0+incompatible/go.mod h1:+FITXJbAUSA7t7e3NGr36Ftd5qM4OpI6lIyq/F5y1Go=
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/miekg/dns v1.1.15 h1:CSSIDtllwGLMoA6zjdKnaE6Tx6eVUxQ29LUgGetiDCI=
github.com/miekg/dns v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w=
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.1.0 h1:BQ53HtBmfOitExawJ6LokA4x8ov/z0SYYb0+HxJfRI8=
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURmKE=
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc h1:gkKoSkUmnU6bpS/VhkuO27bzQeSA51uaEfbOW5dNb68=
golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f h1:25KHgbfyiSm6vwQLbM3zZIe1v9p/3ea4Rz+nnM5K/i4=
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3 h1:4y9KwBHBgBNwDbtu44R5o1fdOCQUEXhbk/P4A9WmJq0=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/inf.v0 v0.9.0 h1:3zYtXIO92bvsdS3ggAdA8Gb4Azj0YU+TVY1uGYNFA8o=
gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
k8s.io/api v0.0.0-20190629053923-1634385ce462/go.mod h1:TBhBqb1AWbBQbW3XRusr7n7E4v2+5ZY8r8sAMnyFC5A=
k8s.io/api v0.0.0-20190814101207-0772a1bdf941 h1:Y8yEkyPyJstRyZRD2qAVXeFfgilYKxdxB8zjO0cb/XY=
k8s.io/api v0.0.0-20190814101207-0772a1bdf941/go.mod h1:PBHnH5pdKCvv/+J4D3mpFXQCHFqtBbB1JvlkIVrfqrc=
k8s.io/apimachinery v0.0.0-20190612205821-1799e75a0719/go.mod h1:I4A+glKBHiTgiEjQiCCQfCAIcIMFGt291SmsvcrFzJA=
k8s.io/apimachinery v0.0.0-20190807091453-5477731e3db2/go.mod h1:I4A+glKBHiTgiEjQiCCQfCAIcIMFGt291SmsvcrFzJA=
k8s.io/apimachinery v0.0.0-20190814100815-533d101be9a6 h1:g3kHsVIF7tLDtdP1RPw/Kuy3ANzPG5QPVwQ52qYkI0U=
k8s.io/apimachinery v0.0.0-20190814100815-533d101be9a6/go.mod h1:MAmngDqHkEif0Kxdsl08wStgNTmHhat4DuIUyb4LbCc=
k8s.io/client-go v0.0.0-20190816061517-44c2c549a534 h1:3lGc+Wr4mxhgHMGCl+1kbnw2du8Hpcyuyj/fWtGx7cc=
k8s.io/client-go v0.0.0-20190816061517-44c2c549a534/go.mod h1:aNb/TGHrwq3Ss8gL4sy5+wueJyyEqIPBAKL8LBGW5v8=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v0.4.0 h1:lCJCxf/LIowc2IGS9TPjWDyXY4nOmdGdfcwwDQCOURQ=
k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
k8s.io/kube-openapi v0.0.0-20190709113604-33be087ad058 h1:di3XCwddOR9cWBNpfgXaskhh6cgJuwcK54rvtwUaC10=
k8s.io/kube-openapi v0.0.0-20190709113604-33be087ad058/go.mod h1:nfDlWeOsu3pUf4yWGL+ERqohP4YsZcBJXWMK+gkzOA4=
k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0=
k8s.io/utils v0.0.0-20190809000727-6c36bc71fc4a h1:uy5HAgt4Ha5rEMbhZA+aM1j2cq5LmR6LQ71EYC2sVH4=
k8s.io/utils v0.0.0-20190809000727-6c36bc71fc4a/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
sigs.k8s.io/sig-storage-lib-external-provisioner v4.0.0+incompatible h1:qV3eFdgCp7Cp/ORjkJI9VBBEOntT+z385jLqdBtmgHA=
sigs.k8s.io/sig-storage-lib-external-provisioner v4.0.0+incompatible/go.mod h1:qhqLyNwJC49PoUalmtzYb4s9fT8HOMBTLbTY1QoVOqI=
sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package server
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"regexp"
"strings"
"syscall"
"github.com/golang/glog"
)
var defaultGaneshaConfigContents = []byte(`
###################################################
#
# EXPORT
#
# To function, all that is required is an EXPORT
#
# Define the absolute minimal export
#
###################################################
EXPORT
{
# Export Id (mandatory, each EXPORT must have a unique Export_Id)
Export_Id = 0;
# Exported path (mandatory)
Path = /nonexistent;
# Pseudo Path (required for NFS v4)
Pseudo = /nonexistent;
# Required for access (default is None)
# Could use CLIENT blocks instead
Access_Type = RW;
# Exporting FSAL
FSAL {
Name = VFS;
}
}
NFS_Core_Param
{
MNT_Port = 20048;
fsid_device = true;
}
NFSV4
{
Grace_Period = 90;
}
`)
// Setup sets up various prerequisites and settings for the server. If an error
// is encountered at any point, it returns immediately.
func Setup(ganeshaConfig string, gracePeriod uint, fsidDevice bool) error {
// Start rpcbind if it is not started yet
cmd := exec.Command("/usr/sbin/rpcinfo", "127.0.0.1")
if err := cmd.Run(); err != nil {
cmd = exec.Command("/usr/sbin/rpcbind", "-w")
if out, err := cmd.CombinedOutput(); err != nil {
return fmt.Errorf("Starting rpcbind failed with error: %v, output: %s", err, out)
}
}
cmd = exec.Command("/usr/sbin/rpc.statd")
if out, err := cmd.CombinedOutput(); err != nil {
return fmt.Errorf("rpc.statd failed with error: %v, output: %s", err, out)
}
// Start dbus, needed for ganesha dynamic exports
cmd = exec.Command("dbus-daemon", "--system")
if out, err := cmd.CombinedOutput(); err != nil {
return fmt.Errorf("dbus-daemon failed with error: %v, output: %s", err, out)
}
err := setRlimitNOFILE()
if err != nil {
glog.Warningf("Error setting RLIMIT_NOFILE, there may be 'Too many open files' errors later: %v", err)
}
// Use defaultGaneshaConfigContents if the ganeshaConfig doesn't exist yet
if _, err = os.Stat(ganeshaConfig); os.IsNotExist(err) {
err = ioutil.WriteFile(ganeshaConfig, defaultGaneshaConfigContents, 0600)
if err != nil {
return fmt.Errorf("error writing ganesha config %s: %v", ganeshaConfig, err)
}
}
err = setGracePeriod(ganeshaConfig, gracePeriod)
if err != nil {
return fmt.Errorf("error setting grace period to ganesha config: %v", err)
}
err = setFsidDevice(ganeshaConfig, fsidDevice)
if err != nil {
return fmt.Errorf("error setting fsid device to ganesha config: %v", err)
}
return nil
}
// Run : run the NFS server in the foreground until it exits
// Ideally, it should never exit when run in foreground mode
// We force foreground to allow the provisioner process to restart
// the server if it crashes - daemonization prevents us from using Wait()
// for this purpose
func Run(ganeshaLog, ganeshaPid, ganeshaConfig string) error {
// Start ganesha.nfsd
glog.Infof("Running NFS server!")
cmd := exec.Command("ganesha.nfsd", "-F", "-L", ganeshaLog, "-p", ganeshaPid, "-f", ganeshaConfig)
if out, err := cmd.CombinedOutput(); err != nil {
return fmt.Errorf("ganesha.nfsd failed with error: %v, output: %s", err, out)
}
return nil
}
func setRlimitNOFILE() error {
var rlimit syscall.Rlimit
err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlimit)
if err != nil {
return fmt.Errorf("error getting RLIMIT_NOFILE: %v", err)
}
glog.Infof("starting RLIMIT_NOFILE rlimit.Cur %d, rlimit.Max %d", rlimit.Cur, rlimit.Max)
rlimit.Max = 1024 * 1024
rlimit.Cur = 1024 * 1024
err = syscall.Setrlimit(syscall.RLIMIT_NOFILE, &rlimit)
if err != nil {
return err
}
err = syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlimit)
if err != nil {
return fmt.Errorf("error getting RLIMIT_NOFILE: %v", err)
}
glog.Infof("ending RLIMIT_NOFILE rlimit.Cur %d, rlimit.Max %d", rlimit.Cur, rlimit.Max)
return nil
}
func setFsidDevice(ganeshaConfig string, fsidDevice bool) error {
newLine := fmt.Sprintf("fsid_device = %t;", fsidDevice)
re := regexp.MustCompile("fsid_device = (true|false);")
read, err := ioutil.ReadFile(ganeshaConfig)
if err != nil {
return err
}
oldLine := re.Find(read)
if oldLine == nil {
// fsid_device line not there, append it after MNT_Port
re := regexp.MustCompile("MNT_Port = 20048;")
mntPort := re.Find(read)
block := "MNT_Port = 20048;\n" +
"\t" + newLine
replaced := strings.Replace(string(read), string(mntPort), block, -1)
err = ioutil.WriteFile(ganeshaConfig, []byte(replaced), 0)
if err != nil {
return err
}
} else {
// fsid_device there, just replace it
replaced := strings.Replace(string(read), string(oldLine), newLine, -1)
err = ioutil.WriteFile(ganeshaConfig, []byte(replaced), 0)
if err != nil {
return err
}
}
return nil
}
func setGracePeriod(ganeshaConfig string, gracePeriod uint) error {
if gracePeriod > 180 {
return fmt.Errorf("grace period cannot be greater than 180")
}
newLine := fmt.Sprintf("Grace_Period = %d;", gracePeriod)
re := regexp.MustCompile("Grace_Period = [0-9]+;")
read, err := ioutil.ReadFile(ganeshaConfig)
if err != nil {
return err
}
oldLine := re.Find(read)
var file *os.File
if oldLine == nil {
// Grace_Period line not there, append the whole NFSV4 block.
file, err = os.OpenFile(ganeshaConfig, os.O_APPEND|os.O_WRONLY, 0600)
if err != nil {
return err
}
defer file.Close()
block := "\nNFSV4\n{\n" +
"\t" + newLine + "\n" +
"}\n"
if _, err = file.WriteString(block); err != nil {
return err
}
file.Sync()
} else {
// Grace_Period line there, just replace it
replaced := strings.Replace(string(read), string(oldLine), newLine, -1)
err = ioutil.WriteFile(ganeshaConfig, []byte(replaced), 0)
if err != nil {
return err
}
}
return nil
}
// Stop stops the NFS server.
func Stop() {
// /bin/dbus-send --system --dest=org.ganesha.nfsd --type=method_call /org/ganesha/nfsd/admin org.ganesha.nfsd.admin.shutdown
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volume
import (
"fmt"
"os"
"path"
"strconv"
"github.com/kubernetes-sigs/sig-storage-lib-external-provisioner/controller"
"k8s.io/api/core/v1"
)
// Delete removes the directory that was created by Provision backing the given
// PV and removes its export from the NFS server.
func (p *nfsProvisioner) Delete(volume *v1.PersistentVolume) error {
// Ignore the call if this provisioner was not the one to provision the
// volume. It doesn't even attempt to delete it, so it's neither a success
// (nil error) nor failure (any other error)
provisioned, err := p.provisioned(volume)
if err != nil {
return fmt.Errorf("error determining if this provisioner was the one to provision volume %q: %v", volume.Name, err)
}
if !provisioned {
strerr := fmt.Sprintf("this provisioner id %s didn't provision volume %q and so can't delete it; id %s did & can", p.identity, volume.Name, volume.Annotations[annProvisionerID])
return &controller.IgnoredError{Reason: strerr}
}
err = p.deleteDirectory(volume)
if err != nil {
return fmt.Errorf("error deleting volume's backing path: %v", err)
}
err = p.deleteExport(volume)
if err != nil {
return fmt.Errorf("deleted the volume's backing path but error deleting export: %v", err)
}
err = p.deleteQuota(volume)
if err != nil {
return fmt.Errorf("deleted the volume's backing path & export but error deleting quota: %v", err)
}
return nil
}
func (p *nfsProvisioner) provisioned(volume *v1.PersistentVolume) (bool, error) {
provisionerID, ok := volume.Annotations[annProvisionerID]
if !ok {
return false, fmt.Errorf("PV doesn't have an annotation %s", annProvisionerID)
}
return provisionerID == string(p.identity), nil
}
func (p *nfsProvisioner) deleteDirectory(volume *v1.PersistentVolume) error {
path := path.Join(p.exportDir, volume.ObjectMeta.Name)
if _, err := os.Stat(path); os.IsNotExist(err) {
return nil
}
if err := os.RemoveAll(path); err != nil {
return err
}
return nil
}
func (p *nfsProvisioner) deleteExport(volume *v1.PersistentVolume) error {
block, exportID, err := getBlockAndID(volume, annExportBlock, annExportID)
if err != nil {
return fmt.Errorf("error getting block &/or id from annotations: %v", err)
}
if err := p.exporter.RemoveExportBlock(block, uint16(exportID)); err != nil {
return fmt.Errorf("error removing the export from the config file: %v", err)
}
if err := p.exporter.Unexport(volume); err != nil {
return fmt.Errorf("removed export from the config file but error unexporting it: %v", err)
}
return nil
}
func (p *nfsProvisioner) deleteQuota(volume *v1.PersistentVolume) error {
block, projectID, err := getBlockAndID(volume, annProjectBlock, annProjectID)
if err != nil {
return fmt.Errorf("error getting block &/or id from annotations: %v", err)
}
if err := p.quotaer.RemoveProject(block, uint16(projectID)); err != nil {
return fmt.Errorf("error removing the quota project from the projects file: %v", err)
}
if err := p.quotaer.UnsetQuota(); err != nil {
return fmt.Errorf("removed quota project from the project file but error unsetting the quota: %v", err)
}
return nil
}
func getBlockAndID(volume *v1.PersistentVolume, annBlock, annID string) (string, uint16, error) {
block, ok := volume.Annotations[annBlock]
if !ok {
return "", 0, fmt.Errorf("PV doesn't have an annotation with key %s", annBlock)
}
idStr, ok := volume.Annotations[annID]
if !ok {
return "", 0, fmt.Errorf("PV doesn't have an annotation %s", annID)
}
	id, err := strconv.ParseUint(idStr, 10, 16)
	if err != nil {
		return "", 0, fmt.Errorf("error parsing annotation %s value %q as uint16: %v", annID, idStr, err)
	}
return block, uint16(id), nil
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volume
import (
"fmt"
"os"
"os/exec"
"regexp"
"strconv"
"sync"
"github.com/golang/glog"
"github.com/guelfey/go.dbus"
"k8s.io/api/core/v1"
)
type exporter interface {
CanExport(int) bool
AddExportBlock(string, bool, string) (string, uint16, error)
RemoveExportBlock(string, uint16) error
Export(string) error
Unexport(*v1.PersistentVolume) error
}
type exportBlockCreator interface {
CreateExportBlock(string, string, bool, string) string
}
type exportMap struct {
// Map to track used exportIDs. Each ganesha export needs a unique fsid and
// Export_Id, each kernel a unique fsid. Assign each export an exportID and
// use it as both fsid and Export_Id.
exportIDs map[uint16]bool
}
func (e *exportMap) CanExport(limit int) bool {
if limit < 0 {
return true
}
totalExports := len(e.exportIDs)
return totalExports < limit
}
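// Illustrative note (not part of the original file): with exportIDs
// {1: true, 3: true}, CanExport(-1) and CanExport(3) return true, while
// CanExport(2) returns false because two exports already exist.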
type genericExporter struct {
*exportMap
ebc exportBlockCreator
config string
mapMutex *sync.Mutex
fileMutex *sync.Mutex
}
func newGenericExporter(ebc exportBlockCreator, config string, re *regexp.Regexp) *genericExporter {
if _, err := os.Stat(config); os.IsNotExist(err) {
glog.Fatalf("config %s does not exist!", config)
}
exportIDs, err := getExistingIDs(config, re)
if err != nil {
glog.Errorf("error while populating exportIDs map, there may be errors exporting later if exportIDs are reused: %v", err)
}
return &genericExporter{
exportMap: &exportMap{
exportIDs: exportIDs,
},
ebc: ebc,
config: config,
mapMutex: &sync.Mutex{},
fileMutex: &sync.Mutex{},
}
}
func (e *genericExporter) AddExportBlock(path string, rootSquash bool, exportSubnet string) (string, uint16, error) {
exportID := generateID(e.mapMutex, e.exportIDs)
exportIDStr := strconv.FormatUint(uint64(exportID), 10)
block := e.ebc.CreateExportBlock(exportIDStr, path, rootSquash, exportSubnet)
// Add the export block to the config file
if err := addToFile(e.fileMutex, e.config, block); err != nil {
deleteID(e.mapMutex, e.exportIDs, exportID)
return "", 0, fmt.Errorf("error adding export block %s to config %s: %v", block, e.config, err)
}
return block, exportID, nil
}
func (e *genericExporter) RemoveExportBlock(block string, exportID uint16) error {
deleteID(e.mapMutex, e.exportIDs, exportID)
return removeFromFile(e.fileMutex, e.config, block)
}
type ganeshaExporter struct {
genericExporter
}
var _ exporter = &ganeshaExporter{}
func newGaneshaExporter(ganeshaConfig string) exporter {
return &ganeshaExporter{
genericExporter: *newGenericExporter(&ganeshaExportBlockCreator{}, ganeshaConfig, regexp.MustCompile("Export_Id = ([0-9]+);")),
}
}
// Export exports the given directory using NFS Ganesha, assuming it is running
// and can be connected to using D-Bus.
func (e *ganeshaExporter) Export(path string) error {
// Call AddExport using dbus
conn, err := dbus.SystemBus()
if err != nil {
return fmt.Errorf("error getting dbus session bus: %v", err)
}
obj := conn.Object("org.ganesha.nfsd", "/org/ganesha/nfsd/ExportMgr")
call := obj.Call("org.ganesha.nfsd.exportmgr.AddExport", 0, e.config, fmt.Sprintf("export(path = %s)", path))
if call.Err != nil {
return fmt.Errorf("error calling org.ganesha.nfsd.exportmgr.AddExport: %v", call.Err)
}
return nil
}
func (e *ganeshaExporter) Unexport(volume *v1.PersistentVolume) error {
ann, ok := volume.Annotations[annExportID]
if !ok {
return fmt.Errorf("PV doesn't have an annotation %s, can't remove the export from the server", annExportID)
}
exportID, err := strconv.ParseUint(ann, 10, 16)
if err != nil {
return fmt.Errorf("error parsing annotation %s value %q as id: %v", annExportID, ann, err)
}
// Call RemoveExport using dbus
conn, err := dbus.SystemBus()
if err != nil {
return fmt.Errorf("error getting dbus session bus: %v", err)
}
obj := conn.Object("org.ganesha.nfsd", "/org/ganesha/nfsd/ExportMgr")
call := obj.Call("org.ganesha.nfsd.exportmgr.RemoveExport", 0, uint16(exportID))
if call.Err != nil {
return fmt.Errorf("error calling org.ganesha.nfsd.exportmgr.RemoveExport: %v", call.Err)
}
return nil
}
type ganeshaExportBlockCreator struct{}
var _ exportBlockCreator = &ganeshaExportBlockCreator{}
// CreateExportBlock creates the text block to add to the ganesha config file.
func (e *ganeshaExportBlockCreator) CreateExportBlock(exportID, path string, rootSquash bool, exportSubnet string) string {
squash := "no_root_squash"
if rootSquash {
squash = "root_id_squash"
}
return "\nEXPORT\n{\n" +
"\tExport_Id = " + exportID + ";\n" +
"\tPath = " + path + ";\n" +
"\tPseudo = " + path + ";\n" +
"\tAccess_Type = RW;\n" +
"\tSquash = " + squash + ";\n" +
"\tSecType = sys;\n" +
"\tFilesystem_id = " + exportID + "." + exportID + ";\n" +
"\tFSAL {\n\t\tName = VFS;\n\t}\n}\n"
}
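// Illustrative note (not part of the original file): for exportID "7", path
// "/export/pvc-x" and rootSquash false, the returned block renders as:
//
//	EXPORT
//	{
//		Export_Id = 7;
//		Path = /export/pvc-x;
//		Pseudo = /export/pvc-x;
//		Access_Type = RW;
//		Squash = no_root_squash;
//		SecType = sys;
//		Filesystem_id = 7.7;
//		FSAL {
//			Name = VFS;
//		}
//	}
//
// Note that the exportSubnet argument is unused by the ganesha block creator.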
type kernelExporter struct {
genericExporter
}
var _ exporter = &kernelExporter{}
func newKernelExporter() exporter {
return &kernelExporter{
genericExporter: *newGenericExporter(&kernelExportBlockCreator{}, "/etc/exports", regexp.MustCompile("fsid=([0-9]+)")),
}
}
// Export exports all directories listed in /etc/exports
func (e *kernelExporter) Export(_ string) error {
// Execute exportfs
cmd := exec.Command("exportfs", "-r")
out, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("exportfs -r failed with error: %v, output: %s", err, out)
}
return nil
}
func (e *kernelExporter) Unexport(volume *v1.PersistentVolume) error {
// Execute exportfs
cmd := exec.Command("exportfs", "-r")
out, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("exportfs -r failed with error: %v, output: %s", err, out)
}
return nil
}
type kernelExportBlockCreator struct{}
var _ exportBlockCreator = &kernelExportBlockCreator{}
// CreateExportBlock creates the text block to add to the /etc/exports file.
func (e *kernelExportBlockCreator) CreateExportBlock(exportID, path string, rootSquash bool, exportSubnet string) string {
squash := "no_root_squash"
if rootSquash {
squash = "root_squash"
}
return "\n" + path + " " + exportSubnet + "(rw,insecure," + squash + ",fsid=" + exportID + ")\n"
}
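// Illustrative note (not part of the original file): for exportID "7", path
// "/export/pvc-x", rootSquash true and exportSubnet "10.0.0.0/8", the
// returned line is:
//
//	/export/pvc-x 10.0.0.0/8(rw,insecure,root_squash,fsid=7)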
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volume
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"path"
"reflect"
"strconv"
"strings"
"syscall"
"github.com/golang/glog"
"github.com/kubernetes-sigs/sig-storage-lib-external-provisioner/controller"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/client-go/kubernetes"
)
const (
// Name of the file where an nfsProvisioner will store its identity
identityFile = "nfs-provisioner.identity"
// are we allowed to set this? else make up our own
annCreatedBy = "kubernetes.io/createdby"
createdBy = "nfs-dynamic-provisioner"
// A PV annotation for the entire ganesha EXPORT block or /etc/exports
// block, needed for deletion.
annExportBlock = "EXPORT_block"
// A PV annotation for the exportID of this PV's backing ganesha/kernel
// export, needed for ganesha deletion and used for deleting the entry in the
// exportIDs map so the id can be reassigned.
annExportID = "Export_Id"
// A PV annotation for the project quota info block, needed for quota
// deletion.
annProjectBlock = "Project_block"
// A PV annotation for the project quota id, needed for quota deletion
annProjectID = "Project_Id"
// VolumeGidAnnotationKey is the key of the annotation on the PersistentVolume
// object that specifies a supplemental GID.
VolumeGidAnnotationKey = "pv.beta.kubernetes.io/gid"
// MountOptionAnnotation is the annotation on a PV object that specifies a
// comma separated list of mount options
MountOptionAnnotation = "volume.beta.kubernetes.io/mount-options"
// A PV annotation for the identity of the nfsProvisioner that provisioned it
annProvisionerID = "Provisioner_Id"
podIPEnv = "POD_IP"
serviceEnv = "SERVICE_NAME"
namespaceEnv = "POD_NAMESPACE"
nodeEnv = "NODE_NAME"
)
// NewNFSProvisioner creates a Provisioner that provisions NFS PVs backed by
// the given directory.
func NewNFSProvisioner(exportDir string, client kubernetes.Interface, outOfCluster bool, useGanesha bool, ganeshaConfig string, enableXfsQuota bool, serverHostname string, maxExports int, exportSubnet string) controller.Provisioner {
var exp exporter
if useGanesha {
exp = newGaneshaExporter(ganeshaConfig)
} else {
exp = newKernelExporter()
}
var quotaer quotaer
var err error
if enableXfsQuota {
quotaer, err = newXfsQuotaer(exportDir)
if err != nil {
glog.Fatalf("Error creating xfs quotaer! %v", err)
}
} else {
quotaer = newDummyQuotaer()
}
return newNFSProvisionerInternal(exportDir, client, outOfCluster, exp, quotaer, serverHostname, maxExports, exportSubnet)
}
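// Illustrative usage sketch of NewNFSProvisioner (not part of the original
// file; the clientset variable is an assumption): a provisioner backed by
// /export, using the kernel NFS server rather than ganesha, no xfs quotas,
// no export limit, and exports open to any subnet:
//
//	p := NewNFSProvisioner("/export", clientset, false, false, "", false, "", -1, "*")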
func newNFSProvisionerInternal(exportDir string, client kubernetes.Interface, outOfCluster bool, exporter exporter, quotaer quotaer, serverHostname string, maxExports int, exportSubnet string) *nfsProvisioner {
if _, err := os.Stat(exportDir); os.IsNotExist(err) {
glog.Fatalf("exportDir %s does not exist!", exportDir)
}
var identity types.UID
identityPath := path.Join(exportDir, identityFile)
if _, err := os.Stat(identityPath); os.IsNotExist(err) {
identity = uuid.NewUUID()
err := ioutil.WriteFile(identityPath, []byte(identity), 0600)
if err != nil {
glog.Fatalf("Error writing identity file %s! %v", identityPath, err)
}
} else {
read, err := ioutil.ReadFile(identityPath)
if err != nil {
glog.Fatalf("Error reading identity file %s! %v", identityPath, err)
}
identity = types.UID(strings.TrimSpace(string(read)))
}
provisioner := &nfsProvisioner{
exportDir: exportDir,
client: client,
outOfCluster: outOfCluster,
exporter: exporter,
quotaer: quotaer,
serverHostname: serverHostname,
maxExports: maxExports,
exportSubnet: exportSubnet,
identity: identity,
podIPEnv: podIPEnv,
serviceEnv: serviceEnv,
namespaceEnv: namespaceEnv,
nodeEnv: nodeEnv,
}
return provisioner
}
type nfsProvisioner struct {
// The directory to create PV-backing directories in
exportDir string
// Client, needed for getting a service cluster IP to put as the NFS server of
// provisioned PVs
client kubernetes.Interface
// Whether the provisioner is running out of cluster and so cannot rely on
// the existence of any of the pod, service, namespace, node env variables.
outOfCluster bool
// The exporter to use for exporting NFS shares
exporter exporter
// The quotaer to use for setting per-share/directory/project quotas
quotaer quotaer
// The hostname for the NFS server to export from. Only applicable when
// running as a Docker container
serverHostname string
// The maximum number of volumes to be exported by the provisioner
maxExports int
// Subnet for NFS export to allow mount only from
exportSubnet string
// Identity of this nfsProvisioner, generated & persisted to exportDir or
// recovered from there. Used to mark provisioned PVs
identity types.UID
// Environment variables the provisioner pod needs valid values for in order to
// put a service cluster IP as the server of provisioned NFS PVs, passed in
// via downward API. If serviceEnv is set, namespaceEnv must be too.
podIPEnv string
serviceEnv string
namespaceEnv string
nodeEnv string
}
var _ controller.Provisioner = &nfsProvisioner{}
var _ controller.Qualifier = &nfsProvisioner{}
// ShouldProvision returns whether provisioning should be attempted for the given
// claim.
func (p *nfsProvisioner) ShouldProvision(claim *v1.PersistentVolumeClaim) bool {
// As long as the export limit has not been reached we're ok to provision
ok := p.checkExportLimit()
if !ok {
glog.Infof("export limit reached. skipping claim %s/%s", claim.Namespace, claim.Name)
}
return ok
}
// Provision creates a volume i.e. the storage asset and returns a PV object for
// the volume.
func (p *nfsProvisioner) Provision(options controller.ProvisionOptions) (*v1.PersistentVolume, error) {
volume, err := p.createVolume(options)
if err != nil {
return nil, err
}
annotations := make(map[string]string)
annotations[annCreatedBy] = createdBy
annotations[annExportBlock] = volume.exportBlock
annotations[annExportID] = strconv.FormatUint(uint64(volume.exportID), 10)
annotations[annProjectBlock] = volume.projectBlock
annotations[annProjectID] = strconv.FormatUint(uint64(volume.projectID), 10)
if volume.supGroup != 0 {
annotations[VolumeGidAnnotationKey] = strconv.FormatUint(volume.supGroup, 10)
}
// Only use legacy mount options annotation if StorageClass.MountOptions is empty
if volume.mountOptions != "" && options.StorageClass.MountOptions == nil {
annotations[MountOptionAnnotation] = volume.mountOptions
}
annotations[annProvisionerID] = string(p.identity)
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: options.PVName,
Labels: map[string]string{},
Annotations: annotations,
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: *options.StorageClass.ReclaimPolicy,
AccessModes: options.PVC.Spec.AccessModes,
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)],
},
PersistentVolumeSource: v1.PersistentVolumeSource{
NFS: &v1.NFSVolumeSource{
Server: volume.server,
Path: volume.path,
ReadOnly: false,
},
},
MountOptions: options.StorageClass.MountOptions,
},
}
return pv, nil
}
type volume struct {
server string
path string
exportBlock string
exportID uint16
projectBlock string
projectID uint16
supGroup uint64
mountOptions string
}
// createVolume creates a volume i.e. the storage asset. It creates a unique
// directory under /export and exports it. Returns the server IP, the path, a
// zero/non-zero supplemental group, the block it added to either the ganesha
// config or /etc/exports, and the exportID
// TODO return values
func (p *nfsProvisioner) createVolume(options controller.ProvisionOptions) (volume, error) {
gid, rootSquash, mountOptions, err := p.validateOptions(options)
if err != nil {
return volume{}, fmt.Errorf("error validating options for volume: %v", err)
}
server, err := p.getServer()
if err != nil {
return volume{}, fmt.Errorf("error getting NFS server IP for volume: %v", err)
}
if ok := p.checkExportLimit(); !ok {
return volume{}, &controller.IgnoredError{Reason: fmt.Sprintf("export limit of %v has been reached", p.maxExports)}
}
path := path.Join(p.exportDir, options.PVName)
err = p.createDirectory(options.PVName, gid)
if err != nil {
return volume{}, fmt.Errorf("error creating directory for volume: %v", err)
}
exportBlock, exportID, err := p.createExport(options.PVName, rootSquash)
if err != nil {
os.RemoveAll(path)
return volume{}, fmt.Errorf("error creating export for volume: %v", err)
}
projectBlock, projectID, err := p.createQuota(options.PVName, options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)])
if err != nil {
os.RemoveAll(path)
return volume{}, fmt.Errorf("error creating quota for volume: %v", err)
}
return volume{
server: server,
path: path,
exportBlock: exportBlock,
exportID: exportID,
projectBlock: projectBlock,
projectID: projectID,
supGroup: 0,
mountOptions: mountOptions,
}, nil
}
func (p *nfsProvisioner) validateOptions(options controller.ProvisionOptions) (string, bool, string, error) {
gid := "none"
rootSquash := false
mountOptions := ""
for k, v := range options.StorageClass.Parameters {
switch strings.ToLower(k) {
case "gid":
if strings.ToLower(v) == "none" {
gid = "none"
} else if i, err := strconv.ParseUint(v, 10, 64); err == nil && i != 0 {
gid = v
} else {
return "", false, "", fmt.Errorf("invalid value for parameter gid: %v. valid values are: 'none' or a non-zero integer", v)
}
case "rootsquash":
var err error
rootSquash, err = strconv.ParseBool(v)
if err != nil {
return "", false, "", fmt.Errorf("invalid value for parameter rootSquash: %v. valid values are: 'true' or 'false'", v)
}
case "mountoptions":
mountOptions = v
default:
return "", false, "", fmt.Errorf("invalid parameter: %q", k)
}
}
// TODO implement options.ProvisionerSelector parsing
// pv.Labels MUST be set to match claim.spec.selector
// gid selector? with or without pv annotation?
if options.PVC.Spec.Selector != nil {
return "", false, "", fmt.Errorf("claim.Spec.Selector is not supported")
}
var stat syscall.Statfs_t
if err := syscall.Statfs(p.exportDir, &stat); err != nil {
return "", false, "", fmt.Errorf("error calling statfs on %v: %v", p.exportDir, err)
}
capacity := options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
requestBytes := capacity.Value()
available := int64(stat.Bavail) * int64(stat.Bsize)
if requestBytes > available {
return "", false, "", fmt.Errorf("insufficient available space %v bytes to satisfy claim for %v bytes", available, requestBytes)
}
return gid, rootSquash, mountOptions, nil
}
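// Illustrative note (not part of the original file): with StorageClass
// parameters {"gid": "1001", "rootSquash": "true", "mountOptions": "vers=4.1"},
// validateOptions returns ("1001", true, "vers=4.1", nil), provided the claim
// has no selector and the requested capacity fits in exportDir's filesystem.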
// getServer gets the server IP to put in a provisioned PV's spec. Out of
// cluster, it returns serverHostname if set, otherwise the first address
// printed by `hostname -i`. In cluster, the precedence is node name, then the
// service cluster IP (validated against this pod's endpoints), then the pod IP.
func (p *nfsProvisioner) getServer() (string, error) {
if p.outOfCluster {
if p.serverHostname != "" {
return p.serverHostname, nil
}
// TODO make this better
out, err := exec.Command("hostname", "-i").Output()
if err != nil {
return "", fmt.Errorf("hostname -i failed with error: %v, output: %s", err, out)
}
addresses := strings.Fields(string(out))
if len(addresses) > 0 {
return addresses[0], nil
}
return "", fmt.Errorf("hostname -i had bad output %s, no address to use", string(out))
}
nodeName := os.Getenv(p.nodeEnv)
if nodeName != "" {
glog.Infof("using node name %s=%s as NFS server IP", p.nodeEnv, nodeName)
return nodeName, nil
}
podIP := os.Getenv(p.podIPEnv)
if podIP == "" {
return "", fmt.Errorf("pod IP env %s must be set even if intent is to use service cluster IP as NFS server IP", p.podIPEnv)
}
serviceName := os.Getenv(p.serviceEnv)
if serviceName == "" {
glog.Infof("using potentially unstable pod IP %s=%s as NFS server IP (because neither service env %s nor node env %s are set)", p.podIPEnv, podIP, p.serviceEnv, p.nodeEnv)
return podIP, nil
}
// Service env was set, now find and validate it
namespace := os.Getenv(p.namespaceEnv)
if namespace == "" {
return "", fmt.Errorf("service env %s is set but namespace env %s isn't; no way to get the service cluster IP", p.serviceEnv, p.namespaceEnv)
}
service, err := p.client.CoreV1().Services(namespace).Get(serviceName, metav1.GetOptions{})
if err != nil {
return "", fmt.Errorf("error getting service %s=%s in namespace %s=%s", p.serviceEnv, serviceName, p.namespaceEnv, namespace)
}
// Do some validation of the service before provisioning useless volumes
valid := false
type endpointPort struct {
port int32
protocol v1.Protocol
}
expectedPorts := map[endpointPort]bool{
{2049, v1.ProtocolTCP}: true,
{20048, v1.ProtocolTCP}: true,
{111, v1.ProtocolUDP}: true,
{111, v1.ProtocolTCP}: true,
}
endpoints, err := p.client.CoreV1().Endpoints(namespace).Get(serviceName, metav1.GetOptions{})
if err != nil {
return "", fmt.Errorf("error getting endpoints of service %s=%s in namespace %s=%s: %v", p.serviceEnv, serviceName, p.namespaceEnv, namespace, err)
}
for _, subset := range endpoints.Subsets {
// One service can't have multiple nfs-provisioner endpoints. If it had, kubernetes would round-robin
// the request which would probably go to the wrong instance.
if len(subset.Addresses) != 1 {
continue
}
if subset.Addresses[0].IP != podIP {
continue
}
actualPorts := make(map[endpointPort]bool)
for _, port := range subset.Ports {
actualPorts[endpointPort{port.Port, port.Protocol}] = true
}
if !reflect.DeepEqual(expectedPorts, actualPorts) {
continue
}
valid = true
break
}
if !valid {
return "", fmt.Errorf("service %s=%s is not valid; check that it has for ports %v exactly one endpoint, this pod's IP %s=%s", p.serviceEnv, serviceName, expectedPorts, p.podIPEnv, podIP)
}
if service.Spec.ClusterIP == v1.ClusterIPNone {
return "", fmt.Errorf("service %s=%s is valid but it doesn't have a cluster IP", p.serviceEnv, serviceName)
}
glog.Infof("using service %s=%s cluster IP %s as NFS server IP", p.serviceEnv, serviceName, service.Spec.ClusterIP)
return service.Spec.ClusterIP, nil
}
func (p *nfsProvisioner) checkExportLimit() bool {
return p.exporter.CanExport(p.maxExports)
}
// createDirectory creates the given directory in exportDir with appropriate
// permissions and ownership according to the given gid parameter string.
func (p *nfsProvisioner) createDirectory(directory, gid string) error {
// TODO quotas
path := path.Join(p.exportDir, directory)
if _, err := os.Stat(path); !os.IsNotExist(err) {
return fmt.Errorf("the path already exists")
}
perm := os.FileMode(0777 | os.ModeSetgid)
if gid != "none" {
// Execute permission is required for stat, which kubelet uses during unmount.
perm = os.FileMode(0071 | os.ModeSetgid)
}
if err := os.MkdirAll(path, perm); err != nil {
return err
}
// Due to umask, need to chmod
if err := os.Chmod(path, perm); err != nil {
os.RemoveAll(path)
return err
}
if gid != "none" {
groupID, err := strconv.ParseUint(gid, 10, 64)
if err != nil {
os.RemoveAll(path)
return fmt.Errorf("strconv.ParseUint failed with error: %v", err)
}
cmd := exec.Command("chgrp", strconv.FormatUint(groupID, 10), path)
out, err := cmd.CombinedOutput()
if err != nil {
os.RemoveAll(path)
return fmt.Errorf("chgrp failed with error: %v, output: %s", err, out)
}
}
return nil
}
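// Illustrative note (not part of the original file): createDirectory("pvc-x", "none")
// creates exportDir/pvc-x with mode 0777|setgid, while createDirectory("pvc-x", "1001")
// creates it with mode 0071|setgid and chgrps it to group 1001.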
// createExport creates the export by adding a block to the appropriate config
// file and exporting it
func (p *nfsProvisioner) createExport(directory string, rootSquash bool) (string, uint16, error) {
path := path.Join(p.exportDir, directory)
block, exportID, err := p.exporter.AddExportBlock(path, rootSquash, p.exportSubnet)
if err != nil {
return "", 0, fmt.Errorf("error adding export block for path %s: %v", path, err)
}
err = p.exporter.Export(path)
if err != nil {
p.exporter.RemoveExportBlock(block, exportID)
return "", 0, fmt.Errorf("error exporting export block %s: %v", block, err)
}
return block, exportID, nil
}
// createQuota creates a quota for the directory by adding a project to
// represent the directory and setting a quota on it
func (p *nfsProvisioner) createQuota(directory string, capacity resource.Quantity) (string, uint16, error) {
path := path.Join(p.exportDir, directory)
limit := strconv.FormatInt(capacity.Value(), 10)
block, projectID, err := p.quotaer.AddProject(path, limit)
if err != nil {
return "", 0, fmt.Errorf("error adding project for path %s: %v", path, err)
}
err = p.quotaer.SetQuota(projectID, path, limit)
if err != nil {
p.quotaer.RemoveProject(block, projectID)
return "", 0, fmt.Errorf("error setting quota for path %s: %v", path, err)
}
return block, projectID, nil
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volume
import (
"errors"
"fmt"
"io/ioutil"
"math"
"os"
"reflect"
"regexp"
"strconv"
"strings"
"sync"
"syscall"
"testing"
"github.com/kubernetes-sigs/sig-storage-lib-external-provisioner/controller"
"k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
utiltesting "k8s.io/client-go/util/testing"
)
func TestCreateVolume(t *testing.T) {
tmpDir := utiltesting.MkTmpdirOrDie("nfsProvisionTest")
defer os.RemoveAll(tmpDir)
delete := v1.PersistentVolumeReclaimDelete
tests := []struct {
name string
options controller.ProvisionOptions
envKey string
expectedServer string
expectedPath string
expectedGroup uint64
expectedBlock string
expectedExportID uint16
expectError bool
expectIgnored bool
}{
{
name: "succeed creating volume",
options: controller.ProvisionOptions{
StorageClass: &storagev1.StorageClass{
ReclaimPolicy: &delete,
Parameters: map[string]string{},
},
PVName: "pvc-1",
PVC: newClaim(resource.MustParse("1Ki"), []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce, v1.ReadOnlyMany}, nil),
},
envKey: podIPEnv,
expectedServer: "1.1.1.1",
expectedPath: tmpDir + "/pvc-1",
expectedGroup: 0,
expectedBlock: "\nExport_Id = 1;\n",
expectedExportID: 1,
expectError: false,
},
{
name: "succeed creating volume again",
options: controller.ProvisionOptions{
StorageClass: &storagev1.StorageClass{
ReclaimPolicy: &delete,
Parameters: map[string]string{},
},
PVName: "pvc-2",
PVC: newClaim(resource.MustParse("1Ki"), []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce, v1.ReadOnlyMany}, nil),
},
envKey: podIPEnv,
expectedServer: "1.1.1.1",
expectedPath: tmpDir + "/pvc-2",
expectedGroup: 0,
expectedBlock: "\nExport_Id = 2;\n",
expectedExportID: 2,
expectError: false,
},
{
name: "bad parameter",
options: controller.ProvisionOptions{
StorageClass: &storagev1.StorageClass{
ReclaimPolicy: &delete,
Parameters: map[string]string{"foo": "bar"},
},
PVName: "pvc-3",
PVC: newClaim(resource.MustParse("1Ki"), []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce, v1.ReadOnlyMany}, nil),
},
envKey: podIPEnv,
expectedServer: "",
expectedPath: "",
expectedGroup: 0,
expectedBlock: "",
expectedExportID: 0,
expectError: true,
expectIgnored: false,
},
{
name: "bad server",
options: controller.ProvisionOptions{
StorageClass: &storagev1.StorageClass{
ReclaimPolicy: &delete,
Parameters: map[string]string{},
},
PVName: "pvc-4",
PVC: newClaim(resource.MustParse("1Ki"), []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce, v1.ReadOnlyMany}, nil),
},
envKey: serviceEnv,
expectedServer: "",
expectedPath: "",
expectedGroup: 0,
expectedBlock: "",
expectedExportID: 0,
expectError: true,
expectIgnored: false,
},
{
name: "dir already exists",
options: controller.ProvisionOptions{
StorageClass: &storagev1.StorageClass{
ReclaimPolicy: &delete,
Parameters: map[string]string{},
},
PVName: "pvc-1",
PVC: newClaim(resource.MustParse("1Ki"), []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce, v1.ReadOnlyMany}, nil),
},
envKey: podIPEnv,
expectedServer: "",
expectedPath: "",
expectedGroup: 0,
expectedBlock: "",
expectedExportID: 0,
expectError: true,
expectIgnored: false,
},
{
name: "error exporting",
options: controller.ProvisionOptions{
StorageClass: &storagev1.StorageClass{
ReclaimPolicy: &delete,
Parameters: map[string]string{},
},
PVName: "FAIL_TO_EXPORT_ME",
PVC: newClaim(resource.MustParse("1Ki"), []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce, v1.ReadOnlyMany}, nil),
},
envKey: podIPEnv,
expectedServer: "",
expectedPath: "",
expectedGroup: 0,
expectedBlock: "",
expectedExportID: 0,
expectError: true,
},
{
name: "succeed creating volume last slot",
options: controller.ProvisionOptions{
StorageClass: &storagev1.StorageClass{
ReclaimPolicy: &delete,
Parameters: map[string]string{},
},
PVName: "pvc-3",
PVC: newClaim(resource.MustParse("1Ki"), []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce, v1.ReadOnlyMany}, nil),
},
envKey: podIPEnv,
expectedServer: "1.1.1.1",
expectedPath: tmpDir + "/pvc-3",
expectedGroup: 0,
expectedBlock: "\nExport_Id = 3;\n",
expectedExportID: 3,
expectError: false,
},
{
name: "max export limit exceeded",
options: controller.ProvisionOptions{
StorageClass: &storagev1.StorageClass{
ReclaimPolicy: &delete,
Parameters: map[string]string{},
},
PVName: "pvc-3",
PVC: newClaim(resource.MustParse("1Ki"), []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce, v1.ReadOnlyMany}, nil),
},
envKey: podIPEnv,
expectedServer: "",
expectedPath: "",
expectedGroup: 0,
expectedBlock: "",
expectedExportID: 0,
expectError: true,
expectIgnored: true,
},
}
client := fake.NewSimpleClientset()
conf := tmpDir + "/test"
_, err := os.Create(conf)
if err != nil {
t.Errorf("Error creating file %s: %v", conf, err)
}
exporter := &testExporter{
exportMap: &exportMap{exportIDs: map[uint16]bool{}},
config: conf,
}
maxExports := 3
p := newNFSProvisionerInternal(tmpDir+"/", client, false, exporter, newDummyQuotaer(), "", maxExports, "*")
for _, test := range tests {
os.Setenv(test.envKey, "1.1.1.1")
volume, err := p.createVolume(test.options)
if err == nil {
p.exporter.(*testExporter).exportIDs[volume.exportID] = true
}
evaluate(t, test.name, test.expectError, err, test.expectedServer, volume.server, "server")
evaluate(t, test.name, test.expectError, err, test.expectedPath, volume.path, "path")
evaluate(t, test.name, test.expectError, err, test.expectedGroup, volume.supGroup, "group")
evaluate(t, test.name, test.expectError, err, test.expectedBlock, volume.exportBlock, "block")
evaluate(t, test.name, test.expectError, err, test.expectedExportID, volume.exportID, "export id")
_, isIgnored := err.(*controller.IgnoredError)
evaluate(t, test.name, test.expectError, err, test.expectIgnored, isIgnored, "ignored error")
os.Unsetenv(test.envKey)
}
}
func TestValidateOptions(t *testing.T) {
tmpDir := utiltesting.MkTmpdirOrDie("nfsProvisionTest")
defer os.RemoveAll(tmpDir)
delete := v1.PersistentVolumeReclaimDelete
tests := []struct {
name string
options controller.ProvisionOptions
expectedGid string
expectedRootSquash bool
expectError bool
}{
{
name: "empty parameters",
options: controller.ProvisionOptions{
StorageClass: &storagev1.StorageClass{
ReclaimPolicy: &delete,
Parameters: map[string]string{},
},
PVC: newClaim(resource.MustParse("1Ki"), nil, nil),
},
expectedGid: "none",
expectError: false,
},
{
name: "gid parameter value 'none'",
options: controller.ProvisionOptions{
StorageClass: &storagev1.StorageClass{
ReclaimPolicy: &delete,
Parameters: map[string]string{"gid": "none"},
},
PVC: newClaim(resource.MustParse("1Ki"), nil, nil),
},
expectedGid: "none",
expectError: false,
},
{
name: "gid parameter value id",
options: controller.ProvisionOptions{
StorageClass: &storagev1.StorageClass{
ReclaimPolicy: &delete,
Parameters: map[string]string{"gid": "1"},
},
PVC: newClaim(resource.MustParse("1Ki"), nil, nil),
},
expectedGid: "1",
expectError: false,
},
{
name: "bad parameter name",
options: controller.ProvisionOptions{
StorageClass: &storagev1.StorageClass{
ReclaimPolicy: &delete,
Parameters: map[string]string{"foo": "bar"},
},
},
expectedGid: "",
expectError: true,
},
{
name: "bad gid parameter value string",
options: controller.ProvisionOptions{
StorageClass: &storagev1.StorageClass{
ReclaimPolicy: &delete,
Parameters: map[string]string{"gid": "foo"},
},
},
expectedGid: "",
expectError: true,
},
{
name: "bad gid parameter value zero",
options: controller.ProvisionOptions{
StorageClass: &storagev1.StorageClass{
ReclaimPolicy: &delete,
Parameters: map[string]string{"gid": "0"},
},
},
expectedGid: "",
expectError: true,
},
{
name: "bad gid parameter value negative",
options: controller.ProvisionOptions{
StorageClass: &storagev1.StorageClass{
ReclaimPolicy: &delete,
Parameters: map[string]string{"gid": "-1"},
},
},
expectedGid: "",
expectError: true,
},
{
name: "root squash parameter value 'true'",
options: controller.ProvisionOptions{
StorageClass: &storagev1.StorageClass{
ReclaimPolicy: &delete,
Parameters: map[string]string{"rootSquash": "true"},
},
PVC: newClaim(resource.MustParse("1Ki"), nil, nil),
},
expectedGid: "none",
expectedRootSquash: true,
expectError: false,
},
{
name: "root squash parameter value 'false'",
options: controller.ProvisionOptions{
StorageClass: &storagev1.StorageClass{
ReclaimPolicy: &delete,
Parameters: map[string]string{"rootSquash": "false"},
},
PVC: newClaim(resource.MustParse("1Ki"), nil, nil),
},
expectedGid: "none",
expectedRootSquash: false,
expectError: false,
},
{
name: "bad root squash parameter value neither 'true' nor 'false'",
options: controller.ProvisionOptions{
StorageClass: &storagev1.StorageClass{
ReclaimPolicy: &delete,
Parameters: map[string]string{"rootSquash": "asdf"},
},
PVC: newClaim(resource.MustParse("1Ki"), nil, nil),
},
expectError: true,
},
// TODO implement options.ProvisionerSelector parsing
{
name: "mount options parameter key",
options: controller.ProvisionOptions{
StorageClass: &storagev1.StorageClass{
ReclaimPolicy: &delete,
Parameters: map[string]string{"mountOptions": "asdf"},
},
PVC: newClaim(resource.MustParse("1Ki"), nil, nil),
},
expectedGid: "none",
expectError: false,
},
// TODO implement options.ProvisionerSelector parsing
{
name: "non-nil selector",
options: controller.ProvisionOptions{
StorageClass: &storagev1.StorageClass{
ReclaimPolicy: &delete,
},
PVC: newClaim(resource.MustParse("1Ki"), nil, &metav1.LabelSelector{MatchLabels: nil}),
},
expectedGid: "",
expectError: true,
},
{
name: "bad capacity",
options: controller.ProvisionOptions{
StorageClass: &storagev1.StorageClass{
ReclaimPolicy: &delete,
},
PVC: newClaim(resource.MustParse("1Ei"), nil, nil),
},
expectedGid: "",
expectError: true,
},
}
client := fake.NewSimpleClientset()
p := newNFSProvisionerInternal(tmpDir+"/", client, false, &testExporter{}, newDummyQuotaer(), "", -1, "*")
for _, test := range tests {
gid, rootSquash, _, err := p.validateOptions(test.options)
evaluate(t, test.name, test.expectError, err, test.expectedGid, gid, "gid")
evaluate(t, test.name, test.expectError, err, test.expectedRootSquash, rootSquash, "root squash")
}
}
func TestShouldProvision(t *testing.T) {
claim := newClaim(resource.MustParse("1Ki"), []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce, v1.ReadOnlyMany}, nil)
evaluateExportTests(t, "ShouldProvision", func(p *nfsProvisioner) bool {
return p.ShouldProvision(claim)
})
}
func TestCheckExportLimit(t *testing.T) {
evaluateExportTests(t, "checkExportLimit", func(p *nfsProvisioner) bool {
return p.checkExportLimit()
})
}
func evaluateExportTests(t *testing.T, output string, checker func(*nfsProvisioner) bool) {
tmpDir := utiltesting.MkTmpdirOrDie("nfsProvisionTest")
defer os.RemoveAll(tmpDir)
tests := []struct {
name string
configContents string
exportIDs map[uint16]bool
maxExports int
expectedResult bool
expectError bool
}{
{
name: "unlimited exports",
exportIDs: map[uint16]bool{1: true, 3: true},
maxExports: -1,
expectedResult: true,
expectError: false,
},
{
name: "max export limit reached",
exportIDs: map[uint16]bool{1: true, 3: true},
maxExports: 2,
expectedResult: false,
expectError: false,
},
{
name: "max export limit not reached",
exportIDs: map[uint16]bool{1: true},
maxExports: 2,
expectedResult: true,
expectError: false,
},
}
for _, test := range tests {
client := fake.NewSimpleClientset()
p := newNFSProvisionerInternal(tmpDir+"/", client, false, &testExporter{exportMap: &exportMap{exportIDs: test.exportIDs}}, newDummyQuotaer(), "", test.maxExports, "*")
ok := checker(p)
evaluate(t, test.name, test.expectError, nil, test.expectedResult, ok, output)
}
}
func TestCreateDirectory(t *testing.T) {
tmpDir := utiltesting.MkTmpdirOrDie("nfsProvisionTest")
defer os.RemoveAll(tmpDir)
fi, _ := os.Stat(tmpDir)
defaultGid := fi.Sys().(*syscall.Stat_t).Gid
tests := []struct {
name string
directory string
gid string
expectedGid uint32
expectedPerm os.FileMode
expectError bool
}{
{
name: "gid none",
directory: "foo",
gid: "none",
expectedGid: defaultGid,
expectedPerm: os.FileMode(0777),
expectError: false,
},
// {
// name: "gid 1001",
// directory: "bar",
// gid: "1001",
// expectedGid: 1001,
// expectedPerm: os.FileMode(0071),
// expectError: false,
// },
{
name: "path already exists",
directory: "foo",
gid: "none",
expectedGid: 0,
expectedPerm: 0,
expectError: true,
},
{
name: "bad gid",
directory: "baz",
gid: "foo",
expectedGid: 0,
expectedPerm: 0,
expectError: true,
},
}
client := fake.NewSimpleClientset()
p := newNFSProvisionerInternal(tmpDir+"/", client, false, &testExporter{}, newDummyQuotaer(), "", -1, "*")
for _, test := range tests {
path := p.exportDir + test.directory
defer os.RemoveAll(path)
err := p.createDirectory(test.directory, test.gid)
var gid uint32
var perm os.FileMode
var fi os.FileInfo
if !test.expectError {
fi, err = os.Stat(path)
if err != nil {
t.Logf("test case: %s", test.name)
t.Errorf("stat %s failed with error: %v", path, err)
} else {
gid = fi.Sys().(*syscall.Stat_t).Gid
perm = fi.Mode().Perm()
}
}
evaluate(t, test.name, test.expectError, err, test.expectedGid, gid, "gid owner")
evaluate(t, test.name, test.expectError, err, test.expectedPerm, perm, "permission bits")
}
}
func TestAddToRemoveFromFile(t *testing.T) {
tmpDir := utiltesting.MkTmpdirOrDie("nfsProvisionTest")
defer os.RemoveAll(tmpDir)
conf := tmpDir + "/test"
_, err := os.Create(conf)
if err != nil {
t.Errorf("Error creating file %s: %v", conf, err)
}
toAdd := "abc\nxyz\n"
addToFile(&sync.Mutex{}, conf, toAdd)
read, _ := ioutil.ReadFile(conf)
if toAdd != string(read) {
t.Errorf("Expected %s but got %s", toAdd, string(read))
}
toRemove := toAdd
removeFromFile(&sync.Mutex{}, conf, toRemove)
read, _ = ioutil.ReadFile(conf)
if "" != string(read) {
t.Errorf("Expected %s but got %s", "", string(read))
}
}
func TestGetExistingIDs(t *testing.T) {
tmpDir := utiltesting.MkTmpdirOrDie("nfsProvisionTest")
defer os.RemoveAll(tmpDir)
tests := []struct {
name string
useGanesha bool
configContents string
re *regexp.Regexp
expectedExportIDs map[uint16]bool
expectError bool
}{
{
name: "ganesha exports 1, 3",
configContents: "\nEXPORT\n{\n" +
"\tExport_Id = 1;\n" +
"\tFilesystem_id = 1.1;\n" +
"\tFSAL {\n\t\tName = VFS;\n\t}\n}\n" +
"\nEXPORT\n{\n" +
"\tExport_Id = 3;\n" +
"\tFilesystem_id = 1.1;\n" +
"\tFSAL {\n\t\tName = VFS;\n\t}\n}\n",
re: regexp.MustCompile("Export_Id = ([0-9]+);"),
expectedExportIDs: map[uint16]bool{1: true, 3: true},
expectError: false,
},
{
name: "kernel exports 1, 3",
configContents: "\n foo *(rw,insecure,root_squash,fsid=1)\n" +
"\n bar *(rw,insecure,root_squash,fsid=3)\n",
re: regexp.MustCompile("fsid=([0-9]+)"),
expectedExportIDs: map[uint16]bool{1: true, 3: true},
expectError: false,
},
{
name: "bad regex",
configContents: "\nEXPORT\n{\n" +
"\tExport_Id = 1;\n" +
"\tFilesystem_id = 1.1;\n" +
"\tFSAL {\n\t\tName = VFS;\n\t}\n}\n",
re: regexp.MustCompile("Export_Id = [0-9]+;"),
expectedExportIDs: map[uint16]bool{},
expectError: true,
},
}
for i, test := range tests {
conf := tmpDir + "/test" + "-" + strconv.Itoa(i)
err := ioutil.WriteFile(conf, []byte(test.configContents), 0755)
if err != nil {
t.Errorf("Error writing file %s: %v", conf, err)
}
exportIDs, err := getExistingIDs(conf, test.re)
evaluate(t, test.name, test.expectError, err, test.expectedExportIDs, exportIDs, "export ids")
}
}
func TestGetServer(t *testing.T) {
tmpDir := utiltesting.MkTmpdirOrDie("nfsProvisionTest")
defer os.RemoveAll(tmpDir)
// It should be node or service...but in case both exist, instead of failing
// just test for node > service > podIP (doesn't really matter after all)
tests := []struct {
name string
objs []runtime.Object
podIP string
service string
namespace string
node string
serverHostname string
outOfCluster bool
expectedServer string
expectError bool
}{
{
name: "valid node only",
objs: []runtime.Object{},
podIP: "2.2.2.2",
service: "",
namespace: "",
node: "127.0.0.1",
expectedServer: "127.0.0.1",
expectError: false,
},
{
name: "valid node, valid service, should use node",
objs: []runtime.Object{
newService("foo", "1.1.1.1"),
newEndpoints("foo", []string{"2.2.2.2"}, []endpointPort{{2049, v1.ProtocolTCP}, {20048, v1.ProtocolTCP}, {111, v1.ProtocolUDP}, {111, v1.ProtocolTCP}}),
},
podIP: "2.2.2.2",
service: "foo",
namespace: "default",
node: "127.0.0.1",
expectedServer: "127.0.0.1",
expectError: false,
},
{
name: "invalid service, valid node, should use node",
objs: []runtime.Object{
newService("foo", "1.1.1.1"),
newEndpoints("foo", []string{"3.3.3.3"}, []endpointPort{{2049, v1.ProtocolTCP}, {20048, v1.ProtocolTCP}, {111, v1.ProtocolUDP}, {111, v1.ProtocolTCP}}),
},
podIP: "2.2.2.2",
service: "foo",
namespace: "default",
node: "127.0.0.1",
expectedServer: "127.0.0.1",
expectError: false,
},
{
name: "valid service only",
objs: []runtime.Object{
newService("foo", "1.1.1.1"),
newEndpoints("foo", []string{"2.2.2.2"}, []endpointPort{{2049, v1.ProtocolTCP}, {20048, v1.ProtocolTCP}, {111, v1.ProtocolUDP}, {111, v1.ProtocolTCP}}),
},
podIP: "2.2.2.2",
service: "foo",
namespace: "default",
node: "",
expectedServer: "1.1.1.1",
expectError: false,
},
{
name: "valid service but no namespace",
objs: []runtime.Object{
newService("foo", "1.1.1.1"),
newEndpoints("foo", []string{"2.2.2.2"}, []endpointPort{{2049, v1.ProtocolTCP}, {20048, v1.ProtocolTCP}, {111, v1.ProtocolUDP}, {111, v1.ProtocolTCP}}),
},
podIP: "2.2.2.2",
service: "foo",
namespace: "",
node: "",
expectedServer: "",
expectError: true,
},
{
name: "invalid service, ports don't match exactly",
objs: []runtime.Object{
newService("foo", "1.1.1.1"),
newEndpoints("foo", []string{"2.2.2.2"}, []endpointPort{{2049, v1.ProtocolTCP}, {20048, v1.ProtocolTCP}, {111, v1.ProtocolUDP}, {999999, v1.ProtocolTCP}}),
},
podIP: "2.2.2.2",
service: "foo",
namespace: "default",
node: "",
expectedServer: "",
expectError: true,
},
{
name: "invalid service, points to different pod IP",
objs: []runtime.Object{
newService("foo", "1.1.1.1"),
newEndpoints("foo", []string{"3.3.3.3"}, []endpointPort{{2049, v1.ProtocolTCP}, {20048, v1.ProtocolTCP}, {111, v1.ProtocolUDP}, {111, v1.ProtocolTCP}}),
},
podIP: "2.2.2.2",
service: "foo",
namespace: "default",
node: "",
expectedServer: "",
expectError: true,
},
{
name: "service but no pod IP to check if valid",
objs: []runtime.Object{
newService("foo", "1.1.1.1"),
newEndpoints("foo", []string{"2.2.2.2"}, []endpointPort{{2049, v1.ProtocolTCP}, {20048, v1.ProtocolTCP}, {111, v1.ProtocolUDP}, {111, v1.ProtocolTCP}}),
},
podIP: "",
service: "foo",
namespace: "",
node: "",
expectedServer: "",
expectError: true,
},
{
name: "no service, no node, should use podIP",
objs: []runtime.Object{},
podIP: "2.2.2.2",
service: "",
namespace: "",
node: "",
expectedServer: "2.2.2.2",
expectError: false,
},
{
name: "server-hostname is ignored, valid node",
objs: []runtime.Object{},
podIP: "2.2.2.2",
service: "",
namespace: "",
node: "127.0.0.1",
serverHostname: "foo",
expectedServer: "127.0.0.1",
expectError: false,
},
{
name: "server-hostname takes precedence when out-of-cluster",
objs: []runtime.Object{},
podIP: "2.2.2.2",
service: "",
namespace: "",
node: "127.0.0.1",
serverHostname: "foo",
outOfCluster: true,
expectedServer: "foo",
expectError: false,
},
}
for _, test := range tests {
if test.podIP != "" {
os.Setenv(podIPEnv, test.podIP)
}
if test.service != "" {
os.Setenv(serviceEnv, test.service)
}
if test.namespace != "" {
os.Setenv(namespaceEnv, test.namespace)
}
if test.node != "" {
os.Setenv(nodeEnv, test.node)
}
client := fake.NewSimpleClientset(test.objs...)
p := newNFSProvisionerInternal(tmpDir+"/", client, test.outOfCluster, &testExporter{}, newDummyQuotaer(), test.serverHostname, -1, "*")
server, err := p.getServer()
evaluate(t, test.name, test.expectError, err, test.expectedServer, server, "server")
os.Unsetenv(podIPEnv)
os.Unsetenv(serviceEnv)
os.Unsetenv(namespaceEnv)
os.Unsetenv(nodeEnv)
}
}
func newClaim(capacity resource.Quantity, accessmodes []v1.PersistentVolumeAccessMode, selector *metav1.LabelSelector) *v1.PersistentVolumeClaim {
claim := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: accessmodes,
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): capacity,
},
},
Selector: selector,
},
Status: v1.PersistentVolumeClaimStatus{},
}
return claim
}
func newService(name, clusterIP string) *v1.Service {
return &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: "default",
},
Spec: v1.ServiceSpec{
ClusterIP: clusterIP,
},
}
}
type endpointPort struct {
port int32
protocol v1.Protocol
}
func newEndpoints(name string, ips []string, ports []endpointPort) *v1.Endpoints {
epAddresses := []v1.EndpointAddress{}
for _, ip := range ips {
epAddresses = append(epAddresses, v1.EndpointAddress{IP: ip, Hostname: "", NodeName: nil, TargetRef: nil})
}
epPorts := []v1.EndpointPort{}
for i, port := range ports {
epPorts = append(epPorts, v1.EndpointPort{Name: strconv.Itoa(i), Port: port.port, Protocol: port.protocol})
}
return &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: "default",
},
Subsets: []v1.EndpointSubset{
{
Addresses: epAddresses,
NotReadyAddresses: []v1.EndpointAddress{},
Ports: epPorts,
},
},
}
}
type testExporter struct {
*exportMap
config string
}
var _ exporter = &testExporter{}
func (e *testExporter) CanExport(limit int) bool {
if e.exportMap != nil {
return e.exportMap.CanExport(limit)
}
return true
}
func (e *testExporter) AddExportBlock(path string, _ bool, _ string) (string, uint16, error) {
id := uint16(1)
for ; id <= math.MaxUint16; id++ {
if _, ok := e.exportIDs[id]; !ok {
break
}
}
return fmt.Sprintf("\nExport_Id = %d;\n", id), id, nil
}
func (e *testExporter) RemoveExportBlock(block string, exportID uint16) error {
return nil
}
func (e *testExporter) Export(path string) error {
if strings.Contains(path, "FAIL_TO_EXPORT_ME") {
return errors.New("fake error")
}
return nil
}
func (e *testExporter) Unexport(volume *v1.PersistentVolume) error {
return nil
}
func evaluate(t *testing.T, name string, expectError bool, err error, expected interface{}, got interface{}, output string) {
if !expectError && err != nil {
t.Logf("test case: %s", name)
t.Errorf("unexpected error getting %s: %v", output, err)
} else if expectError && err == nil {
t.Logf("test case: %s", name)
t.Errorf("expected error but got %s: %v", output, got)
} else if !reflect.DeepEqual(expected, got) {
t.Logf("test case: %s", name)
t.Errorf("expected %s %v but got %s %v", output, expected, output, got)
}
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volume
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"path"
"regexp"
"strconv"
"strings"
"sync"
"github.com/golang/glog"
"github.com/kubernetes-sigs/sig-storage-lib-external-provisioner/mount"
)
type quotaer interface {
AddProject(string, string) (string, uint16, error)
RemoveProject(string, uint16) error
SetQuota(uint16, string, string) error
UnsetQuota() error
}
type xfsQuotaer struct {
xfsPath string
// The file where we store mappings between project ids and directories, and
// each project's quota limit information, for backup.
// Similar to http://man7.org/linux/man-pages/man5/projects.5.html
projectsFile string
projectIDs map[uint16]bool
mapMutex *sync.Mutex
fileMutex *sync.Mutex
}
var _ quotaer = &xfsQuotaer{}
func newXfsQuotaer(xfsPath string) (*xfsQuotaer, error) {
if _, err := os.Stat(xfsPath); os.IsNotExist(err) {
return nil, fmt.Errorf("xfs path %s does not exist", xfsPath)
}
isXfs, err := isXfs(xfsPath)
if err != nil {
return nil, fmt.Errorf("error checking if xfs path %s is an XFS filesystem: %v", xfsPath, err)
}
if !isXfs {
return nil, fmt.Errorf("xfs path %s is not an XFS filesystem", xfsPath)
}
entry, err := getMountEntry(path.Clean(xfsPath), "xfs")
if err != nil {
return nil, err
}
if !strings.Contains(entry.VfsOpts, "pquota") && !strings.Contains(entry.VfsOpts, "prjquota") {
return nil, fmt.Errorf("xfs path %s was not mounted with pquota nor prjquota", xfsPath)
}
_, err = exec.LookPath("xfs_quota")
if err != nil {
return nil, err
}
projectsFile := path.Join(xfsPath, "projects")
projectIDs := map[uint16]bool{}
_, err = os.Stat(projectsFile)
if os.IsNotExist(err) {
file, cerr := os.Create(projectsFile)
if cerr != nil {
return nil, fmt.Errorf("error creating xfs projects file %s: %v", projectsFile, cerr)
}
file.Close()
} else {
re := regexp.MustCompile("(?m:^([0-9]+):/.+$)")
projectIDs, err = getExistingIDs(projectsFile, re)
if err != nil {
glog.Errorf("error while populating projectIDs map, there may be errors setting quotas later if projectIDs are reused: %v", err)
}
}
xfsQuotaer := &xfsQuotaer{
xfsPath: xfsPath,
projectsFile: projectsFile,
projectIDs: projectIDs,
mapMutex: &sync.Mutex{},
fileMutex: &sync.Mutex{},
}
err = xfsQuotaer.restoreQuotas()
if err != nil {
return nil, fmt.Errorf("error restoring quotas from projects file %s: %v", projectsFile, err)
}
return xfsQuotaer, nil
}
func isXfs(xfsPath string) (bool, error) {
cmd := exec.Command("stat", "-f", "-c", "%T", xfsPath)
out, err := cmd.Output()
if err != nil {
return false, err
}
if strings.TrimSpace(string(out)) != "xfs" {
return false, nil
}
return true, nil
}
func getMountEntry(mountpoint, fstype string) (*mount.Info, error) {
entries, err := mount.GetMounts()
if err != nil {
return nil, err
}
for _, e := range entries {
if e.Mountpoint == mountpoint && e.Fstype == fstype {
return e, nil
}
}
return nil, fmt.Errorf("mount entry for mountpoint %s, fstype %s not found", mountpoint, fstype)
}
func (q *xfsQuotaer) restoreQuotas() error {
read, err := ioutil.ReadFile(q.projectsFile)
if err != nil {
return err
}
re := regexp.MustCompile("(?m:\n^([0-9]+):(.+):(.+)$\n)")
matches := re.FindAllSubmatch(read, -1)
for _, match := range matches {
projectID, _ := strconv.ParseUint(string(match[1]), 10, 16)
directory := string(match[2])
bhard := string(match[3])
// If directory referenced by projects file no longer exists, don't set a
// quota for it: will fail
if _, err := os.Stat(directory); os.IsNotExist(err) {
q.RemoveProject(string(match[0]), uint16(projectID))
continue
}
if err := q.SetQuota(uint16(projectID), directory, bhard); err != nil {
return fmt.Errorf("error restoring quota for directory %s: %v", directory, err)
}
}
return nil
}
func (q *xfsQuotaer) AddProject(directory, bhard string) (string, uint16, error) {
projectID := generateID(q.mapMutex, q.projectIDs)
projectIDStr := strconv.FormatUint(uint64(projectID), 10)
// Store project:directory mapping and also project's quota info
block := "\n" + projectIDStr + ":" + directory + ":" + bhard + "\n"
// Add the project block to the projects file
if err := addToFile(q.fileMutex, q.projectsFile, block); err != nil {
deleteID(q.mapMutex, q.projectIDs, projectID)
return "", 0, fmt.Errorf("error adding project block %s to projects file %s: %v", block, q.projectsFile, err)
}
// Specify the new project
cmd := exec.Command("xfs_quota", "-x", "-c", fmt.Sprintf("project -s -p %s %s", directory, projectIDStr), q.xfsPath)
out, err := cmd.CombinedOutput()
if err != nil {
deleteID(q.mapMutex, q.projectIDs, projectID)
removeFromFile(q.fileMutex, q.projectsFile, block)
return "", 0, fmt.Errorf("xfs_quota failed with error: %v, output: %s", err, out)
}
return block, projectID, nil
}
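// Illustrative note (not part of the original file): assuming 7 is the first
// free project id, AddProject("/export/pvc-x", "1073741824") appends the block
// "\n7:/export/pvc-x:1073741824\n" to the projects file and runs
// `xfs_quota -x -c "project -s -p /export/pvc-x 7" <xfsPath>`.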
func (q *xfsQuotaer) RemoveProject(block string, projectID uint16) error {
deleteID(q.mapMutex, q.projectIDs, projectID)
return removeFromFile(q.fileMutex, q.projectsFile, block)
}
func (q *xfsQuotaer) SetQuota(projectID uint16, directory, bhard string) error {
if !q.projectIDs[projectID] {
return fmt.Errorf("project with id %v has not been added", projectID)
}
projectIDStr := strconv.FormatUint(uint64(projectID), 10)
cmd := exec.Command("xfs_quota", "-x", "-c", fmt.Sprintf("limit -p bhard=%s %s", bhard, projectIDStr), q.xfsPath)
out, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("xfs_quota failed with error: %v, output: %s", err, out)
}
return nil
}
func (q *xfsQuotaer) UnsetQuota() error {
return nil
}
type dummyQuotaer struct{}
var _ quotaer = &dummyQuotaer{}
func newDummyQuotaer() *dummyQuotaer {
return &dummyQuotaer{}
}
func (q *dummyQuotaer) AddProject(_, _ string) (string, uint16, error) {
return "", 0, nil
}
func (q *dummyQuotaer) RemoveProject(_ string, _ uint16) error {
return nil
}
func (q *dummyQuotaer) SetQuota(_ uint16, _, _ string) error {
return nil
}
func (q *dummyQuotaer) UnsetQuota() error {
return nil
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volume
import (
"fmt"
"io/ioutil"
"math"
"os"
"regexp"
"strconv"
"strings"
"sync"
)
// generateID generates a unique exportID to assign an export
func generateID(mutex *sync.Mutex, ids map[uint16]bool) uint16 {
mutex.Lock()
id := uint16(1)
for ; id <= math.MaxUint16; id++ {
if _, ok := ids[id]; !ok {
break
}
}
ids[id] = true
mutex.Unlock()
return id
}
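// Illustrative note (not part of the original file): with ids {1: true, 3: true},
// generateID returns 2, the smallest unused id >= 1, and marks it used in the map.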
func deleteID(mutex *sync.Mutex, ids map[uint16]bool, id uint16) {
mutex.Lock()
delete(ids, id)
mutex.Unlock()
}
// getExistingIDs populates a map with existing ids found in the given config
// file using the given regexp. Regexp must have a "digits" submatch.
func getExistingIDs(config string, re *regexp.Regexp) (map[uint16]bool, error) {
ids := map[uint16]bool{}
digitsRe := "([0-9]+)"
if !strings.Contains(re.String(), digitsRe) {
return ids, fmt.Errorf("regexp %s doesn't contain digits submatch %s", re.String(), digitsRe)
}
read, err := ioutil.ReadFile(config)
if err != nil {
return ids, err
}
allMatches := re.FindAllSubmatch(read, -1)
for _, match := range allMatches {
digits := match[1]
if id, err := strconv.ParseUint(string(digits), 10, 16); err == nil {
ids[uint16(id)] = true
}
}
return ids, nil
}
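// Illustrative note (not part of the original file): for a config containing
// "foo *(rw,insecure,root_squash,fsid=1)" and "bar *(rw,insecure,root_squash,fsid=3)"
// and the regexp "fsid=([0-9]+)", getExistingIDs returns
// map[uint16]bool{1: true, 3: true}.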
func addToFile(mutex *sync.Mutex, path string, toAdd string) error {
mutex.Lock()
file, err := os.OpenFile(path, os.O_APPEND|os.O_WRONLY, 0600)
if err != nil {
mutex.Unlock()
return err
}
defer file.Close()
if _, err = file.WriteString(toAdd); err != nil {
mutex.Unlock()
return err
}
file.Sync()
mutex.Unlock()
return nil
}
func removeFromFile(mutex *sync.Mutex, path string, toRemove string) error {
mutex.Lock()
read, err := ioutil.ReadFile(path)
if err != nil {
mutex.Unlock()
return err
}
removed := strings.Replace(string(read), toRemove, "", -1)
err = ioutil.WriteFile(path, []byte(removed), 0)
if err != nil {
mutex.Unlock()
return err
}
mutex.Unlock()
return nil
}
/vendor
/src
\ No newline at end of file
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"testing"
"k8s.io/kubernetes/test/e2e/framework"
// test sources
_ "k8s.io/kubernetes/test/e2e/storage"
)
func init() {
framework.ViperizeFlags()
}
func TestE2E(t *testing.T) {
RunE2ETests(t)
}
module github.com/kubernetes-incubator/external-storage/nfs/test/e2e
go 1.12
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"fmt"
"io/ioutil"
"os/exec"
"path/filepath"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
)
const (
manifestPath = "test/e2e/testing-manifests/"
nfsStatefulSetName = "nfs-provisioner"
nfsRBACCRName = "nfs-provisioner-runner"
nfsRBACCRBName = "run-nfs-provisioner"
nfsClaimName = "nfs"
nfsClaimSize = "1Mi"
nfsClassName = "example-nfs"
nfsWritePodName = "write-pod"
nfsReadPodName = "read-pod"
)
var _ = Describe("external-storage", func() {
f := framework.NewDefaultFramework("external-storage")
// filled in BeforeEach
var c clientset.Interface
var ns string
BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
})
AfterEach(func() {
c.RbacV1().ClusterRoles().Delete(nfsRBACCRName, nil)
c.RbacV1().ClusterRoleBindings().Delete(nfsRBACCRBName, nil)
c.StorageV1().StorageClasses().Delete(nfsClassName, nil)
})
Describe("NFS external provisioner", func() {
mkpath := func(file string) string {
return filepath.Join(manifestPath, file)
}
It("should create and delete persistent volumes when deployed with yamls", func() {
nsFlag := fmt.Sprintf("--namespace=%v", ns)
By("creating nfs-provisioner RBAC")
cmd := exec.Command("bash", "-c", fmt.Sprintf("sed -i'' 's/namespace:.*/namespace: %s/g' %s", ns, mkpath("rbac.yaml")))
framework.ExpectNoError(cmd.Run())
framework.RunKubectlOrDie("create", "-f", mkpath("rbac.yaml"), nsFlag)
By("creating an nfs-provisioner statefulset")
tmpDir, err := ioutil.TempDir("", "nfs-provisioner-statefulset")
Expect(err).NotTo(HaveOccurred())
cmd = exec.Command("bash", "-c", fmt.Sprintf("sed -i'' 's|path:.*|path: %s|g' %s", tmpDir, mkpath("statefulset.yaml")))
framework.ExpectNoError(cmd.Run())
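// Inject a "-grace-period=10" arg after the "-provisioner=" line; a short
// NFS grace period keeps the scale-down/up recovery test below fast.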
cmd = exec.Command("bash", "-c", fmt.Sprintf("sed -i'' '/-provisioner=/a \\ - \"-grace-period=10\"' %s", mkpath("statefulset.yaml")))
framework.ExpectNoError(cmd.Run())
framework.RunKubectlOrDie("create", "-f", mkpath("statefulset.yaml"), nsFlag)
ss, err := c.AppsV1().StatefulSets(ns).Get(nfsStatefulSetName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
sst := framework.NewStatefulSetTester(c)
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
By("creating a class")
framework.RunKubectlOrDie("create", "-f", mkpath("class.yaml"))
By("checking the class")
class, err := c.StorageV1beta1().StorageClasses().Get(nfsClassName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
By("creating a claim")
framework.RunKubectlOrDie("create", "-f", mkpath("claim.yaml"), nsFlag)
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, nfsClaimName, framework.Poll, 60*time.Second)
Expect(err).NotTo(HaveOccurred())
By("checking the claim")
// Get a fresh copy of the claim now that it is bound
claim, err := c.CoreV1().PersistentVolumeClaims(ns).Get(nfsClaimName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
By("checking the volume")
// Get the bound PV
pv, err := c.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
// Check sizes
expectedCapacity := resource.MustParse(nfsClaimSize)
pvCapacity := pv.Spec.Capacity[v1.ResourceName(v1.ResourceStorage)]
Expect(pvCapacity.Value()).To(Equal(expectedCapacity.Value()), "pvCapacity is not equal to expectedCapacity")
// Check PV properties
expectedAccessModes := []v1.PersistentVolumeAccessMode{v1.ReadWriteMany}
Expect(pv.Spec.AccessModes).To(Equal(expectedAccessModes))
Expect(pv.Spec.ClaimRef.Name).To(Equal(claim.ObjectMeta.Name))
Expect(pv.Spec.ClaimRef.Namespace).To(Equal(claim.ObjectMeta.Namespace))
Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(Equal(*class.ReclaimPolicy))
Expect(pv.Spec.MountOptions).To(Equal(class.MountOptions))
By("creating a pod to write to the volume")
framework.RunKubectlOrDie("create", "-f", mkpath("write-pod.yaml"), nsFlag)
framework.ExpectNoError(framework.WaitForPodSuccessInNamespace(c, nfsWritePodName, ns))
framework.DeletePodOrFail(c, ns, nfsWritePodName)
By("creating a pod to read from the volume")
framework.RunKubectlOrDie("create", "-f", mkpath("read-pod.yaml"), nsFlag)
framework.ExpectNoError(framework.WaitForPodSuccessInNamespace(c, nfsReadPodName, ns))
framework.DeletePodOrFail(c, ns, nfsReadPodName)
By("scaling the nfs-provisioner statefulset down and up")
sst.Restart(ss)
By("creating a pod to read from the volume again")
framework.RunKubectlOrDie("create", "-f", mkpath("read-pod.yaml"), nsFlag)
framework.ExpectNoError(framework.WaitForPodSuccessInNamespace(c, nfsReadPodName, ns))
framework.DeletePodOrFail(c, ns, nfsReadPodName)
By("deleting the claim")
err = c.CoreV1().PersistentVolumeClaims(ns).Delete(nfsClaimName, nil)
if err != nil && !apierrs.IsNotFound(err) {
framework.Failf("Error deleting claim %q. Error: %v", claim.Name, err)
}
By("waiting for the volume to be deleted")
if pv.Spec.PersistentVolumeReclaimPolicy == v1.PersistentVolumeReclaimDelete {
framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(c, pv.Name, 5*time.Second, 60*time.Second))
}
})
})
})
#!/bin/bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Vendoring the e2e framework is too hard. Download kubernetes source, patch
# our tests on top of it, build and run from there.
KUBE_VERSION=1.11.0
TEST_DIR=$GOPATH/src/github.com/kubernetes-incubator/external-storage/nfs/test/e2e
GOPATH=$TEST_DIR
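# Note: TEST_DIR was resolved against the original GOPATH above; repointing
# GOPATH at it keeps the downloaded kubernetes tree self-contained under the
# test dir.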
# Download kubernetes source
if [ ! -e "$GOPATH/src/k8s.io/kubernetes" ]; then
mkdir -p $GOPATH/src/k8s.io
curl -L https://github.com/kubernetes/kubernetes/archive/v${KUBE_VERSION}.tar.gz | tar xz -C $TEST_DIR/src/k8s.io/
rm -rf $GOPATH/src/k8s.io/kubernetes
mv $GOPATH/src/k8s.io/kubernetes-$KUBE_VERSION $GOPATH/src/k8s.io/kubernetes
fi
cd $GOPATH/src/k8s.io/kubernetes
# Clean some unneeded sources
find ./test/e2e -maxdepth 1 -type d ! -name 'e2e' ! -name 'framework' ! -name 'manifest' ! -name 'common' ! -name 'generated' ! -name 'testing-manifests' ! -name 'perftype' -exec rm -r {} +
find ./test/e2e -maxdepth 1 -type f \( -name 'examples.go' -o -name 'gke_local_ssd.go' -o -name 'gke_node_pools.go' \) -delete
find ./test/e2e/testing-manifests -maxdepth 1 ! -name 'testing-manifests' ! -name 'BUILD' -exec rm -r {} +
# Copy our sources
mkdir ./test/e2e/storage
cp $TEST_DIR/nfs.go ./test/e2e/storage/
rm ./test/e2e/e2e_test.go
cp $TEST_DIR/e2e_test.go ./test/e2e/
cp -r $TEST_DIR/testing-manifests/* ./test/e2e/testing-manifests
# Build e2e.test
./build/run.sh make KUBE_BUILD_PLATFORMS=linux/amd64 WHAT=test/e2e/e2e.test &> /dev/null
# Download kubectl to $HOME/bin
if [ ! -e "$HOME/bin/kubectl" ]; then
curl -L -o $HOME/bin/kubectl https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl
chmod +x $HOME/bin/kubectl
fi
# Run tests assuming a local cluster, i.e. one started with hack/local-up-cluster.sh
./_output/dockerized/bin/linux/amd64/e2e.test --provider=local --ginkgo.focus=external-storage --kubeconfig=$HOME/.kube/config
../../deploy/kubernetes
\ No newline at end of file