# Example manifest: Citrix CPX ingress container with a CIC sidecar.
# The CPX container must run in privileged mode; the sidecar copies its
# code into the host directory /cic_code via a hostPath volume mount.
# ClusterRole granting the Citrix ingress controller read access to the
# cluster objects it watches, plus the few write verbs it needs.
kind: ClusterRole
# rbac.authorization.k8s.io/v1beta1 was removed in Kubernetes 1.22;
# the GA v1 API has been available since 1.8.
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: citrix
rules:
  # NOTE(review): RBAC resource names are plural — "subjectaccessreview"
  # corrected to "subjectaccessreviews". tokenreviews/subjectaccessreviews
  # nominally live in the authentication/authorization API groups; listing
  # them under "" matches the upstream Citrix example — confirm against the
  # CIC version in use.
  - apiGroups: [""]
    resources: ["endpoints", "ingresses", "services", "pods", "secrets", "nodes", "routes", "namespaces", "tokenreviews", "subjectaccessreviews"]
    verbs: ["get", "list", "watch"]
  # services/status is needed to update the loadbalancer IP in service status
  # for integrating service of type LoadBalancer with external-dns.
  - apiGroups: [""]
    resources: ["services/status"]
    verbs: ["patch"]
  - apiGroups: ["extensions"]
    resources: ["ingresses", "ingresses/status"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["apiextensions.k8s.io"]
    resources: ["customresourcedefinitions"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["apps"]
    resources: ["deployments"]
    verbs: ["get", "list", "watch"]
  # Citrix CRDs: read the policy objects, patch their status subresources.
  - apiGroups: ["citrix.com"]
    resources: ["rewritepolicies", "canarycrds", "authpolicies", "ratelimits"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["citrix.com"]
    resources: ["rewritepolicies/status", "canarycrds/status", "authpolicies/status", "ratelimits/status"]
    verbs: ["get", "list", "patch"]
  # VIP objects are created/deleted by the controller itself.
  - apiGroups: ["citrix.com"]
    resources: ["vips"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: ["route.openshift.io"]
    resources: ["routes"]
    verbs: ["get", "list", "watch"]
---
# Binds the "citrix" ClusterRole to the "citrix" ServiceAccount in the
# default namespace.
kind: ClusterRoleBinding
# rbac.authorization.k8s.io/v1beta1 was removed in Kubernetes 1.22; use v1.
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: citrix
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: citrix
subjects:
  - kind: ServiceAccount
    name: citrix
    namespace: default
---
# ServiceAccount the CPX/CIC pod runs as; granted cluster-wide permissions
# through the "citrix" ClusterRoleBinding.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: citrix
  namespace: default
---
# Deployment running the Citrix CPX ingress proxy with the CIC controller
# as a sidecar in the same pod (NS_DEPLOYMENT_MODE=SIDECAR talks to CPX
# over localhost).
# extensions/v1beta1 Deployment was removed in Kubernetes 1.16; apps/v1
# additionally requires spec.selector, added below to match the pod labels.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cpx-cic
spec:
  replicas: 1
  selector:
    matchLabels:
      app: cpx-cic
  template:
    metadata:
      name: cpx-cic
      labels:
        app: cpx-cic
    spec:
      serviceAccountName: citrix
      containers:
        - name: cpx
          # image: "quay.io/citrix/citrix-k8s-cpx-ingress:13.0-36.29"
          image: "quay.io/citrix/citrix-k8s-cpx-ingress:13.0-58.30"
          securityContext:
            # CPX must run in privileged mode (see file header).
            privileged: true
          env:
            - name: "EULA"
              value: "yes"
            - name: "KUBERNETES_TASK_ID"
              value: ""
          ports:
            # hostPort exposes 80/443 directly on the node's network.
            - containerPort: 80
              hostPort: 80
            - containerPort: 443
              hostPort: 443
          imagePullPolicy: Always
        # CIC added as a sidecar; it copies /usr/src/triton into the shared
        # hostPath mount and then idles.
        - name: cic
          image: "dpkumar/namefix1:1.8.19"
          securityContext:
            runAsUser: 0
            privileged: true
          command: ["/bin/bash", "-c", "--"]
          args: ["cp -r /usr/src/triton /cic_code/;while true; do sleep 30; done;"]
          imagePullPolicy: Always
          env:
            - name: "EULA"
              value: "yes"
            # Local CPX management endpoint (sidecar mode).
            - name: "NS_IP"
              value: "127.0.0.1"
            - name: "NS_USER"
              value: "nsroot"
            # NOTE(review): plaintext default credentials committed in the
            # manifest — move to a Secret (secretKeyRef) before real use.
            - name: "NS_PASSWORD"
              value: "nsroot"
            - name: "NS_PROTOCOL"
              value: "HTTP"
            - name: "NS_PORT"
              value: "80"
            - name: "NS_DEPLOYMENT_MODE"
              value: "SIDECAR"
            - name: "NS_ENABLE_MONITORING"
              value: "YES"
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.name
            - name: NAMESPACE
              value: "sharedsvcs"
          volumeMounts:
            - mountPath: /cic_code
              name: host-mount
          # args:
          #   - --default-ssl-certificate
          #     $(POD_NAMESPACE)/default-cert
          # imagePullPolicy: Always
      volumes:
        # Host directory receiving the code copied by the cic sidecar;
        # type Directory requires /cic_code to already exist on the node.
        - name: host-mount
          hostPath:
            path: /cic_code
            type: Directory
      nodeSelector:
        "node-role.kubernetes.io/infra": "true"