Prismo, the Reddit of the fediverse
Written by dada / 13 November 2018 / 6 comments
Written by dada / 11 November 2018 / No comments
# Dockerfile: PluXml 5.6 on the official PHP 7.0 + Apache image
FROM php:7.0-apache
WORKDIR /var/www/html

# Fetch the PluXml archive and keep it in /usr/src for the entrypoint to unpack
RUN apt update
RUN apt install -y wget unzip
RUN wget https://git.dadall.info/dada/pluxml/raw/master/pluxml-latest.zip
RUN mv pluxml-latest.zip /usr/src/

# The web root is a volume so the blog content survives container restarts
VOLUME /var/www/html

RUN a2enmod rewrite
RUN service apache2 restart

# PHP extensions needed by PluXml: iconv and GD with FreeType/JPEG/PNG support
RUN apt-get update && apt-get install -y \
        libfreetype6-dev \
        libjpeg62-turbo-dev \
        libpng-dev \
    && docker-php-ext-install -j$(nproc) iconv \
    && docker-php-ext-configure gd --with-freetype-dir=/usr/include/ --with-jpeg-dir=/usr/include/ \
    && docker-php-ext-install -j$(nproc) gd

EXPOSE 80

COPY entrypoint.sh /usr/local/bin/
ENTRYPOINT ["entrypoint.sh"]
CMD ["apache2-foreground"]
#!/bin/bash
# entrypoint.sh: on first start, unpack PluXml into the empty web root and fix ownership
if [ ! -e index.php ]; then
    unzip /usr/src/pluxml-latest.zip -d /var/www/html/
    mv /var/www/html/PluXml/* /var/www/html
    rm -rf /var/www/html/PluXml
    chown -R www-data: /var/www/html
fi

exec "$@"
docker build -t pluxml-5.6 .
Successfully built d554d0753425
Successfully tagged pluxml-5.6:latest
dada@k8smaster:~/pluxml$ cat pluxml.yaml
apiVersion: v1
kind: Service
metadata:
  name: pluxml
  labels:
    app: pluxml
spec:
  ports:
  - port: 80
  selector:
    app: pluxml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: plx-pv-claim
  labels:
    app: pluxml
spec:
  storageClassName: rook-ceph-block
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 500Mi
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: pluxml
  labels:
    app: pluxml
spec:
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: pluxml
    spec:
      containers:
      - image: dadall/pluxml-5.6:latest
        imagePullPolicy: "Always"
        name: pluxml
        ports:
        - containerPort: 80
          name: pluxml
        volumeMounts:
        - name: pluxml-persistent-storage
          mountPath: /var/www/html
      volumes:
      - name: pluxml-persistent-storage
        persistentVolumeClaim:
          claimName: plx-pv-claim
dada@k8smaster:~/pluxml$ cat pluxml-ingress.yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  annotations:
    kubernetes.io/ingress.class: nginx
  name: pluxml-ingress
spec:
  backend:
    serviceName: pluxml
    servicePort: 80
dada@k8smaster:~/pluxml$ kubectl create -f pluxml.yaml
dada@k8smaster:~/pluxml$ kubectl create -f pluxml-ingress.yaml
dada@k8smaster:~/pluxml$ kubectl get pods --all-namespaces -o wide | grep plux
default pluxml-686f7d486-7p5sq 1/1 Running 0 82m 10.244.2.164 k8snode2 <none>
dada@k8smaster:~/pluxml$ kubectl describe svc pluxml
Name: pluxml
Namespace: default
Labels: app=pluxml
Annotations: <none>
Selector: app=pluxml
Type: ClusterIP
IP: 10.100.177.201
Port: <unset> 80/TCP
TargetPort: 80/TCP
Endpoints: 10.244.1.31:80
Session Affinity: None
Events: <none>
dada@k8smaster:~/pluxml$ kubectl describe ingress pluxml
Name: pluxml-ingress
Namespace: default
Address:
Default backend: pluxml:80 (10.244.1.31:80,10.244.2.164:80)
Rules:
Host Path Backends
---- ---- --------
* * pluxml:80 (10.244.1.31:80,10.244.2.164:80)
Annotations:
kubernetes.io/ingress.class: nginx
Events: <none>
Written by dada / 09 November 2018 / No comments
apiVersion: v1
kind: Service
metadata:
  name: wordpress-mysql
  labels:
    app: wordpress
spec:
  ports:
  - port: 3306
  selector:
    app: wordpress
    tier: mysql
  clusterIP: None
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-pv-claim2
  labels:
    app: wordpress
spec:
  storageClassName: rook-ceph-block
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 500Mi
---
apiVersion: apps/v1beta1
kind: Deployment
metadata:
  name: wordpress-mysql
  labels:
    app: wordpress
spec:
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: wordpress
        tier: mysql
    spec:
      containers:
      - image: mysql:5.6
        name: mysql
        env:
        - name: MYSQL_ROOT_PASSWORD
          value: changeme
        ports:
        - containerPort: 3306
          name: mysql
        volumeMounts:
        - name: mysql-persistent-storage
          mountPath: /var/lib/mysql
      volumes:
      - name: mysql-persistent-storage
        persistentVolumeClaim:
          claimName: mysql-pv-claim2
dada@k8smaster:~$ kubectl create -f mysql.yaml
default wordpress-mysql-75477bf794-89mzw 1/1 Running 1 5h38m 10.244.2.50 k8snode2 <none>
apiVersion: v1
kind: Service
metadata:
  name: wordpress
  labels:
    app: wordpress
spec:
  ports:
  - port: 80
  selector:
    app: wordpress
    tier: frontend
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: wp-pv-claim
  labels:
    app: wordpress
spec:
  storageClassName: rook-ceph-block
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 500Mi
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: wordpress
  labels:
    app: wordpress
spec:
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: wordpress
        tier: frontend
    spec:
      containers:
      - image: wordpress:4.6.1-apache
        name: wordpress
        env:
        - name: WORDPRESS_DB_HOST
          value: wordpress-mysql
        - name: WORDPRESS_DB_PASSWORD
          value: changeme
        ports:
        - containerPort: 80
          name: wordpress
        volumeMounts:
        - name: wordpress-persistent-storage
          mountPath: /var/www/html
      volumes:
      - name: wordpress-persistent-storage
        persistentVolumeClaim:
          claimName: wp-pv-claim
kubectl create -f wordpress.yaml
default wordpress-796698694f-sxbhb 1/1 Running 1 3h35m 10.244.2.51 k8snode2 <none>
dada@k8smaster:~/$ kubectl get services
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 4d8h
my-nginx-nginx-ingress-controller LoadBalancer 10.100.45.147 192.168.0.50 80:32462/TCP,443:30337/TCP 3d5h
my-nginx-nginx-ingress-default-backend ClusterIP 10.109.228.138 <none> 80/TCP 3d5h
wordpress ClusterIP 10.102.55.203 <none> 80/TCP 6m3s
wordpress-mysql ClusterIP None <none> 3306/TCP 6h33m
dada@k8smaster:~/$ kubectl describe service wordpress
Name: wordpress
Namespace: default
Labels: app=wordpress
Annotations: <none>
Selector: app=wordpress,tier=frontend
Type: ClusterIP
IP: 10.102.55.203
Port: <unset> 80/TCP
TargetPort: 80/TCP
Endpoints: 10.244.2.61:80
Session Affinity: None
Events: <none>
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  annotations:
    kubernetes.io/ingress.class: nginx
  name: nginx-ingress
spec:
  backend:
    serviceName: wordpress
    servicePort: 80
kubectl create -f ingress.yaml
dada@k8smaster:~/$ kubectl get ingresses
NAME HOSTS ADDRESS PORTS AGE
nginx-ingress * 80 6s
dada@k8smaster:~/$ kubectl describe ingress nginx-ingress
Name: nginx-ingress
Namespace: default
Address:
Default backend: wordpress:80 (10.244.2.61:80)
Rules:
Host Path Backends
---- ---- --------
* * wordpress:80 (10.244.2.61:80)
Annotations:
kubernetes.io/ingress.class: nginx
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal CREATE 65s nginx-ingress-controller Ingress default/nginx-ingress
Normal UPDATE 30s nginx-ingress-controller Ingress default/nginx-ingress
Written by dada / 09 November 2018 / No comments
helm install stable/nginx-ingress --name my-nginx
dada@k8smaster:~$ kubectl get pods -n default
NAME READY STATUS RESTARTS AGE
my-nginx-nginx-ingress-controller-565bc9555b-pcqjz 1/1 Running 2 8h
my-nginx-nginx-ingress-default-backend-5bcb65f5f4-728tx 1/1 Running 2 8h
dada@k8smaster:~$ kubectl --namespace default get services -o wide -w my-nginx-nginx-ingress-controller
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
my-nginx-nginx-ingress-controller LoadBalancer 10.100.45.147 <pending> 80:32462/TCP,443:30337/TCP 8s app=nginx-ingress,component=controller,release=my-nginx
helm install --name metallb stable/metallb
dada@k8smaster:~$ kubectl get pods -n metallb-system
NAME READY STATUS RESTARTS AGE
controller-765899887-ck6bv 1/1 Running 2 8h
speaker-jdqg9 1/1 Running 2 8h
speaker-t4vtk 1/1 Running 2 8h
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: default
  name: config
data:
  config: |
    address-pools:
    - name: default
      protocol: layer2
      addresses:
      - 192.168.0.50-192.168.0.60
kubectl create -f config.yaml
dada@k8smaster:~/$ kubectl --namespace default get services -o wide -w my-nginx-nginx-ingress-controller
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
my-nginx-nginx-ingress-controller LoadBalancer 10.100.45.147 192.168.0.50 80:32462/TCP,443:30337/TCP 3d3h app=nginx-ingress,component=controller,release=my-nginx
Written by dada / 08 November 2018 / 4 comments
apt-get install ceph-fs-common ceph-common
cd /bin
sudo curl -O https://raw.githubusercontent.com/ceph/ceph-docker/master/examples/kubernetes-coreos/rbd
sudo chmod +x /bin/rbd
rbd # running the wrapper once pulls the Ceph container image it relies on
dada@k8smaster:~$ helm repo add rook-beta https://charts.rook.io/beta
"rook-beta" has been added to your repositories
dada@k8smaster:~$ helm install --namespace rook-ceph-system rook-beta/rook-ceph
NAME: torrid-dragonfly
LAST DEPLOYED: Sun Nov 4 11:22:24 2018
NAMESPACE: rook-ceph-system
STATUS: DEPLOYED
dada@k8smaster:~$ kubectl --namespace rook-ceph-system get pods -l "app=rook-ceph-operator"
NAME READY STATUS RESTARTS AGE
rook-ceph-operator-f4cd7f8d5-zt7f4 1/1 Running 0 2m25s
dada@k8smaster:~$ kubectl get pods --all-namespaces -o wide | grep rook
rook-ceph-system rook-ceph-agent-pb62s 1/1 Running 0 4m10s 192.168.0.30 k8snode1 <none>
rook-ceph-system rook-ceph-agent-vccpt 1/1 Running 0 4m10s 192.168.0.18 k8snode2 <none>
rook-ceph-system rook-ceph-operator-f4cd7f8d5-zt7f4 1/1 Running 0 4m24s 10.244.2.62 k8snode2 <none>
rook-ceph-system rook-discover-589mf 1/1 Running 0 4m10s 10.244.2.63 k8snode2 <none>
rook-ceph-system rook-discover-qhv9q 1/1 Running 0 4m10s 10.244.1.232 k8snode1 <none>
#################################################################################
# This example first defines some necessary namespace and RBAC security objects.
# The actual Ceph Cluster CRD example can be found at the bottom of this example.
#################################################################################
apiVersion: v1
kind: Namespace
metadata:
  name: rook-ceph
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rook-ceph-cluster
  namespace: rook-ceph
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: rook-ceph-cluster
  namespace: rook-ceph
rules:
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: [ "get", "list", "watch", "create", "update", "delete" ]
---
# Allow the operator to create resources in this cluster's namespace
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: rook-ceph-cluster-mgmt
  namespace: rook-ceph
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: rook-ceph-cluster-mgmt
subjects:
- kind: ServiceAccount
  name: rook-ceph-system
  namespace: rook-ceph-system
---
# Allow the pods in this namespace to work with configmaps
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: rook-ceph-cluster
  namespace: rook-ceph
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: rook-ceph-cluster
subjects:
- kind: ServiceAccount
  name: rook-ceph-cluster
  namespace: rook-ceph
---
#################################################################################
# The Ceph Cluster CRD example
#################################################################################
apiVersion: ceph.rook.io/v1beta1
kind: Cluster
metadata:
  name: rook-ceph
  namespace: rook-ceph
spec:
  cephVersion:
    # For the latest ceph images, see https://hub.docker.com/r/ceph/ceph/tags
    image: ceph/ceph:v13.2.2-20181023
  dataDirHostPath: /var/lib/rook
  dashboard:
    enabled: true
  storage:
    useAllNodes: true
    useAllDevices: false
    config:
      databaseSizeMB: "1024"
      journalSizeMB: "1024"
kubectl create -f cluster.yaml
dada@k8smaster:~/rook$ kubectl get pods -n rook-ceph -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE
rook-ceph-mgr-a-5f6dd98574-tm9md 1/1 Running 0 3m3s 10.244.2.126 k8snode2 <none>
rook-ceph-mon0-sk798 1/1 Running 0 4m36s 10.244.1.42 k8snode1 <none>
rook-ceph-mon1-bxgjt 1/1 Running 0 4m16s 10.244.2.125 k8snode2 <none>
rook-ceph-mon2-snznb 1/1 Running 0 3m48s 10.244.1.43 k8snode1 <none>
rook-ceph-osd-id-0-54c856d49d-77hfr 1/1 Running 0 2m27s 10.244.1.45 k8snode1 <none>
rook-ceph-osd-id-1-7d98bf85b5-rt4jw 1/1 Running 0 2m26s 10.244.2.128 k8snode2 <none>
rook-ceph-osd-prepare-k8snode1-dzd5v 0/1 Completed 0 2m41s 10.244.1.44 k8snode1 <none>
rook-ceph-osd-prepare-k8snode2-2jgvg 0/1 Completed 0 2m41s 10.244.2.127 k8snode2 <none>
apiVersion: ceph.rook.io/v1beta1
kind: Pool
metadata:
  name: replicapool
  namespace: rook-ceph
spec:
  replicated:
    size: 3
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: rook-ceph-block
provisioner: ceph.rook.io/block
parameters:
  pool: replicapool
  clusterNamespace: rook-ceph
kubectl create -f storageclass.yaml
dada@k8smaster:~/rook$ cat dashboard-external.yaml
apiVersion: v1
kind: Service
metadata:
  name: rook-ceph-mgr-dashboard-external
  namespace: rook-ceph
  labels:
    app: rook-ceph-mgr
    rook_cluster: rook-ceph
spec:
  ports:
  - name: dashboard
    port: 7000
    protocol: TCP
    targetPort: 7000
  selector:
    app: rook-ceph-mgr
    rook_cluster: rook-ceph
  sessionAffinity: None
  type: NodePort
dada@k8smaster:~/rook$ kubectl -n rook-ceph get service | grep Node
rook-ceph-mgr-dashboard-external NodePort 10.99.88.135 <none> 7000:31165/TCP 3m41s
The storage setup I'm describing here is not without risk. The volumes you create need to be configured carefully. The examples you'll find here and there will indeed give you storage in your k8s cluster, but they will most likely leave your volumes tied to your pods: if you decide to delete the pod whose PVC you created, the PV will disappear, and your data with it.
Take the time to think it through, and to study the question far more thoroughly than what I cover in these posts, before attempting a production installation.
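One way to limit that risk, sketched here on top of the storageclass.yaml shown above (the class name is made up for the example and isn't used elsewhere in these posts), is to set a Retain reclaim policy so that deleting a PVC no longer deletes the PersistentVolume behind it:

# Hypothetical variant of the StorageClass above; only reclaimPolicy is new.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: rook-ceph-block-retain   # assumed name for this sketch
provisioner: ceph.rook.io/block
parameters:
  pool: replicapool
  clusterNamespace: rook-ceph
# Retain keeps the PV (and the Ceph image behind it) when the PVC is deleted;
# the default policy, Delete, removes them along with the claim.
reclaimPolicy: Retain

With this class, a released PV has to be cleaned up (or rebound) by hand, which is exactly the point: nothing disappears just because a pod or a claim did.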