Commit 0a48f656 by tingweiwang

first push

#!/bin/bash
# In the test environment this script runs as the seetaas user; the sed calls require sudo, so configure NOPASSWD for that user
###################### Project paths #################################
#workspace=/home/wangtingwei/workspace
config_workspace=/root/configmap/ # directory containing the configmap templates
################ SeeTaaS component variables ##########################
seetaas_frontend=http://106.38.203.204:30180 # SeeTaaS frontend home page
seetaas_backend=http://106.38.203.204:30200 # SeeTaaS backend API
seetaas_back_socket=ws://106.38.203.204:30200 # use wss when the backend is served over https
seetaas_imageproxy=http://106.38.203.204:30202 # SeeTaaS image server API
seetaas_deploy=http://106.38.203.204:30201 # SeeTaaS deploy API
usercenter_frontend=http://106.38.203.204:30302 # user-center frontend
usercenter_backend=http://106.38.203.204:30300 # user-center backend
core_nginx=http://106.38.203.204:30099
visit_uc_frontend=106.38.203.204 # host IP or domain name of the user-center frontend
mobile_frontend=http://106.38.203.204:30181 # SeeTaaS mobile QR-code page
################################ autodl config ##########################
#autodl_backend=http://106.38.203.204:30091
#autodl_back_socket=ws://106.38.203.204:30091
#autodl_file=http://106.38.203.204:30093
#deploy_is_private=true # true means a private deployment
#autodl_admin_backend=http://106.38.203.204:30092 # 30092
#autodl_admin_back_socket=ws://106.38.203.204:30092
#custom_source=
############### Variables for other dependency services #########################################
#autodl_es=http://192.168.1.51:9200
mysql_host=192.168.1.53:3306
mysql_user=root
mysql_password=root
redis_host=192.168.1.53:6379
redis_password=
mongo_host='mongodb://admin:admin@192.168.1.51:27017,192.168.1.14:27017/seetaas?replicaSet=my-mongodb\&authSource=admin' # remember to put a backslash before the &
harbor_host=192.168.1.53:5000 # internal image registry
hb_host=hb.seetatech.com # external image registry (pull source for algorithm-definition images)
##################################go_proxy_server###############
#go_proxy_server=http://goproxy.seetatech.com
############################### Rewrite the backend app.yaml ################################
sudo sed -i s#redis_host#$redis_host#g `find $config_workspace -type f -name "*.yaml"`
sudo sed -i s#mysql_host#$mysql_host#g `find $config_workspace -type f -name "*.yaml"`
sudo sed -i s#mysql_user#$mysql_user#g `find $config_workspace -type f -name "*.yaml"`
sudo sed -i s#mongo_host#$mongo_host#g `find $config_workspace -type f -name "*.yaml"`
sudo sed -i s#harbor_host#$harbor_host#g `find $config_workspace -type f -name "*.yaml"`
sudo sed -i s#redis_password#$redis_password#g `find $config_workspace -type f -name "*.yaml"`
sudo sed -i s#mysql_password#$mysql_password#g `find $config_workspace -type f -name "*.yaml"`
###################################################################################
sudo sed -i s#seetaas_deploy#$seetaas_deploy#g `find $config_workspace -type f -name "*.yaml"`
sudo sed -i s#core_nginx#$core_nginx#g `find $config_workspace -type f -name "*.yaml"`
sudo sed -i s#seetaas_frontend#$seetaas_frontend#g `find $config_workspace -type f -name "*.yaml"`
sudo sed -i s#visit_uc_frontend#$visit_uc_frontend#g `find $config_workspace -type f -name "*.yaml"`
############################### Rewrite the frontend app.yaml #######################################
sudo sed -i s#seetaas_backend#$seetaas_backend#g `find $config_workspace -type f -name "*.yaml"`
sudo sed -i s#usercenter_frontend#$usercenter_frontend#g `find $config_workspace -type f -name "*.yaml"`
sudo sed -i s#usercenter_backend#$usercenter_backend#g `find $config_workspace -type f -name "*.yaml"`
sudo sed -i s#seetaas_back_socket#$seetaas_back_socket#g `find $config_workspace -type f -name "*.yaml"`
sudo sed -i s#seetaas_imageproxy#$seetaas_imageproxy#g `find $config_workspace -type f -name "*.yaml"`
sudo sed -i s#mobile_frontend#$mobile_frontend#g `find $config_workspace -type f -name "*.yaml"`
sudo sed -i s#custom_source#$custom_source#g `find $config_workspace -type f -name "*.yaml"`
sudo sed -i s#hb_host#$hb_host#g `find $config_workspace -type f -name "*.yaml"`
#################################################################
#cp ~/.docker/config.json $workspace/src/autodl-core/conf/
#cp ~/.docker/config.json $workspace/src/seetaas-backend/conf/
# The test environment deploys a plain Docker registry, so pushing images from inside the pods needs no authentication.
######################################## autodl config #######################################
#sudo sed -i s#autodl_es#$autodl_es#g `find $config_workspace -type f -name "*.yaml"`
#sudo sed -i s#deploy_is_private#$deploy_is_private#g `find $config_workspace -type f -name "*.yaml"`
#sudo sed -i s#autodl_backend#$autodl_backend#g `find $config_workspace -type f -name "*.yaml"`
#sudo sed -i s#autodl_admin_backend#$autodl_admin_backend#g `find $config_workspace -type f -name "*.yaml"`
##############################################################################
#sudo sed -i s#autodl_backend#$autodl_backend#g `find $config_workspace -type f -name "*.yaml"`
#sudo sed -i s#autodl_back_socket#$autodl_back_socket#g `find $config_workspace -type f -name "*.yaml"`
#sudo sed -i s#autodl_file#$autodl_file#g `find $config_workspace -type f -name "*.yaml"`
#sudo sed -i s#autodl_admin_backend#$autodl_admin_backend#g `find $config_workspace -type f -name "*.yaml"`
#sudo sed -i s#autodl_admin_back_socket#$autodl_admin_back_socket#g `find $config_workspace -type f -name "*.yaml"`
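#################################applying the rewritten templates###############################
# Loading the substituted templates into the cluster is not handled by this script.
# A minimal sketch (the directory layout under $config_workspace is an assumption,
# not the actual repo structure) of packaging the rewritten app.yaml files into the
# cfg-* ConfigMaps referenced by the manifests below:
#   kubectl create configmap cfg-seetaas-backend \
#       --from-file=app.yaml=${config_workspace}/seetaas-backend/app.yaml \
#       -n seetaas --dry-run=client -o yaml | kubectl apply -f -
#   kubectl create configmap cfg-autodl-core \
#       --from-file=app.yaml=${config_workspace}/autodl-core/app.yaml \
#       -n autodl --dry-run=client -o yaml | kubectl apply -f -
# (on older kubectl releases use plain --dry-run instead of --dry-run=client)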
apiVersion: v1
kind: Namespace
metadata:
creationTimestamp: null
name: autodl
selfLink: /api/v1/namespaces/autodl
spec:
finalizers:
- kubernetes
status:
phase: Active
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: autodl-serviceaccount
namespace: autodl
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: autodl-clusterrole
rules:
- apiGroups:
- ""
resources:
- nodes
verbs: ["watch", "list", "patch"]
- nonResourceURLs:
- /version/
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
name: autodl-role
namespace: autodl
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "watch", "list", "create", "delete", "patch"]
- apiGroups: [""]
resources: ["configmaps", "secrets"]
verbs: ["get", "update", "patch", "create", "delete"]
- apiGroups: ["", "extensions"]
resources: ["services", "ingresses", "deployments", "replicasets"]
verbs: ["get", "create", "patch", "delete", "list"]
- apiGroups: ["", "*"]
resources: ["events", "pods/status", "pods/log"]
verbs: ["watch", "get", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
name: autodl-role-binding
namespace: autodl
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: autodl-role
subjects:
- kind: ServiceAccount
name: autodl-serviceaccount
namespace: autodl
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: autodl-clusterrole-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: autodl-clusterrole
subjects:
- kind: ServiceAccount
name: autodl-serviceaccount
namespace: autodl
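# The bindings above can be sanity-checked with kubectl's impersonation support
# (illustrative commands, not part of the deployment):
#   kubectl auth can-i create pods -n autodl --as=system:serviceaccount:autodl:autodl-serviceaccount
#   kubectl auth can-i list nodes --as=system:serviceaccount:autodl:autodl-serviceaccount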
apiVersion: v1
kind: Secret
metadata:
name: releasev1-autodl-secret
labels:
app: releasev1-autocnn
release: releasev1
type: autocnn-core
namespace: autodl
type: Opaque
data:
autocnn-secret: U2VlVGFhU0F1dG9DTk4xMzUK # echo SeeTaaSAutoCNN135 | base64
user-password: U2VlVGFhU0F1dG9DTk4xMzUK
smtp-password: ""
k8s-authorisation: ""
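# The values above come from a plain echo, so the decoded strings carry a trailing
# newline (the final "K" of the base64). If that newline is not wanted, regenerate with:
#   echo -n SeeTaaSAutoCNN135 | base64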
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: adl-pv
spec:
nfs:
path: xxx #nfs path
server: xxxx #nfs ip
accessModes:
- ReadWriteMany
capacity:
storage: 500Gi
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: adl-pvc
namespace: autodl
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 100Gi
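# Neither adl-pv nor adl-pvc sets a storageClassName, so (assuming the cluster has no
# default StorageClass) the claim binds to any ReadWriteMany volume with at least 100Gi,
# normally adl-pv above once its NFS placeholders are filled in. Verify the binding with:
#   kubectl get pv adl-pv
#   kubectl get pvc adl-pvc -n autodl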
apiVersion: v1
kind: Namespace
metadata:
creationTimestamp: null
name: seetaas
spec:
finalizers:
- kubernetes
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: seetaas-serviceaccount
namespace: seetaas
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: seetaas-clusterrole
rules:
- apiGroups:
- ""
resources:
- nodes
verbs: ["watch", "list", "patch"]
- nonResourceURLs:
- /version/
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
name: seetaas-role
namespace: seetaas
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "watch", "list", "create", "delete", "patch"]
- apiGroups: [""]
resources: ["configmaps", "secrets"]
verbs: ["get", "update", "patch", "create", "delete"]
- apiGroups: ["", "extensions"]
resources: ["services", "ingresses", "deployments", "replicasets"]
verbs: ["get", "create", "patch", "delete", "list"]
- apiGroups: ["", "*"]
resources: ["events", "pods/status", "pods/log"]
verbs: ["watch", "get", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
name: seetaas-role-binding
namespace: seetaas
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: seetaas-role
subjects:
- kind: ServiceAccount
name: seetaas-serviceaccount
namespace: seetaas
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: seetaas-clusterrole-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: seetaas-clusterrole
subjects:
- kind: ServiceAccount
name: seetaas-serviceaccount
namespace: seetaas
apiVersion: v1
kind: Secret
metadata:
name: releasev1-seetaas-secret
labels:
app: releasev1-autocnn
release: releasev1
type: autocnn-core
namespace: seetaas
type: Opaque
data:
autocnn-secret: U2VlVGFhU0F1dG9DTk4xMzUK # echo SeeTaaSAutoCNN135 | base64
user-password: U2VlVGFhU0F1dG9DTk4xMzUK
smtp-password: ""
k8s-authorisation: ""
apiVersion: v1
kind: PersistentVolume
metadata:
name: seetaas-pv
spec:
nfs:
path: xxx # set the NFS path
server: xxxx #nfs ip
accessModes:
- ReadWriteMany
capacity:
storage: 500Gi
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: seetaas-pvc
namespace: seetaas
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 100Gi
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: core--collector
namespace: autodl
spec:
template:
metadata:
labels:
app: core--collector
logCollect: "true"
spec:
imagePullSecrets:
- name: "harbor-secret"
serviceAccountName: autodl-serviceaccount
containers:
- name: core--collector
image: DOCKER_REGISTRY/core--collector:latest
imagePullPolicy: Always
volumeMounts:
- mountPath: /mnt/ceph
name: adl-volume
- name: docker-sock
mountPath: /var/run/docker.sock
subPath: docker.sock
- name: hostname
mountPath: /etc/autocnn_hostname
- name: config
mountPath: /adl/bin/conf/app.yaml
subPath: app.yaml
command: ["core--collector"]
resources:
requests:
cpu: "500m"
memory: "512Mi"
limits:
cpu: "1000m"
memory: "1024Mi"
volumes:
- name: adl-volume
persistentVolumeClaim:
claimName: adl-pvc
readOnly: false
- name: docker-sock
hostPath:
path: /var/run/
- name: hostname
hostPath:
path: /etc/hostname
- name: config
configMap:
name: cfg-autodl-core
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: core--monitor
namespace: autodl
spec:
replicas: 1
template:
metadata:
labels:
app: core--monitor
logCollect: "true"
spec:
imagePullSecrets:
- name: "harbor-secret"
serviceAccountName: autodl-serviceaccount
nodeSelector:
internal_service_node: "true"
containers:
- name: core--monitor
image: DOCKER_REGISTRY/adl-core-v1:latest
imagePullPolicy: Always
volumeMounts:
- mountPath: /mnt/ceph
name: adl-volume
- name: docker-sock
mountPath: /var/run/docker.sock
subPath: docker.sock
- name: config
mountPath: /adl/bin/conf/app.yaml
subPath: app.yaml
command: ["core--monitor"]
resources:
requests:
cpu: "500m"
memory: "512Mi"
limits:
cpu: "1000m"
memory: "1024Mi"
volumes:
- name: adl-volume
persistentVolumeClaim:
claimName: adl-pvc
readOnly: false
- name: docker-sock
hostPath:
path: /var/run/
- name: config
configMap:
name: cfg-autodl-core
apiVersion: v1
kind: Service
metadata:
name: core--nginx-svc
namespace: autodl
spec:
type: NodePort
selector:
app: core--nginx
ports:
- port: 80
name: port-80
targetPort: 80
nodePort: 30099
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: core--nginx
namespace: autodl
spec:
replicas: 1
template:
metadata:
labels:
app: core--nginx
spec:
nodeSelector:
internal_service_node: "true"
containers:
- name: core--nginx
image: DOCKER_REGISTRY/core--nginx:latest
imagePullPolicy: Always
ports:
- containerPort: 80
name: port-80
resources:
requests:
cpu: "500m"
memory: "512Mi"
limits:
cpu: "500m"
memory: "1024Mi"
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: core--server
namespace: autodl
spec:
replicas: 1
template:
metadata:
labels:
app: core--server
logCollect: "true"
spec:
imagePullSecrets:
- name: "harbor-secret"
serviceAccountName: autodl-serviceaccount
nodeSelector:
internal_service_node: "true"
containers:
- name: core--server
image: DOCKER_REGISTRY/adl-core-v1:latest
imagePullPolicy: Always
volumeMounts:
- mountPath: /mnt/ceph
name: adl-volume
- name: docker-sock
mountPath: /var/run/docker.sock
subPath: docker.sock
- name: config
mountPath: /adl/bin/conf/app.yaml
subPath: app.yaml
command: ["core--server"]
resources:
requests:
cpu: "500m"
memory: "512Mi"
limits:
cpu: "1000m"
memory: "1024Mi"
volumes:
- name: adl-volume
persistentVolumeClaim:
claimName: adl-pvc
readOnly: false
- name: docker-sock
hostPath:
path: /var/run/
- name: config
configMap:
name: cfg-autodl-core
apiVersion: v1
kind: Service
metadata:
labels:
app: core--server
name: core--server
namespace: autodl
spec:
type: NodePort
selector:
app: core--server
ports:
- port: 30100
name: port-30100
targetPort: 30100
nodePort: 30100
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: core--worker
namespace: autodl
spec:
replicas: 1
template:
metadata:
labels:
app: core--worker
logCollect: "true"
spec:
imagePullSecrets:
- name: "harbor-secret"
serviceAccountName: autodl-serviceaccount
nodeSelector:
internal_service_node: "true"
containers:
- name: core--worker
image: DOCKER_REGISTRY/adl-core-v1:latest
imagePullPolicy: Always
volumeMounts:
- mountPath: /mnt/ceph
name: adl-volume
- name: docker-sock
mountPath: /var/run/docker.sock
subPath: docker.sock
- name: config
mountPath: /adl/bin/conf/app.yaml
subPath: app.yaml
command: ["core--worker"]
resources:
requests:
cpu: "500m"
memory: "512Mi"
limits:
cpu: "1000m"
memory: "1024Mi"
volumes:
- name: adl-volume
persistentVolumeClaim:
claimName: adl-pvc
readOnly: false
- name: docker-sock
hostPath:
path: /var/run/
- name: config
configMap:
name: cfg-autodl-core
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: seetaas--deploy
namespace: seetaas
spec:
replicas: 1
template:
metadata:
labels:
app: seetaas--deploy
logCollect: "true"
spec:
imagePullSecrets:
- name: "harbor-secret"
serviceAccountName: seetaas-serviceaccount
nodeSelector:
seetaas: "true"
internal_service_node: "true"
containers:
- name: seetaas--deploy
image: DOCKER_REGISTRY/seetaas-backend-v1:latest
imagePullPolicy: Always
volumeMounts:
- name: docker-sock
mountPath: /var/run/docker.sock
subPath: docker.sock
- name: config
mountPath: /adl/bin/conf/app.yaml
subPath: app.yaml
command: ["seetaas--deploy"]
ports:
- containerPort: 8921
name: port-8921
resources:
requests:
cpu: "500m"
memory: "512Mi"
limits:
cpu: "1000m"
memory: "1024Mi"
volumes:
- name: docker-sock
hostPath:
path: /var/run/
- name: config
configMap:
name: cfg-seetaas-backend
apiVersion: v1
kind: Service
metadata:
labels:
app: seetaas--deploy
name: seetaas--deploy
namespace: seetaas
spec:
selector:
app: seetaas--deploy
ports:
- port: 8921
name: port-8921
targetPort: 8921
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: seetaas--image-proxy
namespace: seetaas
spec:
replicas: 1
template:
metadata:
labels:
app: seetaas--image-proxy
spec:
imagePullSecrets:
- name: "harbor-secret"
serviceAccountName: seetaas-serviceaccount
nodeSelector:
seetaas: "true"
internal_service_node: "true"
containers:
- name: seetaas--image-proxy
image: DOCKER_REGISTRY/image-proxy-v1:latest
imagePullPolicy: Always
volumeMounts:
- mountPath: /mnt/ceph
name: seetaas-volume
- name: docker-sock
mountPath: /var/run/docker.sock
subPath: docker.sock
- name: config
mountPath: /adl/bin/conf/app.yaml
subPath: app.yaml
command: ["/app/imageproxy"]
args: ["-addr", "0.0.0.0:8922", "-verbose"]
ports:
- containerPort: 8922
name: port-8922
resources:
requests:
cpu: "500m"
memory: "512Mi"
limits:
cpu: "1000m"
memory: "1024Mi"
volumes:
- name: seetaas-volume
persistentVolumeClaim:
claimName: seetaas-pvc
readOnly: false
- name: docker-sock
hostPath:
path: /var/run/
- name: config
configMap:
name: cfg-seetaas-backend
apiVersion: v1
kind: Service
metadata:
labels:
app: seetaas--image-proxy
name: seetaas--image-proxy
namespace: seetaas
spec:
selector:
app: seetaas--image-proxy
ports:
- port: 8922
name: port-8922
targetPort: 8922
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: seetaas--monitor
namespace: seetaas
spec:
replicas: 1
template:
metadata:
labels:
app: seetaas--monitor
logCollect: "true"
spec:
imagePullSecrets:
- name: "harbor-secret"
serviceAccountName: seetaas-serviceaccount
nodeSelector:
seetaas: "true"
internal_service_node: "true"
containers:
- name: seetaas--monitor
image: DOCKER_REGISTRY/seetaas-backend-v1:latest
imagePullPolicy: Always
volumeMounts:
- mountPath: /mnt/ceph
name: seetaas-volume
- name: docker-sock
mountPath: /var/run/docker.sock
subPath: docker.sock
- name: config
mountPath: /adl/bin/conf/app.yaml
subPath: app.yaml
command: ["seetaas--monitor"]
ports:
- containerPort: 8920
name: port-8920
resources:
requests:
cpu: "500m"
memory: "512Mi"
limits:
cpu: "1000m"
memory: "1024Mi"
volumes:
- name: seetaas-volume
persistentVolumeClaim:
claimName: seetaas-pvc
readOnly: false
- name: docker-sock
hostPath:
path: /var/run/
- name: config
configMap:
name: cfg-seetaas-backend
apiVersion: v1
kind: Service
metadata:
labels:
app: seetaas--monitor
name: seetaas--monitor
namespace: seetaas
spec:
selector:
app: seetaas--monitor
ports:
- port: 8920
name: port-8920
targetPort: 8920
apiVersion: v1
kind: Service
metadata:
name: seetaas--nginx-svc
namespace: seetaas
spec:
type: NodePort
selector:
app: seetaas--nginx
ports:
- port: 80
name: port-80
targetPort: 80
nodePort: 30200
- port: 81
name: port-81
targetPort: 81
nodePort: 30201
- port: 82
name: port-82
targetPort: 82
nodePort: 30202
- port: 443
name: port-443
targetPort: 443
nodePort: 30443
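# NodePort mapping used by the deployment script above:
#   30200 -> seetaas_backend / seetaas_back_socket
#   30201 -> seetaas_deploy
#   30202 -> seetaas_imageproxy
#   30443 -> the HTTPS entry point (port 443)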
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: seetaas--nginx
namespace: seetaas
spec:
replicas: 1
template:
metadata:
labels:
app: seetaas--nginx
spec:
imagePullSecrets:
- name: "harbor-secret"
nodeSelector:
seetaas: "true"
internal_service_node: "true"
containers:
- name: seetaas--nginx
image: DOCKER_REGISTRY/seetaas--nginx:latest
imagePullPolicy: Always
ports:
- containerPort: 80
name: port-80
- containerPort: 81
name: port-81
- containerPort: 82
name: port-82
- containerPort: 443
name: port-443
resources:
requests:
cpu: "500m"
memory: "512Mi"
limits:
cpu: "500m"
memory: "1024Mi"
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: seetaas--backend
namespace: seetaas
spec:
replicas: 1
template:
metadata:
labels:
app: seetaas--backend
spec:
imagePullSecrets:
- name: "harbor-secret"
serviceAccountName: seetaas-serviceaccount
nodeSelector:
seetaas: "true"
internal_service_node: "true"
containers:
- name: seetaas--backend
image: DOCKER_REGISTRY/seetaas-backend-v1:latest
imagePullPolicy: Always
volumeMounts:
- mountPath: /mnt/ceph
name: seetaas-volume
- name: docker-sock
mountPath: /var/run/docker.sock
subPath: docker.sock
- name: config
mountPath: /adl/bin/conf/app.yaml
subPath: app.yaml
command: ["seetaas--backend"]
ports:
- containerPort: 8919
name: port-8919
resources:
requests:
cpu: "1000m"
memory: "1024Mi"
limits:
cpu: "6000m"
memory: "8Gi"
volumes:
- name: seetaas-volume
persistentVolumeClaim:
claimName: seetaas-pvc
readOnly: false
- name: docker-sock
hostPath:
path: /var/run/
- name: config
configMap:
name: cfg-seetaas-backend
apiVersion: v1
kind: Service
metadata:
labels:
app: seetaas--backend
name: seetaas--backend
namespace: seetaas
spec:
selector:
app: seetaas--backend
ports:
- port: 8919
name: port-8919
targetPort: 8919
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: seetaas--worker
namespace: seetaas
spec:
replicas: 1
template:
metadata:
labels:
app: seetaas--worker
spec:
imagePullSecrets:
- name: "harbor-secret"
serviceAccountName: seetaas-serviceaccount
nodeSelector:
seetaas: "true"
internal_service_node: "true"
containers:
- name: seetaas--worker
image: DOCKER_REGISTRY/seetaas-backend-v1:latest
imagePullPolicy: Always
volumeMounts:
- mountPath: /mnt/ceph
name: seetaas-volume
- name: docker-sock
mountPath: /var/run/docker.sock
subPath: docker.sock
- name: config
mountPath: /adl/bin/conf/app.yaml
subPath: app.yaml
command: ["bash", "-c"]
args: ["seetaas--script && seetaas--worker"]
resources:
requests:
cpu: "500m"
memory: "512Mi"
limits:
cpu: "1000m"
memory: "1024Mi"
volumes:
- name: seetaas-volume
persistentVolumeClaim:
claimName: seetaas-pvc
readOnly: false
- name: docker-sock
hostPath:
path: /var/run/
- name: config
configMap:
name: cfg-seetaas-backend
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: seetaas--workspace-worker
namespace: seetaas
spec:
replicas: 1
template:
metadata:
labels:
app: seetaas--workspace-worker
spec:
imagePullSecrets:
- name: "harbor-secret"
serviceAccountName: seetaas-serviceaccount
nodeSelector:
seetaas: "true"
internal_service_node: "true"
containers:
- name: seetaas--workspace-worker
image: DOCKER_REGISTRY/seetaas-backend-v1:latest
imagePullPolicy: Always
volumeMounts:
- mountPath: /mnt/ceph
name: seetaas-volume
- name: docker-sock
mountPath: /var/run/docker.sock
subPath: docker.sock
- name: config
mountPath: /adl/bin/conf/app.yaml
subPath: app.yaml
command: ["seetaas--workspace-worker"]
resources:
requests:
cpu: "500m"
memory: "512Mi"
limits:
cpu: "1000m"
memory: "1024Mi"
volumes:
- name: seetaas-volume
persistentVolumeClaim:
claimName: seetaas-pvc
readOnly: false
- name: docker-sock
hostPath:
path: /var/run/
- name: config
configMap:
name: cfg-seetaas-backend
server
{
listen 80;
location / {
try_files $uri $uri/ /index.html;
root /usr/share/nginx/html/;
}
}
server
{
listen 81;
location / {
index mobile.html;
try_files $uri $uri/ /mobile.html;
root /usr/share/nginx/html/;
}
}
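# Port 80 serves the desktop single-page app (index.html) and port 81 the mobile
# QR-code page (mobile.html); the seetaas--frontend Service below exposes them as
# NodePorts 30180 (seetaas_frontend) and 30181 (mobile_frontend).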
apiVersion: v1
kind: Service
metadata:
name: seetaas--frontend
namespace: seetaas
spec:
type: NodePort
selector:
app: seetaas--frontend
ports:
- port: 80
name: port-80
targetPort: 80
nodePort: 30180
- port: 81
name: port-81
targetPort: 81
nodePort: 30181
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: seetaas--frontend
namespace: seetaas
spec:
replicas: 1
template:
metadata:
labels:
app: seetaas--frontend
spec:
imagePullSecrets:
- name: "harbor-secret"
nodeSelector:
internal_service_node: "true"
containers:
- name: seetaas--frontend
image: DOCKER_REGISTRY/seetaas--frontend:latest
imagePullPolicy: Always
volumeMounts:
- name: config
mountPath: /usr/share/nginx/html/production-base.js
subPath: production-base.js
ports:
- containerPort: 80
name: port-80
- containerPort: 81
name: port-81
resources:
requests:
cpu: "500m"
memory: "512Mi"
limits:
cpu: "1000m"
memory: "1024Mi"
volumes:
- name: config
configMap:
name: cfg-seetaas-frontend
apiVersion: v1
kind: Service
metadata:
name: user-center--nginx-svc
namespace: autodl
spec:
type: NodePort
selector:
app: user-center--nginx
ports:
- port: 30300
name: port-30300
targetPort: 30300
nodePort: 30300
- port: 30302
name: port-30302
targetPort: 30302
nodePort: 30302
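# NodePort mapping used by the deployment script above:
#   30300 -> usercenter_backend
#   30302 -> usercenter_frontend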
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: user-center--nginx
namespace: autodl
spec:
replicas: 1
template:
metadata:
labels:
app: user-center--nginx
spec:
imagePullSecrets:
- name: "harbor-secret"
nodeSelector:
internal_service_node: "true"
containers:
- name: user-center--nginx
image: DOCKER_REGISTRY/user-center--nginx:latest
imagePullPolicy: Always
ports:
- containerPort: 30300
name: port-30300
- containerPort: 30302
name: port-30302
resources:
requests:
cpu: "500m"
memory: "512Mi"
limits:
cpu: "500m"
memory: "1024Mi"
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: user-center--rpc-server
namespace: autodl
spec:
replicas: 1
template:
metadata:
labels:
app: user-center--rpc-server
spec:
imagePullSecrets:
- name: "harbor-secret"
serviceAccountName: autodl-serviceaccount
nodeSelector:
internal_service_node: "true"
containers:
- name: user-center--rpc-server
image: DOCKER_REGISTRY/user-center--backend:latest
imagePullPolicy: Always
volumeMounts:
- mountPath: /mnt/ceph
name: adl-volume
- name: docker-sock
mountPath: /var/run/docker.sock
subPath: docker.sock
- name: config
mountPath: /adl/bin/conf/app.yaml
subPath: app.yaml
command: ["user-center--rpc-server"]
ports:
- containerPort: 10000
name: port-10000
- containerPort: 10001
name: port-10001
resources:
requests:
cpu: "500m"
memory: "512Mi"
limits:
cpu: "1000m"
memory: "1024Mi"
volumes:
- name: adl-volume
persistentVolumeClaim:
claimName: adl-pvc
readOnly: false
- name: docker-sock
hostPath:
path: /var/run/
- name: config
configMap:
name: cfg-user-center-backend
apiVersion: v1
kind: Service
metadata:
labels:
app: user-center--rpc-server
name: user-center--rpc-server
namespace: autodl
spec:
type: NodePort
selector:
app: user-center--rpc-server
ports:
- port: 10001
name: port-10001
targetPort: 10001
nodePort: 30301
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: user-center--backend
namespace: autodl
spec:
replicas: 1
template:
metadata:
labels:
app: user-center--backend
spec:
imagePullSecrets:
- name: "harbor-secret"
serviceAccountName: autodl-serviceaccount
nodeSelector:
internal_service_node: "true"
containers:
- name: user-center--backend
image: DOCKER_REGISTRY/user-center--backend:latest
imagePullPolicy: Always
volumeMounts:
- mountPath: /mnt/ceph
name: adl-volume
- name: docker-sock
mountPath: /var/run/docker.sock
subPath: docker.sock
- name: config
mountPath: /adl/bin/conf/app.yaml
subPath: app.yaml
command: ["user-center--backend"]
ports:
- containerPort: 10000
name: port-10000
resources:
requests:
cpu: "500m"
memory: "512Mi"
limits:
cpu: "1000m"
memory: "1024Mi"
volumes:
- name: adl-volume
persistentVolumeClaim:
claimName: adl-pvc
readOnly: false
- name: docker-sock
hostPath:
path: /var/run/
- name: config
configMap:
name: cfg-user-center-backend
apiVersion: v1
kind: Service
metadata:
labels:
app: user-center--backend
name: user-center--backend
namespace: autodl
spec:
selector:
app: user-center--backend
ports:
- port: 10000
name: port-10000
targetPort: 10000
apiVersion: v1
kind: Service
metadata:
name: user-center--frontend
namespace: autodl
spec:
selector:
app: user-center--frontend
ports:
- port: 80
name: port-80
targetPort: 80
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: user-center--frontend
namespace: autodl
spec:
replicas: 1
template:
metadata:
labels:
app: user-center--frontend
spec:
imagePullSecrets:
- name: "harbor-secret"
nodeSelector:
internal_service_node: "true"
containers:
- name: user-center--frontend
image: DOCKER_REGISTRY/user-center--frontend:latest
imagePullPolicy: Always
volumeMounts:
- name: config
mountPath: /usr/share/nginx/html/production-base.js
subPath: production-base.js
ports:
- containerPort: 80
name: port-80
resources:
requests:
cpu: "500m"
memory: "512Mi"
limits:
cpu: "1000m"
memory: "1024Mi"
volumes:
- name: config
configMap:
name: cfg-user-center-frontend
create table `autodl-core`.task_status
(
id int auto_increment,
task_id varchar(64) not null,
status varchar(64) not null,
created_at datetime null,
msg text null,
constraint task_id_uindex unique (id));
alter table `autodl-core`.task_status
add primary key (id)
;
create table `autodl-core`.job
(
id int auto_increment,
task_id varchar(64) not null,
definition json not null,
created_at datetime null,
updated_at datetime null,
constraint job_id_uindex unique (id)
)
;
alter table `autodl-core`.job
add primary key (id)
;
ALTER TABLE job ADD mode varchar(63) NULL;
create table `autodl-core`.service
(
id int auto_increment,
service_id varchar(255) not null,
noti_api text not null,
created_at datetime null,
constraint service_id_uindex unique (id),
constraint service_service_id_uindex unique (service_id)
)
;
alter table `autodl-core`.service
add primary key (id)
;
ALTER TABLE job ADD has_service tinyint DEFAULT 0 NULL;
ALTER TABLE job ADD service_is_clean tinyint DEFAULT 0 NULL;
ALTER TABLE job DROP mode;
ALTER TABLE job ADD log_is_persist tinyint DEFAULT 0 NULL;
ALTER TABLE job ADD service_id varchar(255) DEFAULT '' NOT NULL;
ALTER TABLE task_status ADD service_id varchar(255) DEFAULT '' NOT NULL;
ALTER TABLE task_status MODIFY COLUMN service_id varchar(255) NOT NULL DEFAULT '' AFTER id;
ALTER TABLE task_status ADD is_notify_success tinyint DEFAULT 0 NOT NULL;
create table `autodl-core`.task
(
id int auto_increment,
service_id varchar(255) not null,
task_type varchar(255) not null,
task_list text not null,
created_at datetime null,
constraint task_id_uindex unique (id)
)
;
alter table `autodl-core`.task
add primary key (id)
;
ALTER TABLE job ADD log_is_flush_over tinyint DEFAULT 0 NULL;
create table `user-center`.user_info
(
id int unsigned auto_increment,
username varchar(256) charset utf8 not null,
nickname varchar(255) charset utf8 null,
signature varchar(255) charset utf8 null,
email varchar(255) charset utf8 null,
profile_image_url varchar(255) charset utf8 null,
phone varchar(11) charset utf8 null,
password varchar(255) charset utf8 null,
created_at datetime null,
updated_at datetime null,
local_server_token varchar(255) charset utf8mb4 null,
is_admin tinyint default '0' null,
constraint users_userinfo_id_uindex unique (id)
);
alter table `user-center`.user_info add primary key (id);
create table `user-center`.app_info
(
id int auto_increment,
app_name varchar(255) not null,
host varchar(255) not null,
created_at datetime null,
constraint service_info_id_uindex unique (id)
);
alter table `user-center`.app_info add primary key (id);
ALTER TABLE user_info ADD sha1_password varchar(255) NULL;
ALTER TABLE user_info DROP nickname;
ALTER TABLE user_info DROP signature;
ALTER TABLE user_info DROP profile_image_url;
ALTER TABLE user_info DROP local_server_token;
ALTER TABLE user_info DROP is_admin;
CREATE UNIQUE INDEX user_info_username_uindex ON user_info (username);
ALTER TABLE user_info MODIFY phone varchar(11) NOT NULL;
CREATE UNIQUE INDEX user_info_phone_uindex ON user_info (phone);
ALTER TABLE user_info ADD first_login tinyint NULL;
ALTER TABLE user_info MODIFY phone varchar(255) NOT NULL;
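-- A minimal sketch of applying these DDL files with the mysql client, using the
-- connection settings from the deployment script (file names are illustrative and
-- may not match the actual migration layout):
--   mysql -h 192.168.1.53 -P 3306 -uroot -proot autodl-core < autodl-core.sql
--   mysql -h 192.168.1.53 -P 3306 -uroot -proot user-center < user-center.sql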