Commit 32bd1081 by tingweiwang

fix bug

1 parent fbec0c67
apiVersion: v1
kind: Namespace
metadata:
  creationTimestamp: null
  name: autodl
  selfLink: /api/v1/namespaces/autodl
spec:
  finalizers:
  - kubernetes
status:
  phase: Active
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: autodl-serviceaccount
  namespace: autodl
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: autodl-clusterrole
rules:
- apiGroups:
  - ""
  resources:
  - nodes
  verbs: ["watch", "list", "patch"]
- nonResourceURLs:
  - /version/
  verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
  name: autodl-role
  namespace: autodl
rules:
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "watch", "list", "create", "delete", "patch"]
- apiGroups: [""]
  resources: ["configmaps", "secrets"]
  verbs: ["get", "update", "patch", "create", "delete"]
- apiGroups: ["", "extensions"]
  resources: ["services", "ingresses", "deployments", "replicasets"]
  verbs: ["get", "create", "patch", "delete", "list"]
- apiGroups: ["", "*"]
  resources: ["events", "pods/status", "pods/log"]
  verbs: ["watch", "get", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: autodl-role-binding
  namespace: autodl
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: autodl-role
subjects:
- kind: ServiceAccount
  name: autodl-serviceaccount
  namespace: autodl
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: autodl-clusterrole-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: autodl-clusterrole
subjects:
- kind: ServiceAccount
  name: autodl-serviceaccount
  namespace: autodl
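A quick way to confirm the bindings behave as intended once this is applied (the names are the ServiceAccount and roles defined above):

kubectl auth can-i list pods -n autodl --as=system:serviceaccount:autodl:autodl-serviceaccount      # expect yes (Role)
kubectl auth can-i list nodes --as=system:serviceaccount:autodl:autodl-serviceaccount               # expect yes (ClusterRole)
kubectl auth can-i delete nodes --as=system:serviceaccount:autodl:autodl-serviceaccount             # expect no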
---
apiVersion: v1
kind: Secret
metadata:
  name: releasev1-autodl-secret
  labels:
    app: releasev1-autocnn
    release: releasev1
    type: autocnn-core
  namespace: autodl
type: Opaque
data:
  autocnn-secret: U2VlVGFhU0F1dG9DTk4xMzUK # echo SeeTaaSAutoCNN135 | base64
  user-password: U2VlVGFhU0F1dG9DTk4xMzUK
  smtp-password: ""
  k8s-authorisation: ""
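The inline comment shows how these values were produced; note that echo without -n also encodes the trailing newline, which is why the committed value ends in ...MzUK. If the consuming service expects the bare string, encode with -n instead; decoding is an easy way to double-check either form:

echo -n 'SeeTaaSAutoCNN135' | base64            # U2VlVGFhU0F1dG9DTk4xMzU= (no newline)
echo 'U2VlVGFhU0F1dG9DTk4xMzUK' | base64 -d     # prints SeeTaaSAutoCNN135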
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: adl-pv
spec:
  nfs:
    path: /storage/nfs
    server: 192.168.1.53
  accessModes:
  - ReadWriteMany
  capacity:
    storage: 500Gi
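Before relying on this PV it is worth confirming the NFS export is reachable from the worker nodes; a minimal check using the server and path defined above (run on a node with the NFS client utilities installed):

showmount -e 192.168.1.53                                              # export list should include /storage/nfs
sudo mount -t nfs 192.168.1.53:/storage/nfs /mnt && sudo umount /mnt   # one-off mount test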
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: adl-pvc
  namespace: autodl
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 100Gi
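A minimal apply-and-verify pass for this manifest set (the file name is assumed; substitute whatever path it is saved under):

kubectl apply -f autodl-nfs-storage.yaml
kubectl get pv adl-pv
kubectl get pvc adl-pvc -n autodl        # STATUS should reach Bound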
apiVersion: v1
kind: Namespace
metadata:
  creationTimestamp: null
  name: autodl
  selfLink: /api/v1/namespaces/autodl
spec:
  finalizers:
  - kubernetes
status:
  phase: Active
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: autodl-serviceaccount
  namespace: autodl
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: autodl-clusterrole
rules:
- apiGroups:
  - ""
  resources:
  - nodes
  verbs: ["watch", "list", "patch"]
- nonResourceURLs:
  - /version/
  verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
  name: autodl-role
  namespace: autodl
rules:
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "watch", "list", "create", "delete", "patch"]
- apiGroups: [""]
  resources: ["configmaps", "secrets"]
  verbs: ["get", "update", "patch", "create", "delete"]
- apiGroups: ["", "extensions"]
  resources: ["services", "ingresses", "deployments", "replicasets"]
  verbs: ["get", "create", "patch", "delete", "list"]
- apiGroups: ["", "*"]
  resources: ["events", "pods/status", "pods/log"]
  verbs: ["watch", "get", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: autodl-role-binding
  namespace: autodl
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: autodl-role
subjects:
- kind: ServiceAccount
  name: autodl-serviceaccount
  namespace: autodl
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: autodl-clusterrole-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: autodl-clusterrole
subjects:
- kind: ServiceAccount
  name: autodl-serviceaccount
  namespace: autodl
---
apiVersion: v1
kind: Secret
metadata:
  name: releasev1-autodl-secret
  labels:
    app: releasev1-autocnn
    release: releasev1
    type: autocnn-core
  namespace: autodl
type: Opaque
data:
  autocnn-secret: U2VlVGFhU0F1dG9DTk4xMzUK # echo SeeTaaSAutoCNN135 | base64
  user-password: U2VlVGFhU0F1dG9DTk4xMzUK
  smtp-password: ""
  k8s-authorisation: ""
---
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret
  namespace: autodl
data:
  key: QVFBdlZnWmJoQ3NhRWhBQU95SlZGWWJadVJESnBRR3BKRERhc3c9PQo=
  # use this command to get key: sudo ceph auth get-key client.admin | base64
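The key above is cluster-specific; it can be regenerated exactly as the comment says, or the Secret can be created without hand-editing base64 at all (assumes the ceph admin keyring is readable on this host):

sudo ceph auth get-key client.admin | base64
kubectl create secret generic ceph-secret -n autodl \
--from-literal=key="$(sudo ceph auth get-key client.admin)" \
--dry-run -o yaml | kubectl apply -f -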
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: adl-pv
  namespace: autodl
  annotations:
    volume.beta.kubernetes.io/mount-options: rbytes
spec:
  capacity:
    storage: 500Gi
  accessModes:
  - ReadWriteMany
  cephfs:
    monitors:
    - 192.168.1.51:6789
    path: /
    user: admin
    secretRef:
      name: ceph-secret
    # secretFile: "/etc/ceph/admin.secret"
    readOnly: false
  persistentVolumeReclaimPolicy: Retain
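If pods later fail to attach this volume, a manual mount from any node isolates whether the monitor address or the admin key is at fault (assumes the kernel cephfs client / ceph-common is installed on that node):

sudo mount -t ceph 192.168.1.51:6789:/ /mnt -o name=admin,secret="$(sudo ceph auth get-key client.admin)"
sudo umount /mnt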
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: adl-pvc
  namespace: autodl
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 20Gi
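If the claim stays Pending after applying, the namespace events usually explain why (capacity or accessModes mismatch, or the PV already bound elsewhere):

kubectl describe pvc adl-pvc -n autodl
kubectl get events -n autodl --sort-by=.lastTimestamp | tail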
apiVersion: v1
kind: Namespace
metadata:
  creationTimestamp: null
  name: seetaas
spec:
  finalizers:
  - kubernetes
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: seetaas-serviceaccount
  namespace: seetaas
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: seetaas-clusterrole
rules:
- apiGroups:
  - ""
  resources:
  - nodes
  verbs: ["watch", "list", "patch"]
- nonResourceURLs:
  - /version/
  verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
  name: seetaas-role
  namespace: seetaas
rules:
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "watch", "list", "create", "delete", "patch"]
- apiGroups: [""]
  resources: ["configmaps", "secrets"]
  verbs: ["get", "update", "patch", "create", "delete"]
- apiGroups: ["", "extensions"]
  resources: ["services", "ingresses", "deployments", "replicasets"]
  verbs: ["get", "create", "patch", "delete", "list"]
- apiGroups: ["", "*"]
  resources: ["events", "pods/status", "pods/log"]
  verbs: ["watch", "get", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: seetaas-role-binding
  namespace: seetaas
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: seetaas-role
subjects:
- kind: ServiceAccount
  name: seetaas-serviceaccount
  namespace: seetaas
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: seetaas-clusterrole-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: seetaas-clusterrole
subjects:
- kind: ServiceAccount
  name: seetaas-serviceaccount
  namespace: seetaas
---
apiVersion: v1
kind: Secret
metadata:
  name: releasev1-seetaas-secret
  labels:
    app: releasev1-autocnn
    release: releasev1
    type: autocnn-core
  namespace: seetaas
type: Opaque
data:
  autocnn-secret: U2VlVGFhU0F1dG9DTk4xMzUK # echo SeeTaaSAutoCNN135 | base64
  user-password: U2VlVGFhU0F1dG9DTk4xMzUK
  smtp-password: ""
  k8s-authorisation: ""
---
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret
  namespace: seetaas
data:
  key: QVFBdlZnWmJoQ3NhRWhBQU95SlZGWWJadVJESnBRR3BKRERhc3c9PQo=
  # use this command to get key: sudo ceph auth get-key client.admin | base64
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: seetaas-pv
  namespace: seetaas
  annotations:
    volume.beta.kubernetes.io/mount-options: rbytes
spec:
  capacity:
    storage: 500Gi
  accessModes:
  - ReadWriteMany
  cephfs:
    monitors:
    - 192.168.1.51:6789
    path: /
    user: admin
    secretRef:
      name: ceph-secret
    # secretFile: "/etc/ceph/admin.secret"
    readOnly: false
  persistentVolumeReclaimPolicy: Retain
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: seetaas-pvc
  namespace: seetaas
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 20Gi
kubectl create secret generic harbor-secret -n autodl \
--from-file=.dockerconfigjson=/root/.docker/config.json \
--type=kubernetes.io/dockerconfigjson
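The pull secret only takes effect once something references it; patching the ServiceAccount created above is one common way (the per-workload alternative is to list it under spec.template.spec.imagePullSecrets), and the same create/patch pair is presumably needed in the seetaas and kpl namespaces if their workloads pull from the same registry:

kubectl patch serviceaccount autodl-serviceaccount -n autodl \
-p '{"imagePullSecrets": [{"name": "harbor-secret"}]}'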
apiVersion: v1
kind: Namespace
metadata:
  creationTimestamp: null
  name: kpl
spec:
  finalizers:
  - kubernetes
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kpl-serviceaccount
  namespace: kpl
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: kpl-clusterrole
rules:
- apiGroups:
  - ""
  resources:
  - nodes
  verbs: ["watch", "list", "patch"]
- nonResourceURLs:
  - /version/
  verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
  name: kpl-role
  namespace: kpl
rules:
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "watch", "list", "create", "delete", "patch"]
- apiGroups: [""]
  resources: ["configmaps", "secrets"]
  verbs: ["get", "update", "patch", "create", "delete"]
- apiGroups: ["", "extensions"]
  resources: ["services", "ingresses", "deployments", "replicasets"]
  verbs: ["get", "create", "patch", "delete", "list"]
- apiGroups: ["", "*"]
  resources: ["events", "pods/status", "pods/log"]
  verbs: ["watch", "get", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: kpl-role-binding
  namespace: kpl
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kpl-role
subjects:
- kind: ServiceAccount
  name: kpl-serviceaccount
  namespace: kpl
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: kpl-clusterrole-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kpl-clusterrole
subjects:
- kind: ServiceAccount
  name: kpl-serviceaccount
  namespace: kpl
---
apiVersion: v1
kind: Secret
metadata:
  name: releasev1-kpl-secret
  labels:
    app: releasev1-autocnn
    release: releasev1
    type: autocnn-core
  namespace: kpl
type: Opaque
data:
  autocnn-secret: U2VlVGFhU0F1dG9DTk4xMzUK # echo SeeTaaSAutoCNN135 | base64
  user-password: U2VlVGFhU0F1dG9DTk4xMzUK
  smtp-password: ""
  k8s-authorisation: ""
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: kpl-pv
spec:
  nfs:
    path: /storage/nfs
    server: 192.168.1.53
  accessModes:
  - ReadWriteMany
  capacity:
    storage: 500Gi
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: kpl-pvc
  namespace: kpl
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 100Gi
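With all of the manifest sets and the registry secret in place, a cross-namespace sweep is a convenient final check:

kubectl get ns autodl seetaas kpl
kubectl get sa,role,rolebinding,secret,pvc -n autodl      # repeat with -n seetaas and -n kpl
kubectl get pv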
@@ -68,7 +68,7 @@ sudo sed -i s/harbor_host/$harbor_host/g `find $workspace -maxdepth 3 -type f -
 cp ~/.docker/config.json $workspace/src/autodl-core/conf/ # auth file so the autodl-core service can push images to the registry in production and on-prem deployments
 #cp ~/.docker/config.json $workspace/src/seetaas-backend/conf/ # auth file so the autodl-core service can push images to the registry in production and on-prem deployments
 ################################## Patch the Dockerfiles #######################################################
-sed -i s#hb.seetatech.com/k8s/ubuntu-basic:16.04#$harbor_host/k8s/ubuntu-basic:16.04#g `find $workspace -maxdepth 5 -name Dockerfile -type f`
-sed -i s#hb.seetatech.com/k8s/image-proxy:v0.9.0#$harbor_host/k8s/image-proxy:v0.9.0#g `find $workspace -maxdepth 5 -name Dockerfile -type f`
-sed -i s#FROM\ nginx#FROM\ $harbor_host/k8s/nginx#g `find $workspace -maxdepth 5 -name Dockerfile -type f`
-sed -i s#hb.seetatech.com/k8s/cuda:10.0-base-ubuntu16.04#$harbor_host/k8s/cuda:10.0-base-ubuntu16.04#g `find $workspace -maxdepth 5 -name Dockerfile -type f`
+sed -i s#hub.kce.ksyun.com/kpl_k8s/ubuntu-basic:16.04#$harbor_host/k8s/ubuntu-basic:16.04#g `find $workspace -maxdepth 5 -name Dockerfile -type f`
+sed -i s#hub.kce.ksyun.com/kpl_k8s/image-proxy:v0.9.2#$harbor_host/k8s/image-proxy:v0.9.2#g `find $workspace -maxdepth 5 -name Dockerfile -type f`
+sed -i s#hub.kce.ksyun.com/kpl_k8s/nginx:latest#$harbor_host/k8s/nginx:latest#g `find $workspace -maxdepth 5 -name Dockerfile -type f`
+sed -i s#hub.kce.ksyun.com/kpl_k8s/cuda:10.0-base-ubuntu16.04#$harbor_host/k8s/cuda:10.0-base-ubuntu16.04#g `find $workspace -maxdepth 5 -name Dockerfile -type f`
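After this block runs, a grep over the same Dockerfile set confirms no reference to either source registry survived the rewrite (the registry names are the ones replaced above):

grep -ln 'hb.seetatech.com\|hub.kce.ksyun.com' `find $workspace -maxdepth 5 -name Dockerfile -type f` || echo 'all base images now come from the private harbor'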
@@ -21,8 +21,8 @@ mysql -u $mysql_user -p$mysql_password autodl-core <$workspace/src/autodl-core/s
 mysql -u $mysql_user -p$mysql_password autodl-core <$workspace/src/autodl-core/sql/1121.sql
 mysql -u $mysql_user -p$mysql_password autodl-core <$workspace/src/autodl-core/sql/190321.sql
 mysql -u $mysql_user -p$mysql_password autodl-core <$workspace/src/autodl-core/sql/190402.sql
-echo "Add the seetaas service record for autodl-core"
-mysql -u $mysql_user -p$mysql_password -e 'INSERT INTO `autodl-core`.`service` (`service_id`, `noti_api`) VALUES ("seetaas3", "http://kpl--monitor.kpl.svc.cluster.local:8920/status");'
+#echo "Add the seetaas service record for autodl-core"
+#mysql -u $mysql_user -p$mysql_password -e 'INSERT INTO `autodl-core`.`service` (`service_id`, `noti_api`) VALUES ("seetaas3", "http://kpl--monitor.kpl.svc.cluster.local:8920/status");'
 echo "Add the autodl service record for autodl-core"
 mysql -u $mysql_user -p$mysql_password -e 'INSERT INTO `autodl-core`.`service` (`service_id`, `noti_api`) VALUES ("autodl", "http://autodl--monitor.autodl.svc.cluster.local:8920/status");'
 ######################## Auto-run the SQL for the autocnn database ###################################################
@@ -2,26 +2,26 @@
 workspace=/workspace/seetaas
 config_workspace=/root/kpl_workspace/configmap-kpl
 ################ kpl component variables ##########################
-kpl_frontend=http://PUBLIC_IP:30180 #30180
-kpl_backend=http://PUBLIC_IP:30200 #30200
-kpl_back_socket=ws://PUBLIC_IP:30200 # 30200, wss when using https
-kpl_imageproxy=http://PUBLIC_IP:30202 #30202
-kpl_deploy=http://PUBLIC_IP:30201 #30201
-usercenter_frontend=http://PUBLIC_IP:30302 #30302
-usercenter_backend=http://PUBLIC_IP:30300 #30300
-core_nginx=http://PUBLIC_IP:30099 #30099
-visit_uc_frontend=PUBLIC_IP # host IP or domain name of the user-center frontend
-mobile_frontend=http://PUBLIC_IP:30181 #30181
+kpl_frontend=http://106.38.203.205:30180 #30180
+kpl_backend=http://106.38.203.205:30200 #30200
+kpl_back_socket=ws://106.38.203.205:30200 # 30200, wss when using https
+kpl_imageproxy=http://106.38.203.205:30202 #30202
+kpl_deploy=http://106.38.203.205:30201 #30201
+usercenter_frontend=http://106.38.203.205:30302 #30302
+usercenter_backend=http://106.38.203.205:30300 #30300
+core_nginx=http://106.38.203.205:30099 #30099
+visit_uc_frontend=106.38.203.205 # host IP or domain name of the user-center frontend
+mobile_frontend=http://106.38.203.205:30181 #30181
 ############### variables for other dependent services #########################################
-mysql_host=INTRANET_IP:3306
+mysql_host=192.168.1.32:3306
 mysql_user=root
 mysql_password=seetatech
 kpl_db_name=seetaas # database name for the kpl service
-redis_host=INTRANET_IP:6379
+redis_host=192.168.1.32:6379
 redis_password=seetatech
-mongo_host='mongodb://admin:admin@INTRANET_IP:27017/seetaas?authSource=admin' # remember to escape the & character
-harbor_host=INTRANET_IP:5000
-hb_host=INTRANET_IP:5000 # on-prem deployments share the single private harbor; for public deployments change this to hub.kce.ksyun.com
+mongo_host='mongodb://admin:admin@192.168.1.32:27017/seetaas?authSource=admin' # remember to escape the & character
+harbor_host=192.168.1.32:5000
+hb_host=192.168.1.32:5000 # on-prem deployments share the single private harbor; for public deployments change this to hub.kce.ksyun.com
 ############################ kpl keyword cleanup #################################################
 kpl_service_id=seetaas3
 kpl_realy_path=seetaas
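A quick guard after editing these values: make sure no placeholder survived anywhere the variables get templated into (the paths are the workspace variables defined above; PUBLIC_IP/INTRANET_IP are the placeholder names used before this change):

grep -rn 'PUBLIC_IP\|INTRANET_IP' $workspace $config_workspace && echo 'placeholders remain' || echo 'all placeholders replaced'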
@@ -56,7 +56,7 @@ sudo sed -i s/harbor_host/$harbor_host/g `find $workspace -maxdepth 3 -type f -
 cp ~/.docker/config.json $workspace/src/autodl-core/conf/ # auth file so the autodl-core service can push images to the registry in production and on-prem deployments
 cp ~/.docker/config.json $workspace/src/seetaas-backend/conf/ # auth file so the autodl-core service can push images to the registry in production and on-prem deployments
 ################################## Patch the Dockerfiles #######################################################
-sed -i s#hb.seetatech.com/k8s/ubuntu-basic:16.04#$harbor_host/k8s/ubuntu-basic:16.04#g `find $workspace -maxdepth 5 -name Dockerfile -type f`
-sed -i s#hb.seetatech.com/k8s/image-proxy:v0.9.0#$harbor_host/k8s/image-proxy:v0.9.0#g `find $workspace -maxdepth 5 -name Dockerfile -type f`
-sed -i s#FROM\ nginx#FROM\ $harbor_host/k8s/nginx#g `find $workspace -maxdepth 5 -name Dockerfile -type f`
-sed -i s#hb.seetatech.com/k8s/cuda:10.0-base-ubuntu16.04#$harbor_host/k8s/cuda:10.0-base-ubuntu16.04#g `find $workspace -maxdepth 5 -name Dockerfile -type f`
+sed -i s#hub.kce.ksyun.com/kpl_k8s/ubuntu-basic:16.04#$harbor_host/k8s/ubuntu-basic:16.04#g `find $workspace -maxdepth 5 -name Dockerfile -type f`
+sed -i s#hub.kce.ksyun.com/kpl_k8s/image-proxy:v0.9.2#$harbor_host/k8s/image-proxy:v0.9.2#g `find $workspace -maxdepth 5 -name Dockerfile -type f`
+sed -i s#hub.kce.ksyun.com/kpl_k8s/nginx:latest#$harbor_host/k8s/nginx#g `find $workspace -maxdepth 5 -name Dockerfile -type f`
+sed -i s#hub.kce.ksyun.com/kpl_k8s/cuda:10.0-base-ubuntu16.04#$harbor_host/k8s/cuda:10.0-base-ubuntu16.04#g `find $workspace -maxdepth 5 -name Dockerfile -type f`
@@ -23,13 +23,13 @@ mysql -u $mysql_user -p$mysql_password autodl-core <$workspace/src/autodl-core/s
 mysql -u $mysql_user -p$mysql_password autodl-core <$workspace/src/autodl-core/sql/190321.sql
 mysql -u $mysql_user -p$mysql_password autodl-core <$workspace/src/autodl-core/sql/190402.sql
 echo "Add the service record for autodl-core"
-mysql -u $mysql_user -p$mysql_password -e 'INSERT INTO `autodl-core`.`service` (`service_id`, `noti_api`) VALUES ("seetaas3", "http://seetaas--monitor.seetaas.svc.cluster.local:8920/status");'
+mysql -u $mysql_user -p$mysql_password -e 'INSERT INTO `autodl-core`.`service` (`service_id`, `noti_api`) VALUES ("kpl", "http://kpl--monitor.kpl.svc.cluster.local:8920/status");'
 ####################### Set up the user-center database ########################
 echo "Load the SQL required by user-center"
 mysql -u $mysql_user -p$mysql_password user-center <$workspace/src/user-center/sql/20190411.sql
 mysql -u $mysql_user -p$mysql_password user-center <$workspace/src/user-center/sql/20190425.sql
 echo "Add the service record for user-center, value: $seetaas_frontend"
-mysql -u $mysql_user -p$mysql_password -e 'INSERT INTO `user-center`.`app_info` (`app_name`, `host`) VALUES ("seetaas","http://106.38.203.204:30188");'
+mysql -u $mysql_user -p$mysql_password -e 'INSERT INTO `user-center`.`app_info` (`app_name`, `host`) VALUES ("kpl","http://106.38.203.204:30188");'
@@ -6,11 +6,11 @@ mysql -u $user -p$password -e 'drop database `autodl-core`'
 echo "Drop the user-center database"
 mysql -u $user -p$password -e 'drop database `user-center`'
 echo "Drop the seetaas database"
-mysql -u $user -p$password -e 'drop database `kpl`'
+mysql -u $user -p$password -e 'drop database `seetaas`'
 #################################################
 echo "Recreate the autodl-core database"
 mysql -u $user -p$password -e 'create database `autodl-core`'
 echo "Recreate the user-center database"
 mysql -u $user -p$password -e 'create database `user-center`'
 echo "Recreate the seetaas database"
-mysql -u $user -p$password -e 'create database `kpl`'
+mysql -u $user -p$password -e 'create database `seetaas`'
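Once these scripts have run, the rows and databases they create can be checked directly (credentials follow whichever variable names the surrounding script defines; shown here with $mysql_user/$mysql_password):

mysql -u $mysql_user -p$mysql_password -e 'SELECT service_id, noti_api FROM `autodl-core`.`service`;'
mysql -u $mysql_user -p$mysql_password -e 'SELECT app_name, host FROM `user-center`.`app_info`;'
mysql -u $mysql_user -p$mysql_password -e 'SHOW DATABASES;'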