Commit 484e0011, authored Jun 08, 2020 by tingweiwang
kpl2.0
1 parent 7f8938d4
Showing 30 changed files with 129 additions and 536 deletions
autodl_kpl_base/autodl-nfs/4-pv_pvc.yaml
autodl_kpl_base/kpl-nfs/4-pv_pvc.yaml
autodl_kpl_base/kpl-ssl-configmap-autodl.yaml
autodl_kpl_base/kpl-ssl-configmap.yaml
autodl_seetaas_base/autodl-nfs/1-namespace.yaml
autodl_seetaas_base/autodl-nfs/2-autocnn-sa.yaml
autodl_seetaas_base/autodl-nfs/3-secrets.yaml
autodl_seetaas_base/autodl-nfs/4-pv_pvc.yaml
autodl_seetaas_base/ceph-back/autodl-ceph/1-namespace.yaml
autodl_seetaas_base/ceph-back/autodl-ceph/2-autocnn-sa.yaml
autodl_seetaas_base/ceph-back/autodl-ceph/3-secrets.yaml
autodl_seetaas_base/ceph-back/autodl-ceph/4-ceph-secret.yaml
autodl_seetaas_base/ceph-back/autodl-ceph/5-pv_pvc.yaml
autodl_seetaas_base/ceph-back/seetaas-ceph/.rpc-server-user-center-v1.yaml.swp
autodl_seetaas_base/ceph-back/seetaas-ceph/1-namespace.yaml
autodl_seetaas_base/ceph-back/seetaas-ceph/2-autocnn-sa.yaml
autodl_seetaas_base/ceph-back/seetaas-ceph/3-secrets.yaml
autodl_seetaas_base/ceph-back/seetaas-ceph/4-ceph-secret.yaml
autodl_seetaas_base/ceph-back/seetaas-ceph/5-pv_pvc.yaml
autodl_seetaas_base/create-harbor-secret.sh
autodl_seetaas_base/kpl-nfs/.rpc-server-user-center-v1.yaml.swp
autodl_seetaas_base/kpl-nfs/1-namespace.yaml
autodl_seetaas_base/kpl-nfs/2-autocnn-sa.yaml
autodl_seetaas_base/kpl-nfs/3-secrets.yaml
autodl_seetaas_base/kpl-nfs/4-pv_pvc.yaml
script/k8s/deploy-gpu-k8s.sh
script/seetaas/2-replace-file.sh
script/seetaas/5-seetaas-start.sh
script/seetaas/reset_mysql.sh
script/seetaas/start_server.sh
autodl_kpl_base/autodl-nfs/4-pv_pvc.yaml
@@ -5,9 +5,9 @@ metadata:
   name: adl-pv
 spec:
   nfs:
-    path: /storage/nfs
-    server: 192.168.1.53
+    path: NFS_PATH        # template variable: the NFS export directory
+    server: NFS_SERVER    # template variable: the NFS server IP
   accessModes:
   - ReadWriteMany
   capacity:
autodl_kpl_base/kpl-nfs/4-pv_pvc.yaml
@@ -4,8 +4,8 @@ metadata:
   name: kpl-pv
 spec:
   nfs:
-    path: /storage/nfs
-    server: 192.168.1.53
+    path: NFS_PATH
+    server: NFS_SERVER
   accessModes:
   - ReadWriteMany
   capacity:
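Both PV manifests are now templates. A minimal sketch of rendering and applying one by hand, assuming the old defaults as example values (deploy-gpu-k8s.sh later performs the NFS_PATH substitution itself; the # delimiter is used here so slashes in the path do not clash with sed):

    NFS_PATH=/storage/nfs        # example value only
    NFS_SERVER=192.168.1.53      # example value only
    sed -i "s#NFS_PATH#${NFS_PATH}#g; s#NFS_SERVER#${NFS_SERVER}#g" 4-pv_pvc.yaml
    kubectl apply -f 4-pv_pvc.yaml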
autodl_kpl_base/kpl-ssl-configmap-autodl.yaml
new file 0 → 100644

apiVersion: v1
data:
  server.crt: |
    -----BEGIN CERTIFICATE-----
    MIICKTCCAbCgAwIBAgIJAOEzff/TB45/MAoGCCqGSM49BAMCMFMxCzAJBgNVBAYT
    AkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBXaWRn
    aXRzIFB0eSBMdGQxDDAKBgNVBAMMA0tQTDAeFw0yMDA2MDMwMzA2MDRaFw0zMDA2
    MDEwMzA2MDRaMFMxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
    HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQxDDAKBgNVBAMMA0tQTDB2
    MBAGByqGSM49AgEGBSuBBAAiA2IABDUlvu7GTOB4kYiqBuRqiLU3chqccZhMFL16
    olmMX31M8EWA0VXj5JeMo4js7NcuBRfFp2JIdhqOroodz+Bu64nmhjbr7Qkglk14
    XguoUrwycTAlba2JdBpXRXwY5uP7eqNQME4wHQYDVR0OBBYEFPQ81JjaE8UG4FyX
    Hjo09H9dRkcEMB8GA1UdIwQYMBaAFPQ81JjaE8UG4FyXHjo09H9dRkcEMAwGA1Ud
    EwQFMAMBAf8wCgYIKoZIzj0EAwIDZwAwZAIwSCzsAdwv5fJOlAMI6W+0s5whygR3
    VQEq88EffPmjQ8Cn6rqWFzev4Cd5W18Qput9AjAjoBh5WdlK1N0sIZpRLaCYK7El
    2vab3X1CbV8MkwGJU7Vnjav+w185kSNpbpF6idw=
    -----END CERTIFICATE-----
  server.key: |
    -----BEGIN EC PARAMETERS-----
    BgUrgQQAIg==
    -----END EC PARAMETERS-----
    -----BEGIN EC PRIVATE KEY-----
    MIGkAgEBBDCc8hpwAUrmEZUnFeD4Fi/OnMT2fAXVtJ50FIR/HCWMD/pPDV1uKLZI
    Hm6h6fRQX82gBwYFK4EEACKhZANiAAQ1Jb7uxkzgeJGIqgbkaoi1N3IanHGYTBS9
    eqJZjF99TPBFgNFV4+SXjKOI7OzXLgUXxadiSHYajq6KHc/gbuuJ5oY26+0JIJZN
    eF4LqFK8MnEwJW2tiXQaV0V8GObj+3o=
    -----END EC PRIVATE KEY-----
kind: ConfigMap
metadata:
  name: kpl-ssl
  namespace: autodl
autodl_kpl_base/kpl-ssl-configmap.yaml
new file 0 → 100644

apiVersion: v1
data:
  server.crt: |
    -----BEGIN CERTIFICATE-----
    MIICKTCCAbCgAwIBAgIJAOEzff/TB45/MAoGCCqGSM49BAMCMFMxCzAJBgNVBAYT
    AkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBXaWRn
    aXRzIFB0eSBMdGQxDDAKBgNVBAMMA0tQTDAeFw0yMDA2MDMwMzA2MDRaFw0zMDA2
    MDEwMzA2MDRaMFMxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
    HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQxDDAKBgNVBAMMA0tQTDB2
    MBAGByqGSM49AgEGBSuBBAAiA2IABDUlvu7GTOB4kYiqBuRqiLU3chqccZhMFL16
    olmMX31M8EWA0VXj5JeMo4js7NcuBRfFp2JIdhqOroodz+Bu64nmhjbr7Qkglk14
    XguoUrwycTAlba2JdBpXRXwY5uP7eqNQME4wHQYDVR0OBBYEFPQ81JjaE8UG4FyX
    Hjo09H9dRkcEMB8GA1UdIwQYMBaAFPQ81JjaE8UG4FyXHjo09H9dRkcEMAwGA1Ud
    EwQFMAMBAf8wCgYIKoZIzj0EAwIDZwAwZAIwSCzsAdwv5fJOlAMI6W+0s5whygR3
    VQEq88EffPmjQ8Cn6rqWFzev4Cd5W18Qput9AjAjoBh5WdlK1N0sIZpRLaCYK7El
    2vab3X1CbV8MkwGJU7Vnjav+w185kSNpbpF6idw=
    -----END CERTIFICATE-----
  server.key: |
    -----BEGIN EC PARAMETERS-----
    BgUrgQQAIg==
    -----END EC PARAMETERS-----
    -----BEGIN EC PRIVATE KEY-----
    MIGkAgEBBDCc8hpwAUrmEZUnFeD4Fi/OnMT2fAXVtJ50FIR/HCWMD/pPDV1uKLZI
    Hm6h6fRQX82gBwYFK4EEACKhZANiAAQ1Jb7uxkzgeJGIqgbkaoi1N3IanHGYTBS9
    eqJZjF99TPBFgNFV4+SXjKOI7OzXLgUXxadiSHYajq6KHc/gbuuJ5oY26+0JIJZN
    eF4LqFK8MnEwJW2tiXQaV0V8GObj+3o=
    -----END EC PRIVATE KEY-----
kind: ConfigMap
metadata:
  creationTimestamp: "2020-06-03T03:06:22Z"
  name: kpl-ssl
  namespace: kpl
  resourceVersion: "61559587"
  selfLink: /api/v1/namespaces/kpl/configmaps/kpl-ssl
  uid: 4c6174b1-3847-4c29-a7e7-fcd7b6e011e9
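For reference, a hedged sketch of how a ConfigMap shaped like kpl-ssl could be regenerated with a fresh key pair. The certificate subject and validity period are assumptions; only the file names server.crt and server.key come from the manifest above:

    openssl ecparam -genkey -name secp384r1 -out server.key            # P-384 key, matching the EC parameters block above
    openssl req -new -x509 -key server.key -out server.crt -days 3650 -subj "/CN=KPL"
    kubectl create configmap kpl-ssl -n kpl --from-file=server.crt --from-file=server.key \
      --dry-run=client -o yaml > kpl-ssl-configmap.yaml                # --dry-run=client needs kubectl >= 1.18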
autodl_seetaas_base/autodl-nfs/1-namespace.yaml
deleted 100644 → 0

apiVersion: v1
kind: Namespace
metadata:
  creationTimestamp: null
  name: autodl
  selfLink: /api/v1/namespaces/autodl
spec:
  finalizers:
  - kubernetes
status:
  phase: Active
autodl_seetaas_base/autodl-nfs/2-autocnn-sa.yaml
deleted 100644 → 0

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: autodl-serviceaccount
  namespace: autodl
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: autodl-clusterrole
rules:
- apiGroups:
  - ""
  resources:
  - nodes
  verbs: ["watch", "list", "patch"]
- nonResourceURLs:
  - /version/
  verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
  name: autodl-role
  namespace: autodl
rules:
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "watch", "list", "create", "delete", "patch"]
- apiGroups: [""]
  resources: ["configmaps", "secrets"]
  verbs: ["get", "update", "patch", "create", "delete"]
- apiGroups: ["", "extensions"]
  resources: ["services", "ingresses", "deployments", "replicasets"]
  verbs: ["get", "create", "patch", "delete", "list"]
- apiGroups: ["", "*"]
  resources: ["events", "pods/status", "pods/log"]
  verbs: ["watch", "get", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: autodl-role-binding
  namespace: autodl
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: autodl-role
subjects:
- kind: ServiceAccount
  name: autodl-serviceaccount
  namespace: autodl
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: autodl-clusterrole-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: autodl-clusterrole
subjects:
- kind: ServiceAccount
  name: autodl-serviceaccount
  namespace: autodl
autodl_seetaas_base/autodl-nfs/3-secrets.yaml
deleted 100644 → 0

apiVersion: v1
kind: Secret
metadata:
  name: releasev1-autodl-secret
  labels:
    app: releasev1-autocnn
    release: releasev1
    type: autocnn-core
  namespace: autodl
type: Opaque
data:
  autocnn-secret: U2VlVGFhU0F1dG9DTk4xMzUK   # echo SeeTaaSAutoCNN135 | base64
  user-password: U2VlVGFhU0F1dG9DTk4xMzUK
  smtp-password: ""
  k8s-authorisation: ""
autodl_seetaas_base/autodl-nfs/4-pv_pvc.yaml
deleted 100644 → 0

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: adl-pv
spec:
  nfs:
    path: /storage/nfs
    server: 192.168.1.53
  accessModes:
  - ReadWriteMany
  capacity:
    storage: 500Gi
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: adl-pvc
  namespace: autodl
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 100Gi
autodl_seetaas_base/ceph-back/autodl-ceph/1-namespace.yaml
deleted 100644 → 0

apiVersion: v1
kind: Namespace
metadata:
  creationTimestamp: null
  name: autodl
  selfLink: /api/v1/namespaces/autodl
spec:
  finalizers:
  - kubernetes
status:
  phase: Active
autodl_seetaas_base/ceph-back/autodl-ceph/2-autocnn-sa.yaml
deleted 100644 → 0

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: autodl-serviceaccount
  namespace: autodl
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: autodl-clusterrole
rules:
- apiGroups:
  - ""
  resources:
  - nodes
  verbs: ["watch", "list", "patch"]
- nonResourceURLs:
  - /version/
  verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
  name: autodl-role
  namespace: autodl
rules:
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "watch", "list", "create", "delete", "patch"]
- apiGroups: [""]
  resources: ["configmaps", "secrets"]
  verbs: ["get", "update", "patch", "create", "delete"]
- apiGroups: ["", "extensions"]
  resources: ["services", "ingresses", "deployments", "replicasets"]
  verbs: ["get", "create", "patch", "delete", "list"]
- apiGroups: ["", "*"]
  resources: ["events", "pods/status", "pods/log"]
  verbs: ["watch", "get", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: autodl-role-binding
  namespace: autodl
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: autodl-role
subjects:
- kind: ServiceAccount
  name: autodl-serviceaccount
  namespace: autodl
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: autodl-clusterrole-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: autodl-clusterrole
subjects:
- kind: ServiceAccount
  name: autodl-serviceaccount
  namespace: autodl
autodl_seetaas_base/ceph-back/autodl-ceph/3-secrets.yaml
deleted 100644 → 0

apiVersion: v1
kind: Secret
metadata:
  name: releasev1-autodl-secret
  labels:
    app: releasev1-autocnn
    release: releasev1
    type: autocnn-core
  namespace: autodl
type: Opaque
data:
  autocnn-secret: U2VlVGFhU0F1dG9DTk4xMzUK   # echo SeeTaaSAutoCNN135 | base64
  user-password: U2VlVGFhU0F1dG9DTk4xMzUK
  smtp-password: ""
  k8s-authorisation: ""
autodl_seetaas_base/ceph-back/autodl-ceph/4-ceph-secret.yaml
deleted 100644 → 0

apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret
  namespace: autodl
data:
  key: QVFBdlZnWmJoQ3NhRWhBQU95SlZGWWJadVJESnBRR3BKRERhc3c9PQo=   # use this command to get key: sudo ceph auth get-key client.admin | base64
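A note on the encoded values committed in these secrets: both the SeeTaaS password above (generated with `echo SeeTaaSAutoCNN135 | base64`) and this ceph key decode to a value that ends in a newline, because the generating command's output carried one. A hedged alternative that strips the newline before encoding, which is usually what consumers of the secret expect:

    echo -n 'SeeTaaSAutoCNN135' | base64                          # -> U2VlVGFhU0F1dG9DTk4xMzU= (no trailing newline)
    sudo ceph auth get-key client.admin | tr -d '\n' | base64     # drops any trailing newline before encoding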
autodl_seetaas_base/ceph-back/autodl-ceph/5-pv_pvc.yaml
deleted 100644 → 0

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: adl-pv
  namespace: autodl
  annotations:
    volume.beta.kubernetes.io/mount-options: rbytes
spec:
  capacity:
    storage: 500Gi
  accessModes:
  - ReadWriteMany
  cephfs:
    monitors:
    - 192.168.1.51:6789
    path: /
    user: admin
    secretRef:
      name: ceph-secret
    # secretFile: "/etc/ceph/admin.secret"
    readOnly: false
  persistentVolumeReclaimPolicy: Retain
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: adl-pvc
  namespace: autodl
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 20Gi
autodl_seetaas_base/ceph-back/seetaas-ceph/.rpc-server-user-center-v1.yaml.swp
deleted 100644 → 0 (no preview for this file type)
autodl_seetaas_base/ceph-back/seetaas-ceph/1-namespace.yaml
deleted 100644 → 0

apiVersion: v1
kind: Namespace
metadata:
  creationTimestamp: null
  name: seetaas
spec:
  finalizers:
  - kubernetes
autodl_seetaas_base/ceph-back/seetaas-ceph/2-autocnn-sa.yaml
deleted 100644 → 0

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: seetaas-serviceaccount
  namespace: seetaas
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: seetaas-clusterrole
rules:
- apiGroups:
  - ""
  resources:
  - nodes
  verbs: ["watch", "list", "patch"]
- nonResourceURLs:
  - /version/
  verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
  name: seetaas-role
  namespace: seetaas
rules:
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "watch", "list", "create", "delete", "patch"]
- apiGroups: [""]
  resources: ["configmaps", "secrets"]
  verbs: ["get", "update", "patch", "create", "delete"]
- apiGroups: ["", "extensions"]
  resources: ["services", "ingresses", "deployments", "replicasets"]
  verbs: ["get", "create", "patch", "delete", "list"]
- apiGroups: ["", "*"]
  resources: ["events", "pods/status", "pods/log"]
  verbs: ["watch", "get", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: seetaas-role-binding
  namespace: seetaas
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: seetaas-role
subjects:
- kind: ServiceAccount
  name: seetaas-serviceaccount
  namespace: seetaas
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: seetaas-clusterrole-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: seetaas-clusterrole
subjects:
- kind: ServiceAccount
  name: seetaas-serviceaccount
  namespace: seetaas
autodl_seetaas_base/ceph-back/seetaas-ceph/3-secrets.yaml
deleted 100644 → 0

apiVersion: v1
kind: Secret
metadata:
  name: releasev1-seetaas-secret
  labels:
    app: releasev1-autocnn
    release: releasev1
    type: autocnn-core
  namespace: seetaas
type: Opaque
data:
  autocnn-secret: U2VlVGFhU0F1dG9DTk4xMzUK   # echo SeeTaaSAutoCNN135 | base64
  user-password: U2VlVGFhU0F1dG9DTk4xMzUK
  smtp-password: ""
  k8s-authorisation: ""
autodl_seetaas_base/ceph-back/seetaas-ceph/4-ceph-secret.yaml
deleted 100644 → 0

apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret
  namespace: seetaas
data:
  key: QVFBdlZnWmJoQ3NhRWhBQU95SlZGWWJadVJESnBRR3BKRERhc3c9PQo=   # use this command to get key: sudo ceph auth get-key client.admin | base64
autodl_seetaas_base/ceph-back/seetaas-ceph/5-pv_pvc.yaml
deleted 100644 → 0

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: seetaas-pv
  namespace: seetaas
  annotations:
    volume.beta.kubernetes.io/mount-options: rbytes
spec:
  capacity:
    storage: 500Gi
  accessModes:
  - ReadWriteMany
  cephfs:
    monitors:
    - 192.168.1.51:6789
    path: /
    user: admin
    secretRef:
      name: ceph-secret
    # secretFile: "/etc/ceph/admin.secret"
    readOnly: false
  persistentVolumeReclaimPolicy: Retain
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: seetaas-pvc
  namespace: seetaas
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 20Gi
autodl_seetaas_base/create-harbor-secret.sh
deleted 100755 → 0

kubectl create secret generic harbor-secret -n autodl \
  --from-file=.dockerconfigjson=/root/.docker/config.json \
  --type=kubernetes.io/dockerconfigjson
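The deleted helper built an image-pull secret from the local docker config. For context, a minimal hedged sketch of how a secret of this type is usually consumed; attaching it to the default service account is an assumption here, not something this repo does:

    # hypothetical wiring: reference the pull secret from the default service account in the autodl namespace
    kubectl patch serviceaccount default -n autodl \
      -p '{"imagePullSecrets": [{"name": "harbor-secret"}]}'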
autodl_seetaas_base/kpl-nfs/.rpc-server-user-center-v1.yaml.swp
deleted 100644 → 0 (no preview for this file type)
autodl_seetaas_base/kpl-nfs/1-namespace.yaml
deleted 100644 → 0

apiVersion: v1
kind: Namespace
metadata:
  creationTimestamp: null
  name: kpl
spec:
  finalizers:
  - kubernetes
autodl_seetaas_base/kpl-nfs/2-autocnn-sa.yaml
deleted 100644 → 0

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kpl-serviceaccount
  namespace: kpl
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: kpl-clusterrole
rules:
- apiGroups:
  - ""
  resources:
  - nodes
  verbs: ["watch", "list", "patch"]
- nonResourceURLs:
  - /version/
  verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
  name: kpl-role
  namespace: kpl
rules:
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "watch", "list", "create", "delete", "patch"]
- apiGroups: [""]
  resources: ["configmaps", "secrets"]
  verbs: ["get", "update", "patch", "create", "delete"]
- apiGroups: ["", "extensions"]
  resources: ["services", "ingresses", "deployments", "replicasets"]
  verbs: ["get", "create", "patch", "delete", "list"]
- apiGroups: ["", "*"]
  resources: ["events", "pods/status", "pods/log"]
  verbs: ["watch", "get", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: kpl-role-binding
  namespace: kpl
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kpl-role
subjects:
- kind: ServiceAccount
  name: kpl-serviceaccount
  namespace: kpl
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: kpl-clusterrole-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kpl-clusterrole
subjects:
- kind: ServiceAccount
  name: kpl-serviceaccount
  namespace: kpl
autodl_seetaas_base/kpl-nfs/3-secrets.yaml
deleted 100644 → 0

apiVersion: v1
kind: Secret
metadata:
  name: releasev1-kpl-secret
  labels:
    app: releasev1-autocnn
    release: releasev1
    type: autocnn-core
  namespace: kpl
type: Opaque
data:
  autocnn-secret: U2VlVGFhU0F1dG9DTk4xMzUK   # echo SeeTaaSAutoCNN135 | base64
  user-password: U2VlVGFhU0F1dG9DTk4xMzUK
  smtp-password: ""
  k8s-authorisation: ""
autodl_seetaas_base/kpl-nfs/4-pv_pvc.yaml
deleted 100644 → 0

apiVersion: v1
kind: PersistentVolume
metadata:
  name: kpl-pv
spec:
  nfs:
    path: /storage/nfs
    server: 192.168.1.53
  accessModes:
  - ReadWriteMany
  capacity:
    storage: 500Gi
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: kpl-pvc
  namespace: kpl
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 100Gi
script/k8s/deploy-gpu-k8s.sh
@@ -10,8 +10,20 @@ harbor_host=192.168.1.32:5000
 harbor_passwd=admin           # hard-coded, do not change; the harbor config file hard-codes this password, currently it must be admin
 image_path=/root/k8s/images   # hard-coded, do not change
 interface=eno1                # name of the physical NIC carrying this host's IP, used for the flannel configuration
+image_list=`cat $image_path/image_list.txt`
+image_list_ksy=`cat $image_path/image_list_ksy.txt`
+host_name=`hostname`
+NFS_PATH=
 ########################################################################################
 #ssh-keygen -y -f id_rsa > id_rsa.pub    # generate the public key from the private key
+#echo "check whether root login is permitted"
+#grep -w "PermitRootLogin yes" /etc/ssh/sshd_config
+#if [[ $? -eq 0 ]];then
+#  echo "root login is permitted"
+#else
+#  echo "root login is currently not permitted, configuring it automatically" && sleep 3
+#  sed -i /PermitRootLogin/s/prohibit-password/yes/g /etc/ssh/sshd_config && service sshd restart
 echo "starting ansible installation"
 apt update --allow-insecure-repositories
 apt install conntrack nfs-common sshpass ansible -y --allow-unauthenticated
@@ -393,31 +405,31 @@ ansible master -m shell -a "service docker restart"
 echo "extracting harbor into the /usr/local/ directory"
 tar -xvzf /root/k8s/package/harbor_aly.tar.gz -C /usr/local/
 cd /usr/local/harbor/ && sed -i s/harbor_host/$harbor_host/g harbor.cfg && ./prepare && ./install.sh && echo "harbor installed successfully"
-###########################push private images to the harbor registry#######
-for image in `ls $image_path`
-do
-echo "importing images to the local host"
-docker load -i $image_path/$image
-done
-echo "tagging images"
-docker tag hub.kce.ksyun.com/kpl_k8s/image-proxy:v0.9.2 $harbor_host/k8s/image-proxy:v0.9.2
-docker tag hub.kce.ksyun.com/kpl_k8s/ubuntu-basic:16.04 $harbor_host/k8s/ubuntu-basic:16.04
-docker tag hub.kce.ksyun.com/kpl_k8s/coredns:1.5.0 $harbor_host/k8s/coredns:1.5.0
-docker tag hub.kce.ksyun.com/kpl_k8s/k8s-device-plugin:1.0.0-beta4 $harbor_host/k8s/k8s-device-plugin:1.0.0-beta4
-docker tag hub.kce.ksyun.com/kpl_k8s/pause-amd64:3.0 $harbor_host/k8s/pause-amd64:3.0
-docker tag hub.kce.ksyun.com/kpl_k8s/nginx:latest $harbor_host/k8s/nginx:latest
-docker tag hub.kce.ksyun.com/kpl_k8s/cuda:10.0-base-ubuntu16.04 $harbor_host/k8s/cuda:10.0-base-ubuntu16.04
-echo "pushing images to harbor"
 echo "your harbor server address is: $harbor_host , please create the harbor project directories: k8s core autodl seetaas autodl-workspacm, you have 90s"
 sleep 90
 docker login $harbor_host -uadmin -p $harbor_passwd
-docker push $harbor_host/k8s/image-proxy:v0.9.2
-docker push $harbor_host/k8s/ubuntu-basic:16.04
-docker push $harbor_host/k8s/coredns:1.5.0
-docker push $harbor_host/k8s/k8s-device-plugin:1.0.0-beta4
-docker push $harbor_host/k8s/pause-amd64:3.0
-docker push $harbor_host/k8s/nginx:latest
-docker push $harbor_host/k8s/cuda:10.0-base-ubuntu16.04
+###########################push private images to the harbor registry#######
+echo "#################### starting image import ###########################"
+for image in `ls $image_path |grep -Ev "*.sh" |grep -Ev "*.txt" |grep -Ev "images.tar.gz"`
+do
+echo "importing image $image"
+docker load -i $image_path/$image
+done
+echo "##################### starting image push ############################"
+for i in ${image_list[@]}
+do
+image_name=`echo $i |awk -F "/" '{print $3}'`
+docker tag $i $harbor_host/k8s/$image_name
+echo "pushing image $harbor_host/k8s/$image_name"
+docker push $harbor_host/k8s/$image_name
+done
+for j in ${image_list_ksy[@]}
+do
+image_name=`echo $j |awk -F "/" '{print $4}'`
+docker tag $j $harbor_host/k8s/$image_name
+echo "pushing image $harbor_host/k8s/$image_name"
+docker push $harbor_host/k8s/$image_name
+done
 ################################install coredns and the nvidia kubernetes device plugin##########
 sed -i s/harbor_host/$harbor_host/g /root/k8s/config/coredns.yaml
 kubectl create -f /root/k8s/config/coredns.yaml
@@ -430,8 +442,22 @@ sed -i s/$harbor_host/harbor_host/g /root/k8s/config/nvidia-device-plugin.yml
 ##############################################################################
 echo "setting alias shortcuts for the services; the environment must be sourced manually"
 echo "alias k='kubectl -n autodl'" >> ~/.bashrc
-echo "alias ks='kubectl -n seetaas'" >> ~/.bashrc
+echo "alias kp='kubectl -n kpl'" >> ~/.bashrc
 sleep 5
 ##############################################################################
 echo "please set up command auto-completion on the master node manually; the command is in the comment on the last line of this script"
 #source <(kubectl completion bash) && echo "source <(kubectl completion bash)" >> ~/.bashrc"
+##############################################################################
+echo "labeling the gpu node"
+kubectl label node $host_name autodl=true kpl=true gpu=true user_job_node=true internal_service_node=true
+echo "###################creating pv and pvc#############"
+echo "########################patching the pv template#######################" && sleep 3
+sed -i s/NFS_PATH/$NFS_PATH/g /root/k8s/autodl_kpl_base/autodl-nfs/4-pv_pvc.yaml
+sed -i s/NFS_PATH/$NFS_PATH/g /root/k8s/autodl_kpl_base/kpl-nfs/4-pv_pvc.yaml
+kubectl apply -f /root/k8s/autodl_kpl_base/autodl-nfs
+kubectl apply -f /root/k8s/autodl_kpl_base/kpl-nfs
+echo "certificates required by kpl-launcher" && sleep 2
+kubectl apply -f /root/k8s/autodl_kpl_base/kpl-ssl-configmap-autodl.yaml
+kubectl apply -f /root/k8s/autodl_kpl_base/kpl-ssl-configmap.yaml
+echo "creating the secret needed for pushing autodl images, used via configmap"
+sh /root/k8s/autodl_kpl_base/create-harbor-secret.sh
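The rewritten push loops derive the Harbor tag from each source path with awk. An illustration of what $image_name evaluates to; the first path is taken from the old tag list above, while the second four-segment path is a made-up example of an image_list_ksy entry:

    echo 'hub.kce.ksyun.com/kpl_k8s/coredns:1.5.0' | awk -F "/" '{print $3}'            # -> coredns:1.5.0
    echo 'registry.example.com/ns/kpl_k8s/nginx:latest' | awk -F "/" '{print $4}'       # hypothetical entry -> nginx:latest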
script/seetaas/2-replace-file.sh
@@ -18,7 +18,7 @@ mysql_host=192.168.1.32:3306
 mysql_user=root
 mysql_password=seetatech
 kpl_db_name=seetaas        # database name for the kpl service
-quota_db_name=quota
+quota_db_name=quota        # newly added quota database
 redis_host=192.168.1.32:6379
 redis_password=seetatech
 mongo_host='mongodb://admin:admin@192.168.1.32:27017/seetaas?authSource=admin'   # remember to escape any & in this value
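The comment on mongo_host warns about escaping & because, in a sed replacement string, a bare & expands to the whole matched pattern. A hedged illustration; the placeholder token and file name below are invented for the example:

    # any & inside the connection string must be written as \& before the value reaches sed
    mongo_host='mongodb://admin:admin@192.168.1.32:27017/seetaas?authSource=admin\&ssl=false'
    sed -i "s#MONGO_HOST_PLACEHOLDER#${mongo_host}#g" some-config.yaml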
script/seetaas/5-seetaas-start.sh
@@ -15,6 +15,9 @@ cd $workspace/seetaas-v3 && make private_deploy mode=dev
 #################################################################
 echo "deploying seetaas-backend"
 cd $workspace/src/seetaas-backend && make private_deploy mode=dev
+########################################################
+echo "deploying kpl-stream"
+cd $workspace/src/seetaas-backend && make private_deploy mode=dev
 ##################################################################
 watch kubectl get pod -n autodl
 watch kubectl get pod -n kpl
script/seetaas/reset_mysql.sh
@@ -7,6 +7,8 @@ echo "dropping database user-center"
 mysql -u$user -p$password -e 'drop database `user-center`'
 echo "dropping database seetaas"
 mysql -u$user -p$password -e 'drop database `seetaas`'
+echo "dropping database quota"
+mysql -u$user -p$password -e 'drop database `quota`'
 #################################################
 echo "re-creating database autodl-core"
 mysql -u$user -p$password -e 'create database `autodl-core`'
@@ -14,3 +16,5 @@ echo "re-creating database user-center"
 mysql -u$user -p$password -e 'create database `user-center`'
 echo "re-creating database seetaas"
 mysql -u$user -p$password -e 'create database `seetaas`'
+echo "re-creating database quota"
+mysql -u$user -p$password -e 'create database `quota`'
script/seetaas/start_server.sh
@@ -40,6 +40,8 @@ cd $workspace/seetaas-v3 && make private_deploy mode=dev
 sleep 5
 echo "===================== seetaas-backend ==================="
 cd $workspace/src/seetaas-backend && make private_deploy mode=dev
+echo "===================== kpl-stream ==================="
+cd $workspace/src/kpl-stream && make private_deploy mode=dev
 ###################################################
 echo "wait 20s for start all server"
 sleep 20