Commit e9af7534 by BoxuanXu

replace file

1 parent 1c6fd829
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import os
import pymysql.cursors
import logging
import subprocess
from libpywrap import *
import requests
import threading
from converter import Run_Converter
from flask import Flask, request
from jenkinsapi.jenkins import Jenkins
import GProgress as GP
import Queue
__website__ = "www.seetatech.com"
__author__ = "seetatech"
__editor__ = "xuboxuan"
__Date__ = "20170807"
#Create an instance of the flask class
app = Flask(__name__)
# initialize the fdfs client
stbf_stcnf("/etc/fdfs/client.conf")
download_path="."
# used for jenkins
host = 'http://192.168.1.242:8080/'
modelid = ""
output_layer = ""
pool_id = ""
err_msg = ""
post_url = "http://192.168l.1.127:1234/API"
#post_url = "http://192.168.1.244:5000/result"
#init post info queue
Info_Queue = Queue.Queue()
# initialize logging
logging.basicConfig(
    level=logging.INFO,
    format="[%(asctime)s] %(name)s:%(levelname)s: %(message)s"
)
db_atlas = pymysql.connect(host="192.168.1.127", user="defaultUser", password="magician", database="seetaAtlas", port=3306)
curl_atlas = db_atlas.cursor()
curl_atlas_exe = db_atlas.cursor()
def get_path_from_db(modelid, seetanet_model):
    # fetch the stored model path; a parameterized query avoids SQL injection
    curl_atlas.execute("select path from seetaAtlas.model where id=%s", (modelid,))
    result_atlas = curl_atlas.fetchall()
    if len(result_atlas) != 1:
        logging.info('get wrong mxnet model path')
        return None, None
    path_name = result_atlas[0][0]
    # the path column holds "<params path>|<graph path>" ("::" is accepted as an alternative separator)
    if path_name.find("|") == -1:
        if path_name.find("::") == -1:
            logging.info('get wrong parameters!')
            return None, None
        path_list = path_name.split("::")
    else:
        path_list = path_name.split("|")
    if len(path_list) != 2:
        logging.info('get wrong parameters!')
        return None, None
    params_name_path = path_list[0]
    # substr file name from path
    params_name = os.path.basename(params_name_path)
    graph_name_path = path_list[1]
    # substr file name from path
    graph_name = os.path.basename(graph_name_path)
    logging.info("The params's path is %s" % params_name_path)
    logging.info("The model graph's path is %s" % graph_name_path)
    # download params file from fastdfs
    if stbf_down(params_name_path, download_path):
        logging.info("download params file: %s success" % params_name)
    else:
        return None, None
    # download graph file from fastdfs
    if stbf_down(graph_name_path, download_path):
        logging.info("download graph file: %s success" % graph_name)
    else:
        return None, None
    # post return progress
    GP.set_progress_var(10)
    GP.Post_return()
    # drive the converter with the params file and the model graph
    result = Run_Converter(params_name, graph_name, seetanet_model)
    if result:  # the original code treats a truthy result as conversion failure
        return None, None
    # Run_Converter("model-0015.params","model-symbol.json",seetanet_model)
    return params_name, graph_name
def upload_filetoFastDFS(params_name, graph_name, seetanet_model):
    # upload seetanet file to fastdfs
    stmodel_fid = stbf_up(seetanet_model)
    if stmodel_fid:
        logging.info("upload seetanet file success, seetanet model_id is %s" % stmodel_fid)
    else:
        logging.info("upload seetanet file failed!")
        return None
    return stmodel_fid
def get_info_from_queue(arg):
    while 1:
        # Queue.get() blocks until a job arrives, avoiding a busy-wait loop
        Info = Info_Queue.get()
        logging.info("Begin Convert Info is : modelid = %s, output_layer = %s, pool_id = %s" % (Info["modelid"], Info["output_layer"], Info["pool_id"]))
        return_flag = "updateProgress"
        modelid = Info["modelid"]
        output_layer = Info["output_layer"]
        pool_id = Info["pool_id"]
        GP.set_pool_id_var(pool_id)
        GP.set_err_msg_var("")
        err_msg = ""
        GP.set_post_type_var(return_flag)
        try:
            #post return progress
            #GP.set_progress_var(10)
            #GP.Post_return()
            seetanet_model = "model_" + str(modelid) + ".data"
            params_name, graph_name = get_path_from_db(modelid, seetanet_model)
            if params_name is None or graph_name is None:
                logging.info("get wrong params file")
                err_msg = "get wrong params file"
                return_flag = "error"
            else:
                stmodel_fid = upload_filetoFastDFS(params_name, graph_name, seetanet_model)
                if stmodel_fid is None:
                    logging.info("upload failed")
                    err_msg = "upload failed"
                    return_flag = "error"
                else:
                    # remove params file, graph file and converted model
                    try:
                        cmd_str = "rm -rf " + params_name + " " + graph_name + " " + seetanet_model
                        logging.info(cmd_str)
                        subprocess.check_call(cmd_str, shell=True)
                    except subprocess.CalledProcessError as err:
                        logging.info("shell command error!")
                        err_msg = "shell command error!"
                        return_flag = "error"
                    logging.info("convert successfully!")
                    logging.info("Begin Run Jenkins!")
                    #GP.set_progress_var(90)
                    #GP.Post_return()
                    J = Jenkins(host, username='shenyizhong', password='shenyizhong')
                    job = J['SeetaNetLite-pro']
                    params = {"MODEL_ID": modelid, "OUTPUT_LAYER": output_layer, "STMODEL_FID": stmodel_fid}
                    r = job.invoke(block=True, build_params=params)
                    b = r.get_build()
                    logging.info('Finish Jenkins, build #: {}, status: {}'.format(b.get_number(), b.get_status()))
                    if b.get_status() == "SUCCESS":
                        return_flag = "finish"
                    else:
                        err_msg = "Jenkins build failed"
                        return_flag = "error"
        except Exception as e:
            #GP.set_err_msg_var(repr(e))
            #err_msg = repr(e)
            err_msg = "unknown error from model convert!"
            return_flag = "error"
        finally:
            GP.set_post_type_var(return_flag)
            GP.set_err_msg_var(err_msg)
            if return_flag == "finish":
                GP.set_progress_var(100)
                #post_return = { "posttype": return_flag, "progress" : 100, "pool_id": pool_id, "err_msg":err_msg }
            else:
                GP.set_progress_var(0)
                #post_return = { "posttype": return_flag, "progress" : 0, "pool_id": pool_id, "err_msg":err_msg }
            GP.Post_return()
t = threading.Thread(target=get_info_from_queue, args=(1,))
# get parameters and drive the conversion function
@app.route('/convert', methods=['POST'])
def Driver_Convert():
    global t
    # get parameters from the post stream
    return_flag = "false"
    try:
        modelid = request.form['modelid']
        output_layer = request.form['output_layer']
        pool_id = request.form['pool_id']
        if not modelid or not output_layer or not pool_id:
            logging.info("New Post Connect: get wrong parameter!")
            return_flag = "false"
        else:
            return_flag = "true"
            logging.info("New Post Connect: modelid : %s, post_url : %s, Queue size : %d" % (modelid, post_url, (Info_Queue.qsize() + 1)))
            Post_Info = {"modelid": modelid, "output_layer": output_layer, "pool_id": pool_id}
            Info_Queue.put(Post_Info)
            # threads cannot be stopped from outside; restart the worker only if it has died
            if not t.is_alive():
                t = threading.Thread(target=get_info_from_queue, args=(1,))
                t.start()
    finally:
        return return_flag
if __name__ == '__main__':
    try:
        t.start()
        app.run(host='0.0.0.0')
    finally:
        curl_atlas_exe.close()
        curl_atlas.close()
        db_atlas.close()
        stbf_rls()
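# Usage sketch (hypothetical host and values, assuming Flask's default port
# 5000): a client queues a conversion with a form-encoded POST and receives
# "true"/"false"; progress is then reported asynchronously to post_url.
#
#   import requests
#   resp = requests.post("http://localhost:5000/convert",
#                        data={"modelid": "15",
#                              "output_layer": "fc1_output",
#                              "pool_id": "1"})
#   print(resp.text)  # "true" if the job was queued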
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
__website__ = "www.seetatech.com"
__author__ = "seetatech"
__editor__ = "xuboxuan"
__Date__ = "20170812"
import requests
#post_url = "http://192.168l.1.170:1234/API"
post_url = "http://192.168.1.244:5000/result"
class GProgress_Var:
    posttype = None
    progress = None
    pool_id = None
    err_msg = None

def set_post_type_var(posttype):
    GProgress_Var.posttype = posttype

def set_progress_var(progress):
    GProgress_Var.progress = progress

def set_pool_id_var(pool_id):
    GProgress_Var.pool_id = pool_id

def set_err_msg_var(err_msg):
    GProgress_Var.err_msg = err_msg

def Post_return():
    post_return = {"posttype": GProgress_Var.posttype, "progress": GProgress_Var.progress, "pool_id": GProgress_Var.pool_id, "err_msg": GProgress_Var.err_msg}
    requests.post(post_url, data=post_return)
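# Usage sketch (mirroring how Drive_Converter.py drives this module): set the
# shared state via the setters, then fire one POST to post_url.
#
#   import GProgress as GP
#   GP.set_post_type_var("updateProgress")
#   GP.set_pool_id_var("1")   # value is illustrative
#   GP.set_err_msg_var("")
#   GP.set_progress_var(10)
#   GP.Post_return()  # POSTs {"posttype": "updateProgress", "progress": 10, ...}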
DATA_NAME = 'data'
OUTPUT_LAYER = ''
#MODEL_PARAM = '/home/dev01/workshop/projects/MXNet2SeetaNet/model-0015.params'
#MODEL_JSON = '/home/dev01/workshop/projects/MXNet2SeetaNet/model-symbol.json'
LOAD_EPOCH = 15
INPUT_DATA_CHANNEL = 3
INPUT_DATA_HEIGHT = 248
INPUT_DATA_WIDTH = 248
protoc -I=./ --python_out=./ HolidayCNN_proto.proto
python Drive_Converter.py
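# Note: the protoc step above generates HolidayCNN_proto_pb2.py next to the
# .proto file (standard --python_out naming); rerun it whenever
# HolidayCNN_proto.proto changes before starting Drive_Converter.py.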
#!/bin/bash
#########################################################################
# name: keep.sh
# Author: seetatech xuboxuan
# mail: boxuan.xu@seetatech.com
# Created Time: 16 Aug 2017 07:49:46 PM CST
#########################################################################
sn=`ps x | grep Drive_Converter.py | grep -v grep | awk '{print $1}'`
echo $sn
if [ "${sn}" = "" ]  # empty means the process is not running
then
    echo no sdk_process
    mv log log_bak/`date +%s`.log.bak
    nohup python Drive_Converter.py > log 2>&1 &
    echo start success
fi
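# Example (assumed deployment detail, hypothetical path): run this watchdog
# from cron so the converter restarts automatically if it dies:
#
#   * * * * * cd /opt/MXNet2SeetaNet && bash keep.sh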
# -*- coding: UTF-8 -*-
import json
import logging
import config as cfg
def fuzzy_query(name_list, key_words):
    for name in name_list:
        suffix = name.split('_')[-1]
        if suffix == key_words:
            return name
    return None
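# For example (illustrative names): fuzzy_query(['bn1_gamma', 'bn1_beta'],
# 'gamma') returns 'bn1_gamma'; with no matching suffix it returns None.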
class Layer(object):
    def __init__(self):
        self.name = ''
        self.bottom_name = set()
        self.bottom_idx = set()
        self.top_name = set()
        self.top_idx = set()
        self.param = set()  # param name
        self.type = 'null'
        self.attr = None

class Graph(object):
    def __init__(self):
        self.layer_idx = {}   # name -> idx
        self.idx_layer = {}   # idx -> name
        self.name_layer = {}  # name -> layer
        self.idx_count = 0

    def add_name_layer(self, name, layer):
        if name in self.name_layer:
            logging.info('name %s is already in name_layer' % name)
            return
        self.name_layer[name] = layer

    def get_layer(self, name):
        layer = self.name_layer.get(name, None)
        if layer is None:
            raise KeyError(name)
        return layer

    def get_idx(self, layer_name):
        idx = self.layer_idx.get(layer_name, None)
        if idx is None:
            raise ValueError(layer_name)
        return idx

    def get_name(self, idx):
        name = self.idx_layer.get(idx, None)
        if name is None:
            raise ValueError(idx)
        return name

    def get_root_layer(self):
        pass

    def get_all_layers(self):
        return self.name_layer.values()

    def __iter__(self):
        return iter(self.name_layer.values())
def separate_layer_param(sym_name_list):
    layer_output_name = []
    param_name = []
    for sym_name in sym_name_list:
        if sym_name.split('_')[-1] == 'output':
            idx = sym_name.rfind('_')
            # layer_output_name.append(sym_name[:idx])
            layer_output_name.append(sym_name)
        else:
            param_name.append(sym_name)
    return layer_output_name, param_name
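# For example (illustrative symbol names): MXNet checkpoint symbols such as
# ['conv0_output', 'conv0_weight', 'bn0_gamma'] split into
# (['conv0_output'], ['conv0_weight', 'bn0_gamma']).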
def set_top_name(graph):
    for layer in graph:
        for l in graph:
            if layer.name in l.bottom_name:
                layer.top_name.add(l.name)
        # the bottom-most data layer must have two top idx
        if layer.name == cfg.DATA_NAME:
            layer.top_name.add(cfg.DATA_NAME)
def set_idx(graph):
    try:
        from Queue import Queue
    except ImportError:
        from queue import Queue
    name_set = set()
    q = Queue()
    q.put(graph.get_layer(cfg.DATA_NAME))
    while q.qsize() > 0:
        layer = q.get()
        layer_name = layer.name
        flag = False
        for b in layer.bottom_name:
            # requeue the layer so that all of its input blobs are
            # initialized before the layer itself is initialized
            if b not in name_set:
                q.put(layer)
                flag = True
                break
        if flag:
            continue
        if layer_name in name_set:
            continue
        else:
            name_set.add(layer_name)
            graph.layer_idx[layer_name] = graph.idx_count
            graph.idx_layer[graph.idx_count] = layer_name
            if layer_name == cfg.DATA_NAME:
                # idx=1 is reserved for the label blob
                graph.idx_count += 2
            else:
                graph.idx_count += 1
            for n in layer.top_name:
                q.put(graph.get_layer(n))
    for layer in graph:
        for b in layer.bottom_name:
            layer.bottom_idx.add(graph.get_idx(b))
        #for t in layer.top_name:
        layer.top_idx.add(graph.layer_idx[layer.name])
        if layer.name == cfg.DATA_NAME:
            layer.top_idx.add(1)
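# Resulting blob index layout (derived from the traversal above): the data
# layer takes idx 0, idx 1 is held back for the label blob, and every other
# layer is numbered 2, 3, ... in breadth-first order from the data layer.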
def remove_layer(graph, type='Flatten'):
    '''Each removed layer must have exactly one input, i.e. len(bottom_name) == 1.'''
    remove_layer_list = []
    for layer in graph:
        if layer.type == type:
            remove_layer_list.append(layer)
            layer_name = layer.name
            bottom_name = layer.bottom_name.pop()
            if len(layer.bottom_name) > 0:
                raise ValueError
            # reconnect consumers of the removed layer to its single input
            for l in graph:
                if layer_name in l.bottom_name:
                    l.bottom_name.remove(layer_name)
                    l.bottom_name.add(bottom_name)
    for l in remove_layer_list:
        logging.info('remove: {}'.format(l.name))
        graph.name_layer.pop(l.name)
def mxnetbn_to_bn_scale(graph):
    '''Split MXNet's BatchNorm layer into the BatchNorm and Scale layers used by SeetaNet (Caffe).'''
    bn_layers = []
    for layer in graph.get_all_layers():
        if layer.type == 'BatchNorm':
            bn_layers.append(layer)
    for layer in bn_layers:
        new_name = 'Scale-%s' % layer.name
        scale_layer = Layer()
        scale_layer.type = 'Scale'
        scale_layer.name = new_name
        graph.add_name_layer(new_name, scale_layer)
        scale_layer.bottom_name.add(layer.name)
        # consumers of the bn layer now read from the new scale layer
        for l in graph.get_all_layers():
            if layer.name in l.bottom_name and l.type != 'Scale':
                l.bottom_name.remove(layer.name)
                l.bottom_name.add(scale_layer.name)
        # move gamma/beta from the bn layer to the scale layer
        gamma_name = fuzzy_query(layer.param, 'gamma')
        beta_name = fuzzy_query(layer.param, 'beta')
        layer.param.remove(gamma_name)
        layer.param.remove(beta_name)
        scale_layer.param.add(gamma_name)
        scale_layer.param.add(beta_name)
def construct_graph(json_file):
    graph = Graph()
    with open(json_file) as f:
        symbol = json.load(f)
    node_num = len(symbol['nodes'])
    nodes = symbol['nodes']
    for i in range(node_num):
        node = nodes[i]
        if str(node['op']) == 'null' and str(node['name']) != cfg.DATA_NAME:
            continue
        layer_name = node['name']
        layer = Layer()
        layer.name = layer_name
        layer.type = node['op']
        # layer.top_name = '%s_output' % node['name']
        layer.attr = node.get('attr', None)
        graph.add_name_layer(layer.name, layer)
        for inp in node['inputs']:
            input_node = nodes[inp[0]]
            if str(input_node['op']) != 'null' or (str(input_node['name']) == cfg.DATA_NAME):
                layer.bottom_name.add(str(input_node['name']))
            if str(input_node['op']) == 'null':
                layer.param.add(str(input_node['name']))
                # the original compared the name against itself (a check that can
                # never fail); presumably it meant to flag params not named after
                # this layer as shared params
                if (str(input_node['name']) != cfg.DATA_NAME
                        and not str(input_node['name']).startswith(str(layer_name))):
                    raise NotImplementedError('shared param is not implemented')
        if layer_name == cfg.OUTPUT_LAYER:
            break
    return graph
def load_graph(json_file):
    logging.info('construct graph')
    graph = construct_graph(json_file)
    logging.info('remove Flatten layer')
    # SeetaNet (Caffe) has no Flatten layer, so drop it from the graph
    remove_layer(graph)
    # copy_data_layer(graph)
    logging.info('split bn layer to bn layer and scale layer')
    mxnetbn_to_bn_scale(graph)
    set_top_name(graph)
    set_idx(graph)
    logging.info('load graph over')
    return graph

# if __name__ == '__main__':
#     graph = load_graph(cfg.MODEL_JSON_FILE)
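# Usage sketch (hypothetical file name; cfg.OUTPUT_LAYER must name the last
# layer to keep, or be '' to keep the whole symbol):
#
#   graph = load_graph('model-symbol.json')
#   for layer in graph:
#       print('%s %s %s' % (layer.name, layer.type, sorted(layer.bottom_idx)))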
#!/bin/sh
kill -9 `ps x | grep Drive | grep -v grep | awk '{print $1}'`
sleep 1
mv log log_bak/`date +%s`.log.bak
nohup python Drive_Converter.py > log 2>&1 &
echo start success