ir_version: 3
producer_name: "onnx.utils.extract_model"
graph {
node {
input: "Input3"
input: "Constant339"
output: "Minus340_Output_0"
name: "Minus340"
op_type: "Sub"
doc_string: ""
domain: ""
}
node {
input: "Minus340_Output_0"
input: "Constant343"
output: "Block352_Output_0"
name: "Block352"
op_type: "Div"
doc_string: ""
domain: ""
}
name: "Extracted from {CNTKGraph}"
initializer {
data_type: 1
float_data: 127.5
name: "Constant339"
}
initializer {
data_type: 1
float_data: 255.0
name: "Constant343"
}
input {
name: "Input3"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 1
}
dim {
dim_value: 1
}
dim {
dim_value: 64
}
dim {
dim_value: 64
}
}
}
}
}
output {
name: "Block352_Output_0"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 1
}
dim {
dim_value: 1
}
dim {
dim_value: 64
}
dim {
dim_value: 64
}
}
}
}
}
value_info {
name: "Minus340_Output_0"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 1
}
dim {
dim_value: 1
}
dim {
dim_value: 64
}
dim {
dim_value: 64
}
}
}
}
}
value_info {
name: "Block352_Output_0"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 1
}
dim {
dim_value: 1
}
dim {
dim_value: 64
}
dim {
dim_value: 64
}
}
}
}
}
}
opset_import {
domain: ""
version: 7
}
# Having understood the ONNX structure, we can split the model into multiple single-node ONNX models based on that structure, so that individual nodes of the overall model can be tested and analyzed in isolation.
import onnx
from onnx import helper, numpy_helper
def show_weight(weight):
    print("="*10, "details of weight: ", weight.name, "="*10)
    print("data type: ", weight.data_type)
    print("shape: ", weight.dims)
    data_numpy = numpy_helper.to_array(weight)
    # data_numpy = np.frombuffer(weight.raw_data, dtype=xxx)
    # print("detail data:", data_numpy)
    print("="*40)
# onnx.utils.extract_model("emotion-ferplus-7.onnx","mini_model.onnx",["Input3"],["Block352_Output_0"])
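# A sketch of the per-node split described above: extract one sub-model per node
# with onnx.utils.extract_model. The "single_node_*.onnx" file names are made up,
# every node is assumed to have a name, and initializer inputs are skipped because
# extract_model pulls the needed initializers in automatically (as the dump above shows).
# full_model = onnx.load("emotion-ferplus-7.onnx")
# init_names = {init.name for init in full_model.graph.initializer}
# for node in full_model.graph.node:
#     in_names = [n for n in node.input if n and n not in init_names]
#     out_names = [n for n in node.output if n]
#     onnx.utils.extract_model("emotion-ferplus-7.onnx",
#                              f"single_node_{node.name}.onnx",
#                              in_names, out_names)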
model = onnx.load("emotion-ferplus-7.onnx")
# print(model.ir_version)    # IR version
# print(model.producer_name) # producer name
# print(model.opset_import)  # opset version info
# graph
# The graph holds node (NodeProto), input (ValueInfoProto), output (ValueInfoProto) and initializer (TensorProto) fields.
# node stores all of the model's compute nodes, input stores all of the model's input tensors, output stores all of the model's output tensors, and initializer stores all of the model's constant weight tensors;
# value_info stores the metadata of the intermediate (dynamic) tensors.
# In an ONNX model, initializer and value_info both describe tensors, but they serve different purposes.
# initializer is a list of all pre-initialized tensors in the model. These are usually the model's weights and biases, which are learned during training and used during inference. Each element of initializer is a TensorProto object containing the tensor's data type, shape and values.
# value_info describes the model's input, output and intermediate tensors. It contains a tensor's name, data type and shape, but not its values. value_info is mainly used in the graph definition to describe tensors that are neither model inputs nor model outputs but are used during computation.
# In short, both initializer and value_info describe tensors: initializer focuses on a tensor's values, while value_info focuses on its metadata.
# Through the input/output references of its nodes, the graph describes the topology of the deep learning model.
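# A quick illustration of the initializer / value_info distinction described above:
# initializers carry actual values, value_info only carries metadata.
for weight in model.graph.initializer:
    show_weight(weight)  # name, data type, shape (values available via numpy_helper)
for info in model.graph.value_info:
    dims = [d.dim_value for d in info.type.tensor_type.shape.dim]
    print(info.name, info.type.tensor_type.elem_type, dims)  # metadata only, no values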
# for node in model.graph.node:
# print(node)
# print(model.graph.input)
# print(model.graph.output)
# # get the number of nodes
# print(len(model.graph.node))
# # with open("model.txt","w") as f:
# # f.write(str(model))
# # print(model.graph)
# print(getNodeNameList(model))
# print("-----------------------------------------------")
# # How can we modify the value of an initializer?
# #  - remove the old initializer and append a new one, or
# #  - modify the current initializer in place
# for initializer in model.graph.initializer:
# print(initializer)
# # model.graph.initializer.remove(next(init for init in model.graph.initializer if init.name == 'Constant343'))
# model.graph.initializer.remove(model.graph.initializer[0])
# # create a new TensorProto object to serve as the new initializer
# new_initializer = helper.make_tensor(name = 'Constant339', data_type = onnx.TensorProto.FLOAT,
# dims = [1], vals = [255.0], raw=False)
# model.graph.initializer.append(new_initializer)
# print("-----------------------------------------------")
# for initializer in model.graph.initializer:
# print(initializer)
# print("-----------------------------------------------")
# init = model.graph.initializer[0]
# print(init)
# init.name = "Constant343_new"
# # float_data is a list-like object, so operate on it the same way as a list
# # init.float_data.pop()
# # init.float_data[:] = []
# # init.float_data.extend([123.0])
# # print(dir(init.float_data))
# init.float_data[0] = 123.50
# print("-----------------------------------------------")
# print(model.graph.initializer)
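# A third option is to round-trip the tensor through numpy_helper (a sketch; this
# also works when the values are stored in raw_data instead of float_data, and the
# new value 128.0 is purely illustrative):
# old = next(init for init in model.graph.initializer if init.name == "Constant343")
# arr = numpy_helper.to_array(old).copy()   # decode to a writable numpy array
# arr[...] = 128.0                          # illustrative replacement value
# old.CopyFrom(numpy_helper.from_array(arr, name=old.name))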
#####################################################################################################################
# Modify the shape information stored in value_info (intermediate tensor metadata).
# shape.dim is a repeated Dimension field, so plain python ints cannot be assigned
# directly; Dimension objects have to be appended or edited instead.
from onnx import TensorShapeProto
for info in model.graph.value_info:
    # append a new dimension
    dim = TensorShapeProto.Dimension()
    dim.dim_value = 666  # size of the new dimension
    info.type.tensor_type.shape.dim.append(dim)
    # modify an existing dimension
    info.type.tensor_type.shape.dim[0].dim_value = 333
    # delete a dimension
    del info.type.tensor_type.shape.dim[2]
    print(info.type.tensor_type.shape.dim)
    # print(dir(info.type.tensor_type.shape.dim))
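# Note: after hand-editing shapes it is often easier to regenerate value_info with
# ONNX shape inference (a sketch; infer_shapes returns a new ModelProto instead of
# modifying the model in place):
# from onnx import shape_inference
# model = shape_inference.infer_shapes(model)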
# tensorname_to_info = {info.name:info for info in model.graph.value_info}
# print(tensorname_to_info)
init_mapper = {init.name: init for init in model.graph.initializer}
valueinfo_mapper = {value_info.name: value_info for value_info in model.graph.value_info}
node_mapper = {node.name: node for node in model.graph.node}
# Find the upstream nodes that produce this node's inputs.
def get_parent_node(graph, node):
    parents = []
    for input_name in node.input:
        for n in graph.node:
            if input_name in n.output:
                parents.append(n)
    return parents

# Find the downstream nodes that consume this node's outputs.
def get_children_node(graph, node):
    children = []
    for output_name in node.output:
        for n in graph.node:
            if output_name in n.input:
                children.append(n)
    return children
node = node_mapper["noode_name"]
parents = get_parent_node(model.graph, node)
children = get_children_node(model.graph, node)
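# For example, print the names of the neighbouring nodes:
print("parents :", [p.name for p in parents])
print("children:", [c.name for c in children])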
def remove_node_by_name(graph, node_name):
    # find the node to remove
    node_to_remove = None
    for node in graph.node:
        if node.name == node_name:
            node_to_remove = node
            break
    if node_to_remove is None:
        return False
    # rewire the edges: redirect every consumer of the removed node's outputs
    # to the removed node's first input (this bypass assumes the node has a
    # single data input)
    for output in node_to_remove.output:
        for node in graph.node:
            for index, input_name in enumerate(node.input):
                if input_name == output:
                    node.input[index] = node_to_remove.input[0]
    # remove the node from the graph
    graph.node.remove(node_to_remove)
    return True
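# Usage sketch, assuming the commented-out extract_model call above was run to
# produce mini_model.onnx; the output file name below is made up:
# mini = onnx.load("mini_model.onnx")
# remove_node_by_name(mini.graph, "Minus340")   # the Div node now reads "Input3" directly
# onnx.checker.check_model(mini)
# onnx.save(mini, "mini_model_no_sub.onnx")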
# Insert a new node after an existing node.
def insert_node_after(graph, existing_node_name, new_node):
    # The new node consumes the existing node's output, and every downstream
    # consumer of that output is redirected to the new node's output.
    nodes = graph.node
    for i, node in enumerate(nodes):
        if node.name == existing_node_name:
            # assume the existing node has a single output that feeds other
            # nodes (not a graph output)
            existing_output_name = node.output[0]
            new_output_name = new_node.output[0]
            # the new node takes the existing node's output as its input
            new_node.input[:] = [existing_output_name]
            # redirect every node that consumed the original output
            for later_node in nodes[i + 1:]:
                for j, input_name in enumerate(later_node.input):
                    if input_name == existing_output_name:
                        later_node.input[j] = new_output_name
            # insert the new node right after the existing node
            nodes.insert(i + 1, new_node)
            break
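# Usage sketch: append a Relu right after the Sub node. The node name
# "Relu_after_sub" and tensor name "Relu_after_sub_out" are made up:
# relu = helper.make_node("Relu",
#                         inputs=["Minus340_Output_0"],   # rewired by insert_node_after anyway
#                         outputs=["Relu_after_sub_out"],
#                         name="Relu_after_sub")
# insert_node_after(model.graph, "Minus340", relu)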
# Insert a new node before an existing node.
def insert_node_before(graph, existing_node_name, new_node):
    # find the existing node
    existing_node_index = None
    for index, node in enumerate(graph.node):
        if node.name == existing_node_name:
            existing_node_index = index
            break
    if existing_node_index is None:
        raise ValueError(f"No node with name {existing_node_name} found in the graph.")
    existing_node = graph.node[existing_node_index]
    # the new node takes over the existing node's first input
    # (assumes both nodes have a single data input and a single output)
    new_node.input[:] = [existing_node.input[0]]
    # the existing node now consumes the new node's output instead
    existing_node.input[0] = new_node.output[0]
    # insert the new node into the graph right before the existing node
    graph.node.insert(existing_node_index, new_node)
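# Usage sketch: put a Relu in front of the Div node, then validate and save. The
# names "Relu_before_div", "Relu_before_div_out" and the output file are made up:
# relu2 = helper.make_node("Relu",
#                          inputs=["Minus340_Output_0"],  # rewired by insert_node_before anyway
#                          outputs=["Relu_before_div_out"],
#                          name="Relu_before_div")
# insert_node_before(model.graph, "Block352", relu2)
# onnx.checker.check_model(model)
# onnx.save(model, "emotion-ferplus-7_modified.onnx")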
# NodeNameList = []
# for i in range(len(model.graph.node)):
# node = model.graph.node[i]
# print(node.input)
# print(node.output)
# print(node.attribute)
# NodeNameList.append(model.graph.node[i].name)
# out_tvi = [inner_output for inner_output in model.graph.value_info if inner_output.name == name]
# References:
# https://github.com/ZhangGe6/onnx-modifier/tree/master
# https://github.com/bindog/onnx-surgery/blob/master/surgery.py
# https://bindog.github.io/blog/2020/03/13/deep-learning-model-convert-and-depoly/
# https://www.zhihu.com/question/386526462
# https://blog.csdn.net/ChuiGeDaQiQiu/article/details/123794387