Inception v2 network

# -*- coding: utf-8 -*-
# https://www.w3cschool.cn/tensorflow_python/tensorflow_python-13p92sws.html      # tf.layers reference
# https://my.oschina.net/u/876354/blog/1637819     # GoogLeNet (v1 to v4)
# https://www.zhihu.com/question/49039504          # GoogLeNet implementation
# https://blog.csdn.net/m0_37987687/article/details/80241893
# https://www.jianshu.com/p/cb8ebcee1b15     # saving BN-layer parameters

import os
import tensorflow as tf
import 数据处理_hss as data_hss
import shutil
import numpy as np
import time



def conv1d_relu(X, filters, kernel_size, strides, is_training):
    # Conv1D -> BatchNorm -> ReLU block; is_training switches batch norm between
    # batch statistics (training) and moving statistics (inference).
    X_change = tf.layers.conv1d(X, filters, kernel_size, strides, 'same', use_bias=True, activation=None)
    X_change = tf.layers.batch_normalization(X_change, training=is_training)
    X_change = tf.nn.relu(X_change)

    return X_change
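
# Quick shape check (illustrative, for the [None, 75] input used below):
# conv1d_relu(x, filters=64, kernel_size=7, strides=2) with 'same' padding maps
# [batch, 75, 1] -> [batch, ceil(75/2) = 38, 64]; batch norm and ReLU keep the shape.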

def model_inception_v2():

    input__data = tf.placeholder(tf.float32, [None, 75])
    # print("input__data",input__data)
    input__label = tf.placeholder(tf.float32, [None, 2])
    inputdata = tf.reshape(input__data,[-1,75,1])   # [batch, length, channels] for conv1d
    is_training = tf.placeholder(tf.bool)           # batch-norm / dropout train-vs-test switch
    # print("is_training",is_training)
    conv1_7x7 = conv1d_relu(inputdata, 64, 7, 2 ,is_training)
    pool1_3x3 = tf.layers.max_pooling1d(conv1_7x7, 3, 2, padding='same')

    # LRN (local response normalization) creates a competition mechanism among local
    # neuron activations: relatively large responses become relatively larger while
    # weaker neighbours are suppressed, which strengthens the model's generalization.
    # pool1_3x3 = tf.nn.local_response_normalization(pool1_3x3)   # image-domain op, left disabled
    conv2_3x3_reduce = conv1d_relu(pool1_3x3, 64, 1, 1,is_training)
    conv2_3x3 = conv1d_relu(conv2_3x3_reduce, 192, 3, 1,is_training)
    # conv2_3x3 = tf.nn.local_response_normalization(conv2_3x3)
    pool2_3x3 = tf.layers.max_pooling1d(conv2_3x3, 3, 2, padding='same')
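
    # Sequence-length bookkeeping for the stem ('same' padding rounds up):
    # 75 -(conv,s2)-> 38 -(pool,s2)-> 19 -(1x1 conv)-> 19 -(3x3 conv)-> 19 -(pool,s2)-> 10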

    # 3a ###########
    inception_3a_1_1 = conv1d_relu(pool2_3x3, 64, 1, 1,is_training)
    inception_3a_3_3_reduce = conv1d_relu(pool2_3x3, 96, 1, 1,is_training)
    inception_3a_3_3 = conv1d_relu(inception_3a_3_3_reduce, 128, 3, 1,is_training)
    inception_3a_5_5_reduce = conv1d_relu(pool2_3x3, 16, 1, 1,is_training)
    inception_3a_5_5 = conv1d_relu(inception_3a_5_5_reduce, 32, 3, 1,is_training)
    inception_3a_5_5 = conv1d_relu(inception_3a_5_5, 32, 3, 1,is_training)

    inception_3a_pool = tf.layers.max_pooling1d(pool2_3x3, 3, 1, padding='same')
    inception_3a_pool_1_1 = conv1d_relu(inception_3a_pool, 32, 1, 1,is_training)

    inception_3a_output = tf.concat([inception_3a_1_1,inception_3a_3_3,inception_3a_5_5,inception_3a_pool_1_1],axis=2)
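
    # Channel bookkeeping: the concat along axis=2 yields 64+128+32+32 = 256 channels.
    # The two stacked 3x3 convs in each "5_5" branch are the Inception v2 factorization
    # of a single 5x5 convolution (same receptive field, fewer parameters).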
    # 3b ###########
    inception_3b_1_1 = conv1d_relu(inception_3a_output, 128, 1, 1,is_training)
    inception_3b_3_3_reduce = conv1d_relu(inception_3a_output, 128, 1, 1,is_training)
    inception_3b_3_3 = conv1d_relu(inception_3b_3_3_reduce, 192, 3, 1,is_training)
    inception_3b_5_5_reduce = conv1d_relu(inception_3a_output, 32, 1, 1,is_training)
    inception_3b_5_5 = conv1d_relu(inception_3b_5_5_reduce, 96, 3, 1,is_training)
    inception_3b_5_5 = conv1d_relu(inception_3b_5_5, 96, 3, 1,is_training)

    inception_3b_pool = tf.layers.max_pooling1d(inception_3a_output, 3, 1, padding='same')
    inception_3b_pool_1_1 = conv1d_relu(inception_3b_pool, 64, 1, 1,is_training)
    inception_3b_output = tf.concat([inception_3b_1_1, inception_3b_3_3, inception_3b_5_5,inception_3b_pool_1_1],axis=2)
    pool3_3_3 = tf.layers.max_pooling1d(inception_3b_output, 3, 2, padding='same')

    # 4a ###########
    inception_4a_1_1 = conv1d_relu(pool3_3_3, 192, 1, 1,is_training)
    inception_4a_3_3_reduce = conv1d_relu(pool3_3_3, 96, 1, 1,is_training)
    inception_4a_3_3 = conv1d_relu(inception_4a_3_3_reduce, 208, 3, 1,is_training)
    inception_4a_5_5_reduce = conv1d_relu(pool3_3_3, 16, 1, 1,is_training)
    inception_4a_5_5 = conv1d_relu(inception_4a_5_5_reduce, 48, 3, 1,is_training)
    inception_4a_5_5 = conv1d_relu(inception_4a_5_5, 48, 3, 1,is_training)

    inception_4a_pool = tf.layers.max_pooling1d(pool3_3_3, 3, 1, padding='same')
    inception_4a_pool_1_1 = conv1d_relu(inception_4a_pool, 64, 1, 1,is_training)
    inception_4a_output = tf.concat([inception_4a_1_1, inception_4a_3_3, inception_4a_5_5, inception_4a_pool_1_1],axis=2)
    # 4b ###########
    inception_4b_1_1 = conv1d_relu(inception_4a_output, 160, 1, 1,is_training)
    inception_4b_3_3_reduce = conv1d_relu(inception_4a_output, 112, 1, 1,is_training)
    inception_4b_3_3 = conv1d_relu(inception_4b_3_3_reduce, 224, 3, 1,is_training)
    inception_4b_5_5_reduce = conv1d_relu(inception_4a_output, 24, 1, 1,is_training)
    inception_4b_5_5 = conv1d_relu(inception_4b_5_5_reduce, 64, 3, 1,is_training)
    inception_4b_5_5 = conv1d_relu(inception_4b_5_5, 64, 3, 1,is_training)

    inception_4b_pool = tf.layers.max_pooling1d(inception_4a_output, 3, 1, padding='same')
    inception_4b_pool_1_1 = conv1d_relu(inception_4b_pool, 64, 1, 1,is_training)
    inception_4b_output = tf.concat([inception_4b_1_1, inception_4b_3_3, inception_4b_5_5,inception_4b_pool_1_1],axis=2)
    # 4c ###########
    inception_4c_1_1 = conv1d_relu(inception_4b_output, 128, 1, 1,is_training)
    inception_4c_3_3_reduce = conv1d_relu(inception_4b_output, 128, 1, 1,is_training)
    inception_4c_3_3 = conv1d_relu(inception_4c_3_3_reduce, 128, 3, 1,is_training)
    inception_4c_5_5_reduce = conv1d_relu(inception_4b_output, 24, 1, 1,is_training)
    inception_4c_5_5 = conv1d_relu(inception_4c_5_5_reduce, 64, 3, 1,is_training)
    inception_4c_5_5 = conv1d_relu(inception_4c_5_5, 64, 3, 1,is_training)

    inception_4c_pool = tf.layers.max_pooling1d(inception_4b_output, 3, 1, padding='same')
    inception_4c_pool_1_1 = conv1d_relu(inception_4c_pool, 64, 1, 1,is_training)
    inception_4c_output = tf.concat([inception_4c_1_1, inception_4c_3_3, inception_4c_5_5, inception_4c_pool_1_1],axis=2)
    # 4d ###########
    inception_4d_1_1 = conv1d_relu(inception_4c_output, 112, 1, 1,is_training)
    inception_4d_3_3_reduce = conv1d_relu(inception_4c_output, 144, 1, 1,is_training)
    inception_4d_3_3 = conv1d_relu(inception_4d_3_3_reduce, 288, 3, 1,is_training)
    inception_4d_5_5_reduce = conv1d_relu(inception_4c_output, 32, 1, 1,is_training)
    inception_4d_5_5 = conv1d_relu(inception_4d_5_5_reduce, 64, 3, 1,is_training)
    inception_4d_5_5 = conv1d_relu(inception_4d_5_5, 64, 3, 1,is_training)

    inception_4d_pool = tf.layers.max_pooling1d(inception_4c_output, 3, 1, padding='same')
    inception_4d_pool_1_1 = conv1d_relu(inception_4d_pool, 64, 1, 1,is_training)
    inception_4d_output = tf.concat([inception_4d_1_1, inception_4d_3_3, inception_4d_5_5, inception_4d_pool_1_1],axis=2)
    # 4e ###########
    inception_4e_1_1 = conv1d_relu(inception_4d_output, 256, 1, 1,is_training)
    inception_4e_3_3_reduce = conv1d_relu(inception_4d_output, 160, 1, 1,is_training)
    inception_4e_3_3 = conv1d_relu(inception_4e_3_3_reduce, 320, 3, 1,is_training)
    inception_4e_5_5_reduce = conv1d_relu(inception_4d_output, 32, 1, 1,is_training)
    inception_4e_5_5 = conv1d_relu(inception_4e_5_5_reduce, 128, 3, 1,is_training)
    inception_4e_5_5 = conv1d_relu(inception_4e_5_5, 128, 3, 1,is_training)

    inception_4e_pool = tf.layers.max_pooling1d(inception_4d_output, 3, 1, padding='same')
    inception_4e_pool_1_1 = conv1d_relu(inception_4e_pool, 128, 1, 1,is_training)
    inception_4e_output = tf.concat([inception_4e_1_1, inception_4e_3_3, inception_4e_5_5,inception_4e_pool_1_1],axis=2)
    pool4_3_3 = tf.layers.max_pooling1d(inception_4e_output, 3, 2, padding='same')
    # 5a ###########
    inception_5a_1_1 = conv1d_relu(pool4_3_3, 256, 1, 1,is_training)
    inception_5a_3_3_reduce = conv1d_relu(pool4_3_3, 160, 1, 1,is_training)
    inception_5a_3_3 = conv1d_relu(inception_5a_3_3_reduce, 320, 3, 1,is_training)
    inception_5a_5_5_reduce = conv1d_relu(pool4_3_3, 32, 1, 1,is_training)
    inception_5a_5_5 = conv1d_relu(inception_5a_5_5_reduce, 128, 3, 1,is_training)
    inception_5a_5_5 = conv1d_relu(inception_5a_5_5, 128, 3, 1,is_training)

    inception_5a_pool = tf.layers.max_pooling1d(pool4_3_3, 3, 1, padding='same')
    inception_5a_pool_1_1 = conv1d_relu(inception_5a_pool, 128, 1, 1,is_training)
    inception_5a_output = tf.concat([inception_5a_1_1, inception_5a_3_3, inception_5a_5_5, inception_5a_pool_1_1],axis=2)
    # 5b ###########
    inception_5b_1_1 = conv1d_relu(inception_5a_output, 384, 1, 1,is_training)
    inception_5b_3_3_reduce = conv1d_relu(inception_5a_output, 192, 1, 1,is_training)
    inception_5b_3_3 = conv1d_relu(inception_5b_3_3_reduce, 384, 3, 1,is_training)
    inception_5b_5_5_reduce = conv1d_relu(inception_5a_output, 48, 1, 1,is_training)
    inception_5b_5_5 = conv1d_relu(inception_5b_5_5_reduce, 128, 3, 1,is_training)
    inception_5b_5_5 = conv1d_relu(inception_5b_5_5, 128, 3, 1,is_training)

    inception_5b_pool = tf.layers.max_pooling1d(inception_5a_output, 3, 1, padding='same')
    inception_5b_pool_1_1 = conv1d_relu(inception_5b_pool, 128, 1, 1,is_training)
    inception_5b_output = tf.concat([inception_5b_1_1, inception_5b_3_3, inception_5b_5_5, inception_5b_pool_1_1],axis=2)

    ########## classifier head
    keepprob = tf.placeholder(tf.float32)
    # pool size 3, stride 1, 'valid' on the length-3 feature map: global average pooling
    pool5_7_7 = tf.layers.average_pooling1d(inception_5b_output, 3, 1, padding='valid')

    # dropout: rate is the fraction of units dropped, hence rate = 1 - keepprob
    pool5_7_7 = tf.layers.dropout(pool5_7_7, rate=1 - keepprob, training=is_training)
    print("pool5_7_7 = ",pool5_7_7.shape)
    # flatten the feature map
    pool5_7_7_flat = tf.layers.Flatten()(pool5_7_7)
    print("pool5_7_7_flat = ",pool5_7_7_flat.shape)

    # fully connected layer; dense() must take the flattened tensor, otherwise it is
    # applied per time step and the logits keep a spurious length dimension
    yconv = tf.layers.dense(pool5_7_7_flat, 2)
    # print("y_conv = ",y_conv.shape)
    out = tf.nn.softmax(yconv, name="out")   # named output, needed when exporting a .pb model


    return input__data,input__label,keepprob,yconv,is_training,out
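
# Illustrative sketch (not part of the original script): freezing the trained graph
# to a .pb file via the named "out" node returned above, using the standard TF 1.x
# recipe. The output path is a placeholder.
def save_frozen_pb(sess, pb_path='model/inception_v2.pb'):
    # fold variables into constants, keeping only the subgraph that feeds "out"
    frozen = tf.graph_util.convert_variables_to_constants(
        sess, sess.graph.as_graph_def(), ['out'])
    with tf.gfile.GFile(pb_path, 'wb') as f:
        f.write(frozen.SerializeToString())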

def optimization(yconv,input__label):

    # cross-entropy loss
    crossentropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=yconv, labels=input__label))
    # Create the optimizer; the UPDATE_OPS control dependency makes TensorFlow update
    # the batch-norm moving mean/variance on every training step
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        trainstep = tf.train.AdamOptimizer(1e-2).minimize(crossentropy)
    # accuracy op
    correct_prediction = tf.equal(tf.argmax(yconv, 1), tf.argmax(input__label, 1))
    accuracyrate = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    return crossentropy,trainstep,accuracyrate


if __name__ == '__main__':


    start = time.time()
    file_1 = r'D:\hss_python_work\resnet_cnn\数据集\N'    # data folders
    file_2 = r'D:\hss_python_work\resnet_cnn\数据集\S'
    file_3 = r'D:\hss_python_work\resnet_cnn\数据集\T'
    file_4 = r'D:\hss_python_work\resnet_cnn\数据集\V'
    file_5 = r'D:\hss_python_work\resnet_cnn\数据集\X'
    file_NO_X = r'D:\hss_python_work\resnet_cnn\数据集\非X'

    data_all_1,label_all_1 = data_hss.data_analysis(file_NO_X,label_def = [0,1],proportion = 800)    # define the labels first
    data_all_2,label_all_2 = data_hss.data_analysis(file_5,label_def = [1,0],proportion = 50)    # define the labels first

    data_all = data_all_1 + data_all_2
    label_all = label_all_1 + label_all_2
    print("data_all = ",len(data_all))
    print("label_all = ",len(label_all))

    data_train,label_train,data_test,label_test = data_hss.Dataset_partition(data_all,label_all)
    print(data_train.shape,label_train.shape,data_test.shape,label_test.shape)



    sess = tf.InteractiveSession()
    # model_inception_v2 returns six tensors, including the named softmax output "out"
    input_data,input_label,keep_prob,y_conv,is_training,out = model_inception_v2()
    cross_entropy,train_step,accuracy_rate = optimization(y_conv,input_label)


    ######## training ########
    # initialize all variables and parameters
    tf.global_variables_initializer().run()
    # save the BN moving statistics ("moving" in the name) along with the trainable variables
    var_list = [var for var in tf.global_variables() if "moving" in var.name]
    var_list += tf.trainable_variables()
    m_saver = tf.train.Saver(var_list=var_list, max_to_keep=5)
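
    # Optional sanity check (illustrative): list the BN moving statistics captured in
    # var_list; without them a restored model would run inference with uninitialized
    # moving_mean / moving_variance.
    # for v in var_list:
    #     if "moving" in v.name:
    #         print("will save:", v.name)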



    # ########### Training (dropout keep_prob 0.5; mini-batches of 200, up to 10000 iterations)
    # model_doc = r'model'  # folder for saved models
    # if not os.path.exists(model_doc):   # create the model folder
    #     os.makedirs(model_doc)
    # else:
    #     shutil.rmtree(model_doc)   # force-remove the folder first
    #     os.makedirs(model_doc)
    #
    # train_accuracy_all = []   # record all training accuracies
    # max_acc = 0
    # f = open('model/acc.txt','w')
    #
    # for i in range(10000):
    #
    #     batch_data_train,batch_label_train = data_hss.batch(data_train,label_train,batch_size = 200)
    #     # print("batch_data_train = ",batch_data_train,batch_data_train.shape)
    #     # print("batch_label_train = ",batch_label_train,batch_label_train.shape)
    #
    #     # log progress (every iteration, since i % 1 == 0)
    #     if i%1 == 0:
    #
    #         train_accuracy = accuracy_rate.eval(feed_dict={input_data:batch_data_train,input_label: batch_label_train,
    #                                                        keep_prob: 0.5,is_training : False})
    #         print ("-->step %d, training accuracy %.4f ,max_acc %.4f"%(i, train_accuracy,max_acc))
    #         print("cross_entropy = ",sess.run(cross_entropy,feed_dict={input_data:batch_data_train,input_label: batch_label_train,
    #                                                                    keep_prob: 0.5,is_training : False}))
    #         f.write(str(i)+', train_accuracy: '+str(train_accuracy)+'  '+str(max_acc) +'\n')
    #         # # keep the 5 most recent checkpoints
    #         # m_saver.save(sess, './model/model.ckpt', global_step=i)
    #         # keep the checkpoints with the highest accuracies
    #         if train_accuracy >= max_acc :
    #             max_acc = train_accuracy
    #         if train_accuracy >= max_acc - 0.04:
    #             m_saver.save(sess, './model/model.ckpt', global_step=i)
    #
    #         train_accuracy_all.append(train_accuracy)
    #
    #     if max_acc >= 0.95 and train_accuracy < 0.5:   # accuracy peaked and then collapsed: stop training
    #         print("break reason 1")
    #         break
    #     if (len(train_accuracy_all) >= 5      # at least 5 accuracies recorded and the last 5 all above 0.97: stop
    #         and train_accuracy_all[len(train_accuracy_all) - 1] > 0.97
    #         and train_accuracy_all[len(train_accuracy_all) - 2] > 0.97
    #         and train_accuracy_all[len(train_accuracy_all) - 3] > 0.97
    #         and train_accuracy_all[len(train_accuracy_all) - 4] > 0.97
    #         and train_accuracy_all[len(train_accuracy_all) - 5] > 0.97) :
    #         # print(train_accuracy_all)
    #         print("break reason 2")
    #         break
    #
    #     # run one training step
    #     train_step_,loss = sess.run([train_step,cross_entropy], feed_dict={input_data:batch_data_train,
    #                                                                                input_label: batch_label_train,
    #                                                                                keep_prob: 0.5,
    #                                                                                is_training : True})
    #
    #
    #     # After training completes, evaluate on the full test set for the overall accuracy
    #     # print(train_accuracy_all)
    #     if max_acc >= 0.9 and train_accuracy < 0.5:   # accuracy peaked and then collapsed: stop training
    #         break
    # f.close()


    ####### restore the model and predict

    model_file=tf.train.latest_checkpoint('model/')
    m_saver.restore(sess,model_file)


    a = 0     # number of correct predictions
    TP = 0    # correctly predicted symptomatic beats
    FN_TP = 0 # symptomatic beats in the labels (TP + FN)
    TN = 0    # correctly predicted normal beats
    TN_FP = 0 # normal beats in the labels (TN + FP)
    sensibility = 0  # sensitivity
    specificity = 0  # specificity

    #### Batch heartbeat prediction
    # run the named softmax output directly instead of wrapping y_conv in a fresh
    # tf.nn.softmax op at prediction time
    output = sess.run(out , feed_dict = {input_data:data_test, keep_prob: 1.0,is_training : False})
    print("output = ",output)
    output = np.round(output)  # round to the nearest integer -> one-hot prediction
    print("output = ",output)
    print("label_test = ",label_test)

    for i in range(0,len(data_test)):
        if label_test[i][0] == output[i][0] and label_test[i][1] == output[i][1] :
            a +=1
        if label_test[i][0] == output[i][0] and output[i][0] == 1:  # sensitivity count
            TP += 1
        if label_test[i][0] == 1 :
            FN_TP += 1
        if label_test[i][1] == output[i][1] and output[i][1] == 1:  # specificity count
            TN += 1
        if label_test[i][1] == 1 :
            TN_FP += 1

    ### Single-beat prediction
    # Reset the counters so this pass is scored independently of the batch pass above
    # (otherwise every test beat would be counted twice in the final metrics).
    a = TP = FN_TP = TN = TN_FP = 0
    single_data = np.empty([1,75])
    for i in range(0,len(data_test)):

        single_data[0] = data_test[i]
        # print("single_data = ",single_data)
        # print("single_label = ",single_label)

        # run the named softmax output; building a new tf.nn.softmax op inside the
        # loop would grow the graph on every iteration
        output = sess.run(out , feed_dict = {input_data:single_data, keep_prob: 1.0,is_training : False})
        # print("output = ",output)
        output = np.round(output)  # round to the nearest integer -> one-hot prediction
        print(i,"/",len(data_test)-1,"  output = ",output,"single_label = ",label_test[i])
        if label_test[i][0] == output[0][0] and label_test[i][1] == output[0][1] :
            a +=1

        if label_test[i][0] == output[0][0] and output[0][0] == 1:  # sensitivity count
            TP += 1
        if label_test[i][0] == 1 :
            FN_TP += 1

        if label_test[i][1] == output[0][1] and output[0][1] == 1:  # specificity count
            TN += 1
        if label_test[i][1] == 1 :
            TN_FP += 1



    print("len(data_test) = ",len(data_test),"a =",a)
    print("sensibility = ",TP/FN_TP,"specificity =",TN/TN_FP)


    end = time.time()
    print("程序运行时间:",end - start)
