【人工智能项目】深度学习实现胸腔X光肺炎检测
本次的主要任务是:训练模型正确识别肺炎X光图片(0=正常,1=肺炎)。
那么代码走起!!!
导包
# 导包
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from glob import glob
from keras.models import *
from keras.layers import *
from keras.preprocessing.image import *
from keras.utils import *
from keras.optimizers import *
from keras.applications import *
from keras.applications import imagenet_utils
from keras.callbacks import EarlyStopping,ReduceLROnPlateau,ModelCheckpoint,LearningRateScheduler
from efficientnet.keras import EfficientNetB3
EDA探索数据
# EDA: explore the class distribution of the training set
import matplotlib.pyplot as plt
import seaborn as sns
import os

train_dir_path = "./xray_dataset/train"
train_normal_path = os.path.join(train_dir_path, "NORMAL")
train_pneumonia_path = os.path.join(train_dir_path, "PNEUMONIA")
# Count images per class by listing each class directory
train_normal_length = len(os.listdir(train_normal_path))
train_pneumonia_length = len(os.listdir(train_pneumonia_path))

# Bar chart of the class counts
sns.set_style("whitegrid")
sns.barplot(x=["NORMAL", "PNEUMONIA"],
            y=np.array([train_normal_length, train_pneumonia_length]))

# Pie chart of the same distribution
labels = "Normal", "Pneumonia"
sizes = [train_normal_length, train_pneumonia_length]
colors = ["green", "red"]
explode = (0.1, 0)  # offset the "Normal" wedge slightly for emphasis
plt.pie(sizes, labels=labels, colors=colors, explode=explode)
plt.axis("equal")  # keep the pie circular
plt.show()
读取数据
# Input image size expected by the models and the generator batch size
img_rows, img_cols = 224, 224
batch_size = 2

# Read data with ImageDataGenerator
from keras.preprocessing.image import ImageDataGenerator
import os

train_path = "C:\\Users\\LvChaoZhang\\00contest\\09 ai研习社胸腔X光肺炎检测\\xray_dataset\\train"

# Augmentation was tried and then disabled; only rescaling and the
# 80/20 validation split are kept.
# train_datagen = ImageDataGenerator(rescale = 1./255,
#                                    shear_range = 0.2,
#                                    zoom_range = 0.2,
#                                    horizontal_flip = True,
#                                    vertical_flip=True,
#                                    validation_split=0.2,
#                                    fill_mode="nearest")
train_datagen = ImageDataGenerator(rescale=1. / 255, validation_split=0.2)

# Training subset: shuffled with a fixed seed for reproducibility
train_set = train_datagen.flow_from_directory(
    train_path,
    target_size=(img_rows, img_cols),
    batch_size=batch_size,
    color_mode="rgb",
    class_mode="binary",
    shuffle=True,
    seed=2019,
    subset="training")
# Validation subset: not shuffled so predictions stay aligned with files
val_set = train_datagen.flow_from_directory(
    train_path,
    target_size=(img_rows, img_cols),
    batch_size=batch_size,
    class_mode="binary",
    color_mode="rgb",
    shuffle=False,
    subset="validation")
Found 3280 images belonging to 2 classes.
Found 819 images belonging to 2 classes.
train_set.class_indices
{'NORMAL': 0, 'PNEUMONIA': 1}
模型
# Baseline model: three conv blocks (32 -> 64 -> 128 filters), each with
# two 3x3 convolutions, max-pooling and dropout, followed by one dense
# layer and a sigmoid output for binary classification (0=normal, 1=pneumonia).
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=(img_rows, img_cols, 3), activation='relu'))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten())
# A second dense layer was tried and disabled:
# model.add(Dense(units=256,activation='relu'))
# model.add(Dropout(0.2))
model.add(Dense(units=256, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(units=1, activation='sigmoid'))
# Model VGG16 v1.0: ImageNet-pretrained VGG16 as a frozen feature
# extractor with a small trainable classification head on top.
base_model = VGG16(weights="imagenet", include_top=False,
                   input_shape=(img_rows, img_cols, 3))
# Freeze all convolutional layers; only the head below is trained
for layer in base_model.layers:
    layer.trainable = False
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(128, activation="relu")(x)
x = Dropout(0.5)(x)
out = Dense(1, activation="sigmoid")(x)
model = Model(base_model.input, out)
# Model 4.0: deeper CNN using LeakyReLU activations; filter counts are
# multiples of `scale`.
model = Sequential()
scale = 32
# convolution 1
model.add(Conv2D(scale, (3, 3), input_shape=(img_rows, img_cols, 3)))
model.add(LeakyReLU(alpha=0.1))
# convolution 2
model.add(Conv2D(2 * scale, (3, 3)))
model.add(LeakyReLU(alpha=0.1))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.1))
# convolution 3
model.add(Conv2D(3 * scale, (3, 3)))
model.add(LeakyReLU(alpha=0.1))
# convolution 4
model.add(Conv2D(4 * scale, (3, 3)))
model.add(LeakyReLU(alpha=0.1))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.1))
# convolution 5
model.add(Conv2D(5 * scale, (3, 3)))
model.add(LeakyReLU(alpha=0.1))
# convolution 6
# NOTE(review): this layer has 3*scale filters and no LeakyReLU, unlike
# every other conv layer — confirm whether that is intentional.
model.add(Conv2D(3 * scale, (3, 3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.1))
# convolution 7
model.add(Conv2D(6 * scale, (3, 3)))
model.add(LeakyReLU(alpha=0.1))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.1))
# convolution 8
model.add(Conv2D(7 * scale, (3, 3)))
model.add(LeakyReLU(alpha=0.1))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.1))
# flatten layer
model.add(Flatten())
# first dense layer
model.add(Dense(units=15 * scale))
model.add(LeakyReLU(alpha=0.1))
model.add(Dropout(0.5))
# second dense layer
model.add(Dense(units=15 * scale))
model.add(LeakyReLU(alpha=0.1))
model.add(Dropout(0.5))
# third dense layer
model.add(Dense(units=15 * scale))
model.add(LeakyReLU(alpha=0.1))
model.add(Dropout(0.5))
# output layer
model.add(Dense(1, activation="sigmoid"))
# Experimental model: ImageNet-pretrained EfficientNetB3, fully
# fine-tuned (all layers trainable), with a small classification head.
base_model = EfficientNetB3(weights="imagenet", include_top=False,
                            input_shape=(img_rows, img_cols, 3))
# Unlike the VGG16 variant above, the whole backbone is fine-tuned
for layer in base_model.layers:
    layer.trainable = True
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(10, activation="relu")(x)
x = Dropout(0.5)(x)
out = Dense(1, activation="sigmoid")(x)
model = Model(base_model.input, out)
模型编译
# Model compilation
import keras.backend as K

# Focal loss (Lin et al., 2017): scales binary cross-entropy by
# alpha * (1 - p_t)^gamma so easy, well-classified examples are
# down-weighted — useful for the class imbalance in this dataset.
def focal_loss(alpha=0.25, gamma=2.0):
    def focal_crossentropy(y_true, y_pred):
        # NOTE(review): bce is computed before y_pred is clipped, so the
        # clip only protects the modulating factor — confirm intended.
        bce = K.binary_crossentropy(y_true, y_pred)
        y_pred = K.clip(y_pred, K.epsilon(), 1. - K.epsilon())
        # p_t: model's probability for the true class
        p_t = (y_true * y_pred) + ((1 - y_true) * (1 - y_pred))
        alpha_factor = 1
        modulating_factor = 1
        alpha_factor = y_true * alpha + ((1 - alpha) * (1 - y_true))
        modulating_factor = K.pow((1 - p_t), gamma)
        # compute the final loss and return
        return K.mean(alpha_factor * modulating_factor * bce, axis=-1)
    return focal_crossentropy

model.compile(loss=focal_loss(), metrics=["accuracy"], optimizer=Adam(lr=0.0001))
# Implement Learning rate decay
# Callbacks: checkpoint the best model by val_loss, stop early after 5
# stagnant epochs (restoring the best weights), and cut the learning
# rate by 5x after 3 stagnant epochs.
checkpoint = ModelCheckpoint("chest_xray_cnn3.h5", monitor="val_loss",
                             mode="min", save_best_only=True, verbose=1)
earlystop = EarlyStopping(monitor='val_loss', min_delta=0, patience=5,
                          verbose=1, restore_best_weights=True)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3,
                              verbose=1)  # min_delta = 0.00001)
callbacks = [earlystop, checkpoint, reduce_lr]
# Training: class_weight counteracts the NORMAL/PNEUMONIA imbalance
# (NORMAL is the minority class, so it gets the larger weight).
# 3280/819 are the train/validation image counts reported by the generators.
history = model.fit_generator(
    train_set,
    steps_per_epoch=3280 // batch_size,
    epochs=25,
    validation_data=val_set,
    validation_steps=819 // batch_size,
    shuffle=True,
    callbacks=callbacks,
    class_weight={0: 1.84307554, 1: 0.68613994})
# 训练曲线
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline#Accuracy
plt.plot(history.history["acc"])
plt.plot(history.history["val_acc"])
plt.title("Model Accuracy")
plt.xlabel("Epoch")
plt.ylabel("Accuracy")
plt.legend(["Training Set","Validation Set"],loc="upper left")# Loss
plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
plt.title("Model loss")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend(["Training set","Test set"],loc="upper left")plt.show()
预测
import cv2
import numpy as np
import pandas as pd

# Define the image-reading helper
def get_img(file_path, img_rows, img_cols):
    """Read one image: resize to (img_rows, img_cols), convert BGR->RGB,
    and scale pixel values to float32 in [0, 1] (matches the 1/255
    rescaling used by the training generator)."""
    img = cv2.imread(file_path)
    img = cv2.resize(img, (img_rows, img_cols), interpolation=cv2.INTER_CUBIC)
    # Grayscale-expansion fallback, tried and disabled:
    # if img.shape[2] == 1:
    #     img = np.dstack([img, img, img])
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = img.astype(np.float32) / 255.
    return img
# Define the test-set loading function
def load_test_data(test_path, img_rows, img_cols):
    """Load every image in test_path, sorted numerically by filename
    (e.g. "12.jpg" -> 12, assuming a 3-character extension).

    Returns (x_test, x_test_id): a float32 ndarray of images and the
    matching list of filenames.
    """
    x_test_id = []
    x_test = []
    img_names = os.listdir(test_path)
    # Strip the 4-char extension (".jpg"/".png") and sort by the number
    img_names.sort(key=lambda name: int(name[:-4]))
    for img_name in img_names:
        feature = get_img(os.path.join(test_path, img_name), img_rows, img_cols)
        x_test_id.append(img_name)
        x_test.append(feature)
    # convert x_test to a single ndarray
    x_test = np.array(x_test)
    return x_test, x_test_id
# test_dir_path = "./xray_dataset/test/"
test_dir_path = "D:\\01\\05CT\\ct\\test\\"
x_test, x_id = load_test_data(test_dir_path, img_rows, img_cols)
# NOTE(review): the checkpoint above saves "chest_xray_cnn3.h5" but this
# loads "chest_xray_cnn1.h5" — confirm which weights file is intended.
model.load_weights("chest_xray_cnn1.h5")
y_pred = model.predict(x_test)
y_pred[:10]
# Threshold the sigmoid outputs at 0.5 to get hard 0/1 labels
# (dtype preserved so the CSV values look the same as before).
y_pred = (y_pred > 0.5).astype(y_pred.dtype)
ids = np.arange(len(x_test))  # renamed from `id` to avoid shadowing the builtin
df = pd.DataFrame({"id": ids, "predict": y_pred[:, 0]})
df.to_csv("submit1.csv", index=None, header=None)
小结
那么本次到此结束!下次再见!
【人工智能项目】深度学习实现胸腔X光肺炎检测相关推荐
- “创新实践”项目介绍3:《利用深度学习进行VR手柄光点检测》
指导教师点评 这个项目来源于企业的实际需求,所采用的技术方案是最新的深度学习物体检测技术.技术方案具有一定的难度,很有挑战.通过这个项目,学生可以接触真正的需求,提升分析问题和解决问题的能力. 项目名 ...
- 热门 | Google Brain前员工深度盘点2017人工智能和深度学习各大动态
翻译 | AI科技大本营 参与 | shawn 编辑 | Donna 2017年是人工智能井喷的一年.Google Brain团队前成员Denny Britz在自己的博客WILDML上对过去一年人工智 ...
- AlphaGo、人工智能、深度学习解读以及应用
经过几天的比拼,AlphaGo最终还是胜出,创造了人机大战历史上的一个新的里程碑.几乎所有的人都在谈论这件事情,这使得把"人工智能"."深度学习"的热潮推向了新 ...
- H2O机器学习:一种强大的可扩展的人工智能和深度学习技术
书名:基于H2O的机器学习实用方法:一种强大的可扩展的人工智能和深度学习技术 原书名:Practical Machine Learning with H2O:Powerful, Scalable Te ...
- 港科夜闻|香港科大陈启峰教授:人工智能与深度学习交织-从竞赛开始,到无尽钻研...
关注并星标 每周阅读港科夜闻 建立新视野 开启新思维 1.香港科大陈启峰教授:"人工智能与深度学习交织 - 从竞赛开始,到无尽钻研".人工智能与深度学习交织,未来的科技似乎将出现更 ...
- 【人工智能】深度学习、数据库选择和人工智能的革命;人工智能是解锁IoT潜力的钥匙
深度学习(DL)和人工智能(AI)已经不再是科幻小说中遥不可及的目标,目前已成为了互联网和大数据等领域的前沿研究内容. 由于云计算提供强的计算能力.提出的先进算法以及充裕的资金,这创造了五年前难以想象 ...
- 人工智能、深度学习和AIoT
1 引言 如果从人类最初的幻想开始算起,人工智能的历史非常久远,也许能与人类文明比肩.而现代化的人工智能历史是从1956年达特茅斯会议开始的.在这之后,人工智能的研究几经起落,符号主义.联结主义.专家 ...
- 人工智能和深度学习发展趋势_AI在学习和发展中的作用
人工智能和深度学习发展趋势 In this series of blogs, AI in HR, we already understood what is AI, what is HR and be ...
- 深度学习(一): 人工智能-机器学习-深度学习的区别
人工智能-机器学习-深度学习 他们之间是有区别的 先来一张图做一下解释 从发展历史上来看 AI:让机器展现出人类智力 回到1956年夏天,在当时的会议上,AI先驱的梦想是建造一台复杂的机器(让当时刚出 ...
最新文章
- 学习 Message(14): 区分左右 Shift、Ctrl、Alt
- python app服务器_Python应用02 Python服务器进化
- swift string转int_swift中结构体和类的区别(值类型和引用类型的区别)
- 改开源项目的一些心得
- ios7之后的一些更改
- oracle数据库连接违反,Oracle 数据库连接的一些坑
- 佳肴_纪中1420_dfs
- 拿别人源码去申请软著_别拿自己的尺子,去丈量别人的生活!
- 如何在阿里云linux上部署java项目
- 【系统分析师之路】系统分析师历年真题大汇总
- docker 启动服务 Up Less than a second ago
- 颠覆大数据分析之Storm的设计模式
- 基于SSM整合阿里直播服务----java代码实现
- fastqc检验时不能执行java_fastqc对原始测序reads质控
- Linux系统编程 50 -stat和stat函数 穿透和非穿透
- 游戏道具平台|基于Springboot+Vue实现游戏道具平台系统
- 上拉电阻和下拉电阻判断
- 北京银行:首日上市定位预测(1)
- Java-并发知识体系-思维导图
- OA办公系统的几种报价模式
热门文章
- PCL库安装下载(百度云盘链接)
- mysql 5.7 ERROR 1045 (28000): Access denied for user 'root'@'localhost'
- 零基础python视频教程谁的好_零基础自学Python!自我提升视频教程,新手必看
- Linux查看文件cat、tail、vim
- AS3隐藏特性—Sprite对象的尺寸
- java 汉字转换成拼音(mark帖)
- 基于51单片机《按键控制流水灯》
- hdu 1372 knight
- 动态规划之双11的红包雨
- vue绑定键盘事件无效问题,vue绑定键盘delete事件示例,组合键绑定