Reference link: https://blog.csdn.net/zhuzemin45/article/details/79709874
Code link: https://github.com/delta-onera/segnet_pytorch/blob/master/segnet.py

SegNet is conceptually very close to FCN and DeconvNet; the main difference lies in the techniques used for the encoder and the decoder (upsampling). SegNet's encoder consists of the first 13 convolutional layers of VGG16, each encoder layer has a corresponding decoder layer, and the final decoder output is fed into a soft-max classifier that produces class probabilities for each pixel independently. Upsampling in the decoder is performed with max-unpooling.
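The key operation here is the pooling/unpooling pair: each encoder max-pooling layer records the indices of its maxima, and the matching decoder layer uses those indices to scatter values back into a sparse, higher-resolution map. A minimal sketch (illustrative, not part of the original post) of how F.max_pool2d with return_indices=True pairs with F.max_unpool2d:

import torch
import torch.nn.functional as F

# max_pool2d keeps the argmax indices; max_unpool2d later places each pooled
# value back at its recorded position and fills all other positions with zeros.
x = torch.arange(16, dtype=torch.float32).reshape(1, 1, 4, 4)
pooled, indices = F.max_pool2d(x, kernel_size=2, stride=2, return_indices=True)
restored = F.max_unpool2d(pooled, indices, kernel_size=2, stride=2)
print(pooled.shape)    # torch.Size([1, 1, 2, 2])
print(restored.shape)  # torch.Size([1, 1, 4, 4]); non-max positions are zero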

Code:

import torch
import torch.nn as nn
import torch.nn.functional as F


class SegNet(nn.Module):
    def __init__(self, input_nbr, label_nbr):
        super(SegNet, self).__init__()

        batchNorm_momentum = 0.1

        # Encoder: first 13 convolutional layers of VGG16 (stages 1-5)
        self.conv11 = nn.Conv2d(input_nbr, 64, kernel_size=3, padding=1)
        self.bn11 = nn.BatchNorm2d(64, momentum=batchNorm_momentum)
        self.conv12 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
        self.bn12 = nn.BatchNorm2d(64, momentum=batchNorm_momentum)

        self.conv21 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
        self.bn21 = nn.BatchNorm2d(128, momentum=batchNorm_momentum)
        self.conv22 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
        self.bn22 = nn.BatchNorm2d(128, momentum=batchNorm_momentum)

        self.conv31 = nn.Conv2d(128, 256, kernel_size=3, padding=1)
        self.bn31 = nn.BatchNorm2d(256, momentum=batchNorm_momentum)
        self.conv32 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.bn32 = nn.BatchNorm2d(256, momentum=batchNorm_momentum)
        self.conv33 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.bn33 = nn.BatchNorm2d(256, momentum=batchNorm_momentum)

        self.conv41 = nn.Conv2d(256, 512, kernel_size=3, padding=1)
        self.bn41 = nn.BatchNorm2d(512, momentum=batchNorm_momentum)
        self.conv42 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.bn42 = nn.BatchNorm2d(512, momentum=batchNorm_momentum)
        self.conv43 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.bn43 = nn.BatchNorm2d(512, momentum=batchNorm_momentum)

        self.conv51 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.bn51 = nn.BatchNorm2d(512, momentum=batchNorm_momentum)
        self.conv52 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.bn52 = nn.BatchNorm2d(512, momentum=batchNorm_momentum)
        self.conv53 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.bn53 = nn.BatchNorm2d(512, momentum=batchNorm_momentum)

        # Decoder: mirrors the encoder (stages 5d-1d)
        self.conv53d = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.bn53d = nn.BatchNorm2d(512, momentum=batchNorm_momentum)
        self.conv52d = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.bn52d = nn.BatchNorm2d(512, momentum=batchNorm_momentum)
        self.conv51d = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.bn51d = nn.BatchNorm2d(512, momentum=batchNorm_momentum)

        self.conv43d = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.bn43d = nn.BatchNorm2d(512, momentum=batchNorm_momentum)
        self.conv42d = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.bn42d = nn.BatchNorm2d(512, momentum=batchNorm_momentum)
        self.conv41d = nn.Conv2d(512, 256, kernel_size=3, padding=1)
        self.bn41d = nn.BatchNorm2d(256, momentum=batchNorm_momentum)

        self.conv33d = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.bn33d = nn.BatchNorm2d(256, momentum=batchNorm_momentum)
        self.conv32d = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.bn32d = nn.BatchNorm2d(256, momentum=batchNorm_momentum)
        self.conv31d = nn.Conv2d(256, 128, kernel_size=3, padding=1)
        self.bn31d = nn.BatchNorm2d(128, momentum=batchNorm_momentum)

        self.conv22d = nn.Conv2d(128, 128, kernel_size=3, padding=1)
        self.bn22d = nn.BatchNorm2d(128, momentum=batchNorm_momentum)
        self.conv21d = nn.Conv2d(128, 64, kernel_size=3, padding=1)
        self.bn21d = nn.BatchNorm2d(64, momentum=batchNorm_momentum)

        self.conv12d = nn.Conv2d(64, 64, kernel_size=3, padding=1)
        self.bn12d = nn.BatchNorm2d(64, momentum=batchNorm_momentum)
        self.conv11d = nn.Conv2d(64, label_nbr, kernel_size=3, padding=1)

    def forward(self, x):
        # Stage 1
        x11 = F.relu(self.bn11(self.conv11(x)))
        x12 = F.relu(self.bn12(self.conv12(x11)))
        x1p, id1 = F.max_pool2d(x12, kernel_size=2, stride=2, return_indices=True)

        # Stage 2
        x21 = F.relu(self.bn21(self.conv21(x1p)))
        x22 = F.relu(self.bn22(self.conv22(x21)))
        x2p, id2 = F.max_pool2d(x22, kernel_size=2, stride=2, return_indices=True)

        # Stage 3
        x31 = F.relu(self.bn31(self.conv31(x2p)))
        x32 = F.relu(self.bn32(self.conv32(x31)))
        x33 = F.relu(self.bn33(self.conv33(x32)))
        x3p, id3 = F.max_pool2d(x33, kernel_size=2, stride=2, return_indices=True)

        # Stage 4
        x41 = F.relu(self.bn41(self.conv41(x3p)))
        x42 = F.relu(self.bn42(self.conv42(x41)))
        x43 = F.relu(self.bn43(self.conv43(x42)))
        x4p, id4 = F.max_pool2d(x43, kernel_size=2, stride=2, return_indices=True)

        # Stage 5
        x51 = F.relu(self.bn51(self.conv51(x4p)))
        x52 = F.relu(self.bn52(self.conv52(x51)))
        x53 = F.relu(self.bn53(self.conv53(x52)))
        x5p, id5 = F.max_pool2d(x53, kernel_size=2, stride=2, return_indices=True)

        # Stage 5d: unpool with the indices saved at stage 5
        x5d = F.max_unpool2d(x5p, id5, kernel_size=2, stride=2)
        x53d = F.relu(self.bn53d(self.conv53d(x5d)))
        x52d = F.relu(self.bn52d(self.conv52d(x53d)))
        x51d = F.relu(self.bn51d(self.conv51d(x52d)))

        # Stage 4d
        x4d = F.max_unpool2d(x51d, id4, kernel_size=2, stride=2)
        x43d = F.relu(self.bn43d(self.conv43d(x4d)))
        x42d = F.relu(self.bn42d(self.conv42d(x43d)))
        x41d = F.relu(self.bn41d(self.conv41d(x42d)))

        # Stage 3d
        x3d = F.max_unpool2d(x41d, id3, kernel_size=2, stride=2)
        x33d = F.relu(self.bn33d(self.conv33d(x3d)))
        x32d = F.relu(self.bn32d(self.conv32d(x33d)))
        x31d = F.relu(self.bn31d(self.conv31d(x32d)))

        # Stage 2d
        x2d = F.max_unpool2d(x31d, id2, kernel_size=2, stride=2)
        x22d = F.relu(self.bn22d(self.conv22d(x2d)))
        x21d = F.relu(self.bn21d(self.conv21d(x22d)))

        # Stage 1d: final 3x3 conv maps to label_nbr class scores per pixel
        x1d = F.max_unpool2d(x21d, id1, kernel_size=2, stride=2)
        x12d = F.relu(self.bn12d(self.conv12d(x1d)))
        x11d = self.conv11d(x12d)

        return x11d

    def load_from_segnet(self, model_path):
        # Load the weights from a saved SegNet model file
        th = torch.load(model_path).state_dict()
        self.load_state_dict(th)
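An illustrative usage sketch (the 3-channel input, 21 classes, and 224x224 crop size below are assumed values, not from the original post; it reuses the SegNet class and imports defined above): the network returns per-pixel class scores at the input resolution, and a soft-max over the channel dimension yields the per-pixel class probabilities mentioned above.

# Assumed example values: RGB input, 21 classes, 224x224 crops
model = SegNet(input_nbr=3, label_nbr=21)
dummy = torch.randn(1, 3, 224, 224)   # spatial size should be divisible by 32 (5 pooling stages)
logits = model(dummy)                 # shape: [1, 21, 224, 224]
probs = F.softmax(logits, dim=1)      # per-pixel class probabilities
pred = probs.argmax(dim=1)            # shape: [1, 224, 224], predicted class index per pixel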
