使用完毕在此整理一下代码,这里就只对有改动的地方贴一下啊,其他的直接去github上下载一下吧
https://github.com/Guzaiwang/CE-Net

数据输入文件 data.py 基本没有改动,只是不做数据扩充、直接加载原始数据。下面会把有改动的地方标为斜体;斜体可能显示不清楚,但凡是我改动过的地方都带有 * 号,这些改动都集中在文件偏后的位置,直接往下翻即可。另外,数据存放结构很简单,如下所示:



标签就在labels里面了,和图像一一对应且同名。

"""
Based on https://github.com/asanakoy/kaggle_carvana_segmentation
"""
import math
import os

import cv2
import numpy as np
import torch
import torch.utils.data as data
from PIL import Image
from torch.autograd import Variable as V
import scipy.misc as miscdef randomHueSaturationValue(image, hue_shift_limit=(-180, 180),sat_shift_limit=(-255, 255),val_shift_limit=(-255, 255), u=0.5):if np.random.random() < u:image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)h, s, v = cv2.split(image)hue_shift = np.random.randint(hue_shift_limit[0], hue_shift_limit[1]+1)hue_shift = np.uint8(hue_shift)h += hue_shiftsat_shift = np.random.uniform(sat_shift_limit[0], sat_shift_limit[1])s = cv2.add(s, sat_shift)val_shift = np.random.uniform(val_shift_limit[0], val_shift_limit[1])v = cv2.add(v, val_shift)image = cv2.merge((h, s, v))#image = cv2.merge((s, v))image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)return imagedef randomShiftScaleRotate(image, mask,shift_limit=(-0.0, 0.0),scale_limit=(-0.0, 0.0),rotate_limit=(-0.0, 0.0), aspect_limit=(-0.0, 0.0),borderMode=cv2.BORDER_CONSTANT, u=0.5):if np.random.random() < u:height, width, channel = image.shapeangle = np.random.uniform(rotate_limit[0], rotate_limit[1])scale = np.random.uniform(1 + scale_limit[0], 1 + scale_limit[1])aspect = np.random.uniform(1 + aspect_limit[0], 1 + aspect_limit[1])sx = scale * aspect / (aspect ** 0.5)sy = scale / (aspect ** 0.5)dx = round(np.random.uniform(shift_limit[0], shift_limit[1]) * width)dy = round(np.random.uniform(shift_limit[0], shift_limit[1]) * height)cc = np.math.cos(angle / 180 * np.math.pi) * sxss = np.math.sin(angle / 180 * np.math.pi) * syrotate_matrix = np.array([[cc, -ss], [ss, cc]])box0 = np.array([[0, 0], [width, 0], [width, height], [0, height], ])box1 = box0 - np.array([width / 2, height / 2])box1 = np.dot(box1, rotate_matrix.T) + np.array([width / 2 + dx, height / 2 + dy])box0 = box0.astype(np.float32)box1 = box1.astype(np.float32)mat = cv2.getPerspectiveTransform(box0, box1)image = cv2.warpPerspective(image, mat, (width, height), flags=cv2.INTER_LINEAR, borderMode=borderMode,borderValue=(0, 0,0,))mask = cv2.warpPerspective(mask, mat, (width, height), flags=cv2.INTER_LINEAR, borderMode=borderMode,borderValue=(0, 
0,0,))return image, maskdef randomHorizontalFlip(image, mask, u=0.5):if np.random.random() < u:image = cv2.flip(image, 1)mask = cv2.flip(mask, 1)return image, maskdef randomVerticleFlip(image, mask, u=0.5):if np.random.random() < u:image = cv2.flip(image, 0)mask = cv2.flip(mask, 0)return image, maskdef randomRotate90(image, mask, u=0.5):if np.random.random() < u:image=np.rot90(image)mask=np.rot90(mask)return image, maskdef default_loader(img_path, mask_path):img = cv2.imread(img_path)# print("img:{}".format(np.shape(img)))img = cv2.resize(img, (448, 448))mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)mask = 255. - cv2.resize(mask, (448, 448))img = randomHueSaturationValue(img,hue_shift_limit=(-30, 30),sat_shift_limit=(-5, 5),val_shift_limit=(-15, 15))img, mask = randomShiftScaleRotate(img, mask,shift_limit=(-0.1, 0.1),scale_limit=(-0.1, 0.1),aspect_limit=(-0.1, 0.1),rotate_limit=(-0, 0))img, mask = randomHorizontalFlip(img, mask)img, mask = randomVerticleFlip(img, mask)img, mask = randomRotate90(img, mask)mask = np.expand_dims(mask, axis=2)## print(np.shape(img))# print(np.shape(mask))img = np.array(img, np.float32).transpose(2,0,1)/255.0 * 3.2 - 1.6mask = np.array(mask, np.float32).transpose(2,0,1)/255.0mask[mask >= 0.5] = 1mask[mask <= 0.5] = 0#mask = abs(mask-1)return img, maskdef default_DRIVE_loader(img_path, mask_path):img = cv2.imread(img_path)img = cv2.resize(img, (448, 448))# mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)mask = np.array(Image.open(mask_path))mask = cv2.resize(mask, (448, 448))img = randomHueSaturationValue(img,hue_shift_limit=(-30, 30),sat_shift_limit=(-5, 5),val_shift_limit=(-15, 15))img, mask = randomShiftScaleRotate(img, mask,shift_limit=(-0.1, 0.1),scale_limit=(-0.1, 0.1),aspect_limit=(-0.1, 0.1),rotate_limit=(-0, 0))img, mask = randomHorizontalFlip(img, mask)img, mask = randomVerticleFlip(img, mask)img, mask = randomRotate90(img, mask)mask = np.expand_dims(mask, axis=2)img = np.array(img, np.float32).transpose(2, 0, 1) / 255.0 
* 3.2 - 1.6mask = np.array(mask, np.float32).transpose(2, 0, 1) / 255.0mask[mask >= 0.5] = 1mask[mask <= 0.5] = 0# mask = abs(mask-1)return img, maskdef read_ORIGA_datasets(root_path, mode='train'):images = []masks = []if mode == 'train':read_files = os.path.join(root_path, 'Set_A.txt')else:read_files = os.path.join(root_path, 'Set_B.txt')image_root = os.path.join(root_path, 'images')gt_root = os.path.join(root_path, 'masks')for image_name in open(read_files):image_path = os.path.join(image_root, image_name.split('.')[0] + '.jpg')label_path = os.path.join(gt_root, image_name.split('.')[0] + '.jpg')print(image_path, label_path)images.append(image_path)masks.append(label_path)return images, masksdef read_Messidor_datasets(root_path, mode='train'):images = []masks = []if mode == 'train':read_files = os.path.join(root_path, 'train.txt')else:read_files = os.path.join(root_path, 'test.txt')image_root = os.path.join(root_path, 'save_image')gt_root = os.path.join(root_path, 'save_mask')for image_name in open(read_files):image_path = os.path.join(image_root, image_name.split('.')[0] + '.png')label_path = os.path.join(gt_root, image_name.split('.')[0] + '.png')images.append(image_path)masks.append(label_path)return images, masksdef read_RIM_ONE_datasets(root_path, mode='train'):images = []masks = []if mode == 'train':read_files = os.path.join(root_path, 'train_files.txt')else:read_files = os.path.join(root_path, 'test_files.txt')image_root = os.path.join(root_path, 'RIM-ONE-images')gt_root = os.path.join(root_path, 'RIM-ONE-exp1')for image_name in open(read_files):image_path = os.path.join(image_root, image_name.split('.')[0] + '.png')label_path = os.path.join(gt_root, image_name.split('.')[0] + '-exp1.png')images.append(image_path)masks.append(label_path)return images, masksdef read_DRIVE_datasets(root_path, mode='train'):images = []masks = []image_root = os.path.join(root_path, 'training/images')gt_root = os.path.join(root_path, 'training/1st_manual')for image_name in 
os.listdir(image_root):image_path = os.path.join(image_root, image_name.split('.')[0] + '.tif')label_path = os.path.join(gt_root, image_name.split('_')[0] + '_manual1.gif')images.append(image_path)masks.append(label_path)print(images, masks)return images, masksdef read_Cell_datasets(root_path, mode='train'):images = []masks = []image_root = os.path.join(root_path, 'train-images')gt_root = os.path.join(root_path, 'train-labels')for image_name in os.listdir(image_root):image_path = os.path.join(image_root, image_name)label_path = os.path.join(gt_root, image_name)images.append(image_path)masks.append(label_path)print(images, masks)return images, masksdef read_datasets_vessel(root_path, mode='train'):images = []masks = []image_root = os.path.join(root_path, 'training/images')gt_root = os.path.join(root_path, 'training/mask')for image_name in os.listdir(image_root):image_path = os.path.join(image_root, image_name)label_path = os.path.join(gt_root, image_name)if cv2.imread(image_path) is not None:if os.path.exists(image_path) and os.path.exists(label_path):images.append(image_path)masks.append(label_path)print(images[:10], masks[:10])return images, masks*def read_own_data(root_path, mode = 'train'):images = []masks = []image_root = os.path.join(root_path, 'train/imgs')gt_root = os.path.join(root_path, 'train/labels')for image_name in os.listdir(image_root):image_path = os.path.join(image_root, image_name)label_path = os.path.join(gt_root, image_name)images.append(image_path)masks.append(label_path)return images, masks**def own_data_loader(img_path, mask_path):img = cv2.imread(img_path)img = cv2.resize(img, (512, 512))mask = np.array(Image.open(mask_path))mask = cv2.resize(mask, (512, 512))mask = np.expand_dims(mask, axis=2)img = np.array(img, np.float32).transpose(2, 0, 1)mask = np.array(mask, np.float32).transpose(2, 0, 1)return img, mask*class ImageFolder(data.Dataset):def __init__(self,root_path, datasets='Messidor',  mode='train'):self.root = root_pathself.mode = 
modeself.dataset = datasets*assert self.dataset in ['RIM-ONE', 'Messidor', 'ORIGA', 'DRIVE', 'Cell', 'Vessel', 'own_data'],* \"the dataset should be in 'Messidor', 'ORIGA', 'RIM-ONE', 'Vessel', 'own_data'"if self.dataset == 'RIM-ONE':self.images, self.labels = read_RIM_ONE_datasets(self.root, self.mode)elif self.dataset == 'Messidor':self.images, self.labels = read_Messidor_datasets(self.root, self.mode)elif self.dataset == 'ORIGA':self.images, self.labels = read_ORIGA_datasets(self.root, self.mode)elif self.dataset == 'DRIVE':self.images, self.labels = read_DRIVE_datasets(self.root, self.mode)elif self.dataset == 'Cell':self.images, self.labels = read_Cell_datasets(self.root, self.mode)elif self.dataset == 'GAN_Vessel':self.images, self.labels = read_datasets_vessel(self.root, self.mode)*elif self.dataset == 'own_data':self.images, self.labels = read_own_data(self.root, self.mode)*else:print('Default dataset is Messidor')self.images, self.labels = read_Messidor_datasets(self.root, self.mode)def __getitem__(self, index):# img, mask = default_DRIVE_loader(self.images[index], self.labels[index]) *img, mask = own_data_loader(self.images[index], self.labels[index])*img = torch.Tensor(img)mask = torch.Tensor(mask)return img, maskdef __len__(self):assert len(self.images) == len(self.labels), 'The number of images must be equal to labels'return len(self.images)

训练文件main.py,这里面就按照他人要求加了学习率下降策略,可以和源码对照下增加或者保留想要的。

from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import cv2
import os
from tqdm import tqdm
from time import time
import torch
import torch.nn as nn
import torch.utils.data as data
from torch.autograd import Variable as V
from networks.cenet import CE_Net_
from framework import MyFrame
from loss import dice_bce_loss
from data import ImageFolder
from Visualizer import Visualizer
import Constants
import image_utils

# Please specify the ID of graphics cards that you want to use
os.environ['CUDA_VISIBLE_DEVICES'] = "0"


def CE_Net_Train():
    """Train CE-Net on the custom 'own_data' dataset with dice+BCE loss and an
    exponentially decaying learning rate; saves a checkpoint every epoch."""
    NAME = 'road'
    solver = MyFrame(CE_Net_, dice_bce_loss, 1e-3)
    batchsize = torch.cuda.device_count() * Constants.BATCHSIZE_PER_CARD

    # Multiply the LR by 0.9 after every epoch.
    scheduler = torch.optim.lr_scheduler.ExponentialLR(solver.optimizer, 0.9)

    dataset = ImageFolder(root_path=Constants.ROOT, datasets='own_data')
    data_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=batchsize,
        shuffle=True,
        num_workers=0)

    mylog = open('logs/' + NAME + '.log', 'w')
    no_optim = 0
    total_epoch = Constants.TOTAL_EPOCH
    train_epoch_best_loss = Constants.INITAL_EPOCH_LOSS

    for epoch in range(1, total_epoch + 1):
        data_loader_iter = iter(data_loader)
        train_epoch_loss = 0
        index = 0

        for img, mask in tqdm(data_loader_iter):
            solver.set_input(img, mask)
            train_loss, pred = solver.optimize()
            train_epoch_loss += train_loss
            index = index + 1

        # Step the scheduler AFTER the epoch's optimizer updates: the
        # original stepped it at the top of the loop, which decayed the LR
        # before the first batch ever ran and triggers a PyTorch warning
        # about calling scheduler.step() before optimizer.step().
        scheduler.step()

        train_epoch_loss = train_epoch_loss / len(data_loader_iter)
        mylog.write('epoch: ' + str(epoch) + ' ' + ' train_loss: ' +
                    str(train_epoch_loss.cpu().numpy()) + '\n')
        # get_last_lr() replaces the deprecated get_lr(), which returned
        # internally-scaled values when called outside of step().
        print('epoch:', epoch, 'train_loss:', train_epoch_loss.cpu().numpy(),
              'lr: ' + format(scheduler.get_last_lr()[0]))
        solver.save('./weights/' + str(epoch) + '.th')
        mylog.flush()

    print('Finish!')
    mylog.close()


if __name__ == '__main__':
    print(torch.__version__)
    CE_Net_Train()

预测文件 test.py 复制自源码(源码仓库中也有对应文件,可以对照一下),没有什么大的变化:因为没做任何预处理,所以把代码里相关的多余操作去掉了;实际使用的是斜体标注的那段代码。

import torch
import torch.nn as nn
import torch.utils.data as data
from torch.autograd import Variable as V
import sklearn.metrics as metrics
import cv2
import os
import numpy as np
from time import time
from PIL import Image
import warnings
warnings.filterwarnings('ignore')
from networks.cenet import CE_Net_

BATCHSIZE_PER_CARD = 8


class TTAFrame():
    """Test-time-augmentation inference wrapper.

    Each test_one_img_from_path_N variant splits the 8 TTA views (4 rotations
    x horizontal flip) into N forward passes and averages the predictions
    back into a single mask.
    """

    def __init__(self, net):
        self.net = net().cuda()
        self.net = torch.nn.DataParallel(
            self.net, device_ids=range(torch.cuda.device_count()))

    def test_one_img_from_path(self, path, evalmode=True):
        """Dispatch to a TTA variant based on available GPU batch capacity.

        NOTE(review): returns None when batchsize < 2 (no final else branch);
        kept as in the original — confirm whether a fallback is wanted.
        """
        if evalmode:
            self.net.eval()
        batchsize = torch.cuda.device_count() * BATCHSIZE_PER_CARD
        if batchsize >= 8:
            return self.test_one_img_from_path_1(path)
        elif batchsize >= 4:
            return self.test_one_img_from_path_2(path)
        elif batchsize >= 2:
            return self.test_one_img_from_path_4(path)

    def test_one_img_from_path_8(self, path):
        """8 forward passes of 2 views each; raw (unnormalised) inputs."""
        img = cv2.imread(path)  # .transpose(2,0,1)[None]
        img = cv2.resize(img, (512, 512))
        img90 = np.array(np.rot90(img))
        img1 = np.concatenate([img[None], img90[None]])
        img2 = np.array(img1)[:, ::-1]
        img3 = np.array(img1)[:, :, ::-1]
        img4 = np.array(img2)[:, :, ::-1]
        img1 = img1.transpose(0, 3, 1, 2)
        img2 = img2.transpose(0, 3, 1, 2)
        img3 = img3.transpose(0, 3, 1, 2)
        img4 = img4.transpose(0, 3, 1, 2)
        img1 = V(torch.Tensor(np.array(img1, np.float32)).cuda())
        img2 = V(torch.Tensor(np.array(img2, np.float32)).cuda())
        img3 = V(torch.Tensor(np.array(img3, np.float32)).cuda())
        img4 = V(torch.Tensor(np.array(img4, np.float32)).cuda())
        maska = self.net.forward(img1).squeeze().cpu().data.numpy()
        maskb = self.net.forward(img2).squeeze().cpu().data.numpy()
        maskc = self.net.forward(img3).squeeze().cpu().data.numpy()
        maskd = self.net.forward(img4).squeeze().cpu().data.numpy()
        # Undo each augmentation before summing the predictions.
        mask1 = maska + maskb[:, ::-1] + maskc[:, :, ::-1] + maskd[:, ::-1, ::-1]
        mask2 = mask1[0] + np.rot90(mask1[1])[::-1, ::-1]
        return mask2

    def test_one_img_from_path_4(self, path):
        """4 forward passes of 2 views each; inputs scaled to [-1.6, 1.6]."""
        img = cv2.imread(path)  # .transpose(2,0,1)[None]
        img = cv2.resize(img, (512, 512))
        img90 = np.array(np.rot90(img))
        img1 = np.concatenate([img[None], img90[None]])
        img2 = np.array(img1)[:, ::-1]
        img3 = np.array(img1)[:, :, ::-1]
        img4 = np.array(img2)[:, :, ::-1]
        img1 = img1.transpose(0, 3, 1, 2)
        img2 = img2.transpose(0, 3, 1, 2)
        img3 = img3.transpose(0, 3, 1, 2)
        img4 = img4.transpose(0, 3, 1, 2)
        img1 = V(torch.Tensor(np.array(img1, np.float32) / 255.0 * 3.2 - 1.6).cuda())
        img2 = V(torch.Tensor(np.array(img2, np.float32) / 255.0 * 3.2 - 1.6).cuda())
        img3 = V(torch.Tensor(np.array(img3, np.float32) / 255.0 * 3.2 - 1.6).cuda())
        img4 = V(torch.Tensor(np.array(img4, np.float32) / 255.0 * 3.2 - 1.6).cuda())
        maska = self.net.forward(img1).squeeze().cpu().data.numpy()
        maskb = self.net.forward(img2).squeeze().cpu().data.numpy()
        maskc = self.net.forward(img3).squeeze().cpu().data.numpy()
        maskd = self.net.forward(img4).squeeze().cpu().data.numpy()
        mask1 = maska + maskb[:, ::-1] + maskc[:, :, ::-1] + maskd[:, ::-1, ::-1]
        mask2 = mask1[0] + np.rot90(mask1[1])[::-1, ::-1]
        return mask2

    def test_one_img_from_path_2(self, path):
        """2 forward passes of 4 views each; raw (unnormalised) inputs."""
        img = cv2.imread(path)  # .transpose(2,0,1)[None]
        img = cv2.resize(img, (512, 512))
        img90 = np.array(np.rot90(img))
        img1 = np.concatenate([img[None], img90[None]])
        img2 = np.array(img1)[:, ::-1]
        img3 = np.concatenate([img1, img2])
        img4 = np.array(img3)[:, :, ::-1]
        img5 = img3.transpose(0, 3, 1, 2)
        # img5 = np.array(img5, np.float32)/255.0 * 3.2 -1.6
        img5 = np.array(img5, np.float32)
        img5 = V(torch.Tensor(img5).cuda())
        img6 = img4.transpose(0, 3, 1, 2)
        # img6 = np.array(img6, np.float32)/255.0 * 3.2 -1.6
        img6 = np.array(img6, np.float32)
        img6 = V(torch.Tensor(img6).cuda())
        maska = self.net.forward(img5).squeeze().cpu().data.numpy()  # .squeeze(1)
        maskb = self.net.forward(img6).squeeze().cpu().data.numpy()
        mask1 = maska + maskb[:, :, ::-1]
        mask2 = mask1[:2] + mask1[2:, ::-1]
        mask3 = mask2[0] + np.rot90(mask2[1])[::-1, ::-1]
        return mask3

    def test_one_img_from_path_1(self, path):
        """Single forward pass over all 8 views; raw (unnormalised) inputs."""
        img = cv2.imread(path)  # .transpose(2,0,1)[None]
        img = cv2.resize(img, (512, 512))
        img90 = np.array(np.rot90(img))
        img1 = np.concatenate([img[None], img90[None]])
        img2 = np.array(img1)[:, ::-1]
        img3 = np.concatenate([img1, img2])
        img4 = np.array(img3)[:, :, ::-1]
        img5 = np.concatenate([img3, img4]).transpose(0, 3, 1, 2)
        # img5 = np.array(img5, np.float32)/255.0 * 3.2 -1.6
        img5 = np.array(img5, np.float32)
        img5 = V(torch.Tensor(img5).cuda())
        mask = self.net.forward(img5).squeeze().cpu().data.numpy()  # .squeeze(1)
        mask1 = mask[:4] + mask[4:, :, ::-1]
        mask2 = mask1[:2] + mask1[2:, ::-1]
        mask3 = mask2[0] + np.rot90(mask2[1])[::-1, ::-1]
        return mask3

    def load(self, path):
        """Load network weights from a checkpoint file."""
        self.net.load_state_dict(torch.load(path))


source = './road/val/imgs/'
val = os.listdir(source)
solver = TTAFrame(CE_Net_)
solver.load('./weights/100.th')
tic = time()
target = './result/'
# makedirs(..., exist_ok=True): the original os.mkdir crashed with
# FileExistsError whenever ./result/ was left over from a previous run.
os.makedirs(target, exist_ok=True)
for i, name in enumerate(val):
    mask = solver.test_one_img_from_path(source + name)
    mask[mask > 0.5] = 255
    mask[mask <= 0.5] = 0
    mask = cv2.resize(mask, (500, 500), interpolation=cv2.INTER_NEAREST)
    mask = np.concatenate([mask[:, :, None], mask[:, :, None],
                           mask[:, :, None]], axis=2)
    cv2.imwrite(target + name, mask.astype(np.uint8))

精度评定代码eval.py,这是二分类,代码如下:

# -*- coding: utf-8 -*-
import os
import cv2
import numpy as np
class IOUMetric:
    """Accumulate a confusion matrix (fast_hist method) and compute mean IoU
    plus related pixel-accuracy metrics for semantic segmentation."""

    def __init__(self, num_classes):
        self.num_classes = num_classes
        # hist[i, j] counts pixels with ground truth i predicted as j.
        self.hist = np.zeros((num_classes, num_classes))

    def _fast_hist(self, label_pred, label_true):
        # Keep only positions whose ground-truth label is a valid class id
        # (0 <= label < num_classes).  NOTE(review): the original comment
        # claimed the background class is excluded, but class 0 is kept here.
        mask = (label_true >= 0) & (label_true < self.num_classes)
        # np.bincount over (num_classes * true + pred) counts every
        # (true, pred) pair at once; reshaping yields the (n, n) confusion
        # matrix.
        hist = np.bincount(
            self.num_classes * label_true[mask].astype(int) + label_pred[mask],
            minlength=self.num_classes ** 2).reshape(self.num_classes,
                                                     self.num_classes)
        return hist

    def evaluate(self, predictions, gts):
        """Accumulate all (prediction, ground-truth) pairs and return
        (acc, acc_cls, iou, miou, fwavacc).

        predictions/gts are sequences of equally-sized integer label arrays;
        each pixel carries one class id.
        """
        for lp, lt in zip(predictions, gts):
            assert len(lp.flatten()) == len(lt.flatten())
            self.hist += self._fast_hist(lp.flatten(), lt.flatten())
        # Per-class IoU = TP / (TP + FP + FN); mean over classes (nan-safe).
        iou = np.diag(self.hist) / (self.hist.sum(axis=1) +
                                    self.hist.sum(axis=0) - np.diag(self.hist))
        miou = np.nanmean(iou)
        # ----------------- other metrics ------------------------------
        # Overall pixel accuracy and mean per-class accuracy.
        acc = np.diag(self.hist).sum() / self.hist.sum()
        acc_cls = np.nanmean(np.diag(self.hist) / self.hist.sum(axis=1))
        # Frequency-weighted IoU.
        freq = self.hist.sum(axis=1) / self.hist.sum()
        fwavacc = (freq[freq > 0] * iou[freq > 0]).sum()
        return acc, acc_cls, iou, miou, fwavacc


if __name__ == '__main__':
    label_path = './road/val/labels/'
    predict_path = './result/'
    pres = os.listdir(predict_path)
    labels = []
    predicts = []
    for im in pres:
        if im[-4:] == '.png':
            label_name = im.split('.')[0] + '.png'
            lab_path = os.path.join(label_path, label_name)
            pre_path = os.path.join(predict_path, im)
            label = cv2.imread(lab_path, 0)
            pre = cv2.imread(pre_path, 0)
            # Predictions were written as {0, 255}; collapse to {0, 1}.
            pre[pre > 0] = 1
            labels.append(label)
            predicts.append(pre)

    el = IOUMetric(2)  # binary segmentation: use 2 classes here
    acc, acc_cls, iou, miou, fwavacc = el.evaluate(predicts, labels)
    print('acc: ', acc)
    print('acc_cls: ', acc_cls)
    print('iou: ', iou)
    print('miou: ', miou)
    print('fwavacc: ', fwavacc)

最后精度评定如下:
('acc: ', 0.9635741133786848)
('acc_cls: ', 0.9319977560822872)
('iou: ', array([0.96197102, 0.53645078]))
('miou: ', 0.7492109018048626)
('fwavacc: ', 0.9419769529495335)
结果并不是很好,你们自己调一下吧
部分结果如下:

UNET家族网络之CE-Net(github复现)相关推荐

  1. UNET家族网络之Unet++(附带了Nestnet、uent、PSPnet等)

    最近复现了一下unet++,发现这个项目里包含了很多网络,推荐给大家一下,GitHub链接:https://github.com/MrGiovanni/UNetPlusPlus/tree/master ...

  2. 网络音乐盒CE欧盟认证详解

    CE认证需要的基本资料 1 客户申请表(英文:产品名称CE认证.型号CE认证.申请人/制造厂地址). 2 产品型号及详细技术参数 3 零部件和整体结构的详细图片 4 电器原理图(电气产品) 5 机械装 ...

  3. Github复现之D-LinkNet(补全了验证部分代码,效果还行)

    链接:https://github.com/zlkanata/DeepGlobe-Road-Extraction-Challenge 多一嘴,这里面还带了unet,可以跑跑对比下别的unet哪个效果好 ...

  4. 如何解决因网络问题导致的 github 下载出错

    适用于 ubuntu 20.04 ubuntu 20.04 是 "西柚云" 主要使用的操作系统 西柚云官网 你还在因为 github 访问太慢而感到焦虑吗? 你还在因为无法访问 g ...

  5. 基于U-Net+残差网络的语义分割缺陷检测

    一.介绍 基于深度学习的缺陷检测主要集中在场景识别.object detection等方法,近年来,推出了一系列优秀的语义分割模型,比如SegNet.FCN.U-Net等.语义分割模型被广泛的应用到场 ...

  6. 几种Transformer+CNN(U-net)网络

    一. 对比 U-Net Transformer 优点 融合深层语义信息和高精度特征所含信息 提取全局信息 不足 无法对距离较远的特征的上下文关系进行建模 缺少局部细节处的信息 二. 网络 1. Tra ...

  7. 亚像素卷积网络(ESPCN)学习与Pytorch复现

    论文内容 论文地址:Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolution ...

  8. 长安“战疫”网络安全卫士守护赛_crypto_复现

    长安"战疫"网络安全卫士守护赛_Crypto math 涉及的知识点:RSA加密未知模数,已知p对q的逆元以及q对p的逆元求RSA的模数N 题目描述 题目没有描述,只有已知量c,e ...

  9. 156个Python网络爬虫资源,GitHub上awesome系列之Python爬虫工具

    项目地址:lorien/awesome-web-scraping GitHub上awesome系列之Python的爬虫工具. 本列表包含Python网页抓取和数据处理相关的库. 网络相关 通用 url ...

最新文章

  1. java web 心跳机制实现,基于javax的websocket服务端实现,含心跳机制
  2. 【程序员眼中的统计学(12)】相关与回归:我的线条如何? (转)
  3. [原创 URL重写步骤
  4. 计算机网络物理层知识要点:通信基础、传输介质和设备
  5. MATLAB转化彩色图片为灰度图片命令行窗口代码:
  6. TNS-12535: TNS: 操作超时_win7
  7. LeetCode 154. 寻找旋转排序数组中的最小值 II (二分)
  8. C#6中的新增功能 【Unity3D亲测】
  9. 根据银行卡号获取银行名称
  10. 如何把Word的默认页面颜色由白色改为绿色
  11. python判断手机号运营商_基于python的-使用正则表达式验证手机号并匹配运营商和所述地域...
  12. SSR 实战:官网开发指南
  13. PS一键制作针织毛衣纹理图案效果
  14. 超级应用 - 免费应用内测托管平台|APP应用分发平台|iOS应用分发|Android应用分发|免费应用内测托管平台 源码下载
  15. 处理工具提示的TTN_NEEDTEXT通知
  16. 中兴B860AV2.1B电视盒子刷机固件
  17. 斐波那契凤尾————斐波那契数列、打表
  18. 用java输出学生信息表代码_使用java导出学生成绩表-excel成绩表
  19. Android之TBS浏览Word、Excel、PPT、PDF等文件
  20. 2014 北京、西安邀请赛

热门文章

  1. mysql 校验 身份证_通过SQL校验身份证号码是否正确
  2. android stuido 如何使用SIM2或双SIM卡提交短信
  3. chrome 在win7上不能正常运行或者卡顿的解决方法
  4. python简单超级马里奥游戏下载大全_Python实现超级玛丽游戏系列教程01玛丽登场...
  5. 微信app支付 java后台接Android
  6. ArcGIS 10安装及破解
  7. Archicad二次开发——三角剖分
  8. 【Ansible常用模块】
  9. java.lang.UnsatisfiedLinkError: no rxtxSerial in java.library.path
  10. 网鱼网咖-利用数加快速搭建大数据平台,极致洞察,为客户带来从所未有的体验。