TensorRT code:

https://github.com/azhe198827/retinaface_tensorRT

https://github.com/lesurp/retinanet_cpp/blob/master/examples/single_image.cpp

https://github.com/wuchaodzxx/tensorrt_retinaface
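The inference script below assumes a RetinaFace model that has already been exported to ONNX (the default file name in the script is mobileretina.onnx). The export itself is not shown in this post; the following is only a minimal sketch of how such an export might look with torch.onnx.export, assuming a PyTorch RetinaFace network object `net` with weights already loaded, the 480x360 input size used by the script, and hypothetical output names that simply match the three outputs the script reads:

# Minimal export sketch (assumptions: `net` is a PyTorch RetinaFace model with
# weights loaded; 480x360 matches --input_shape below; the output names are
# hypothetical and only need to match the count/order the script expects).
import torch

def export_to_onnx(net, onnx_path="mobileretina.onnx", width=480, height=360):
    net.eval()
    dummy = torch.randn(1, 3, height, width)  # NCHW dummy input
    torch.onnx.export(
        net, dummy, onnx_path,
        input_names=["input"],
        output_names=["loc", "conf", "landmarks"],
        opset_version=11,
    )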

Without further ado, here is the code.

RetinaFace (ONNX Runtime inference):

import os
import time
from math import ceil
from itertools import product

import onnxruntime
import numpy as np
import cv2
import argparse

from data import cfg_mnet, cfg_peleenet
from utils.nms.py_cpu_nms import py_cpu_nms
# sigmoid function
def sigmoid(x):
    s = 1 / (1 + np.exp(-1 * x))
    return s


def softmax(x, axis=1):
    # Subtract the per-row maximum before exp(x), otherwise exp can overflow to inf
    row_max = x.max(axis=axis)
    row_max = row_max.reshape(-1, 1)
    x = x - row_max
    x_exp = np.exp(x)
    x_sum = np.sum(x_exp, axis=axis, keepdims=True)
    s = x_exp / x_sum
    return s


def decode_landm(pre, priors, variances):
    """Decode landmarks from predictions using priors to undo
    the encoding we did for offset regression at train time.
    Args:
        pre (tensor): landmark predictions for loc layers,
            Shape: [num_priors, 10]
        priors (tensor): Prior boxes in center-offset form.
            Shape: [num_priors, 4].
        variances: (list[float]) Variances of prior boxes
    Return:
        decoded landmark predictions
    """
    landms = np.concatenate((
        priors[:, :2] + pre[:, :2] * variances[0] * priors[:, 2:],
        priors[:, :2] + pre[:, 2:4] * variances[0] * priors[:, 2:],
        priors[:, :2] + pre[:, 4:6] * variances[0] * priors[:, 2:],
        priors[:, :2] + pre[:, 6:8] * variances[0] * priors[:, 2:],
        priors[:, :2] + pre[:, 8:10] * variances[0] * priors[:, 2:],
    ), 1)
    return landms


def decode(loc, priors, variances):
    """Decode locations from predictions using priors to undo
    the encoding we did for offset regression at train time.
    Args:
        loc (tensor): location predictions for loc layers,
            Shape: [num_priors, 4]
        priors (tensor): Prior boxes in center-offset form.
            Shape: [num_priors, 4].
        variances: (list[float]) Variances of prior boxes
    Return:
        decoded bounding box predictions
    """
    boxes = np.concatenate((
        priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],
        priors[:, 2:] * np.exp(loc[:, 2:] * variances[1]),
    ), 1)
    boxes[:, :2] -= boxes[:, 2:] / 2
    boxes[:, 2:] += boxes[:, :2]
    return boxes


class PriorBox(object):
    def __init__(self, cfg, image_size=None, phase='train'):
        super(PriorBox, self).__init__()
        self.min_sizes = cfg['min_sizes']
        self.steps = cfg['steps']
        self.image_size = image_size
        self.feature_maps = [[ceil(self.image_size[0] / step), ceil(self.image_size[1] / step)]
                             for step in self.steps]

    def forward(self):
        anchors = []
        for k, f in enumerate(self.feature_maps):
            min_sizes = self.min_sizes[k]
            for i, j in product(range(f[0]), range(f[1])):
                for min_size in min_sizes:
                    s_kx = min_size / self.image_size[1]
                    s_ky = min_size / self.image_size[0]
                    dense_cx = [x * self.steps[k] / self.image_size[1] for x in [j + 0.5]]
                    dense_cy = [y * self.steps[k] / self.image_size[0] for y in [i + 0.5]]
                    for cy, cx in product(dense_cy, dense_cx):
                        anchors += [cx, cy, s_kx, s_ky]
        # return anchors as a (num_priors, 4) array in center-offset form
        output = np.array(anchors)
        output = output.reshape(-1, 4)
        return output
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--input_shape', help="model input size (width, height)", default=(480, 360))
    parser.add_argument('--img_path', help="test image path", default="d:/cat.jpg")
    parser.add_argument('--onnx_path', help="onnx model file path", default="mobileretina.onnx")
    # parser.add_argument('--onnx_path', help="onnx model file path", default=r"pelee_detector.onnx")
    # parser.add_argument('--onnx_path', help="onnx model file path", default="yolov3.onnx")
    parser.add_argument('--confidence_threshold', help="confidence threshold", default=0.9, type=float)
    parser.add_argument('--nms_thres', help="nms threshold", default=0.6, type=float)
    parser.add_argument('--top_k', help="keep top-k anchors before NMS", default=20, type=int)
    parser.add_argument('--nms_threshold', help="NMS IoU threshold", default=0.4, type=float)
    parser.add_argument('--show_image', help="show detection results", default=True, type=bool)
    args = parser.parse_args()

    cfg = cfg_peleenet
    onnx_path = args.onnx_path
    session = onnxruntime.InferenceSession(onnx_path)
    input_shape = args.input_shape  # model input size
    nms_threshold = args.nms_thres
    img_path = args.img_path
    print("image path:", img_path)
    print("onnx model path:", onnx_path)

    # list_path = r"D:\project\face\face_mask\2020\0/"
    list_path = r"D:\input\faces/"
    g = os.walk(list_path)
    files = ['%s\\%s' % (i[0], j) for i in g for j in i[-1] if j.endswith('jpg')]

    width = input_shape[0]
    height = input_shape[1]
    scale = np.array([width, height, width, height])
    scale1 = np.array([width, height, width, height,
                       width, height, width, height,
                       width, height])
    resize_level = 1
    count = 0
    ok_count = 0
    priorbox = PriorBox(cfg, image_size=(height, width))
    priors = priorbox.forward()

    # vc = cv2.VideoCapture(r"D:\project\face\Face-Track-Detect-Extract\videos\2_Obama.mp4")  # read a video file
    vc = cv2.VideoCapture(0)  # open the default camera
    while True:  # read frames in a loop
        rval, img_raw = vc.read()
        # for file in files:
        #     file = r"d:/7_Cheering_Cheering_7_426.png"
        #     img_raw = cv2.imread(file)
        if img_raw is None:
            # print(file)
            continue
        start = time.time()

        # preprocess: resize, mean subtraction, HWC -> CHW, add batch dimension
        img_raw = cv2.resize(img_raw, input_shape)
        img = cv2.cvtColor(img_raw, cv2.COLOR_BGR2RGB)
        img = np.float32(img)
        img -= (104, 117, 123)
        image = img[:, :, ::-1].transpose((2, 0, 1))
        TestData = image[np.newaxis, :, :, :]

        # forward pass through onnxruntime
        start2 = time.time()
        inname = [input.name for input in session.get_inputs()][0]
        outname = [output.name for output in session.get_outputs()]
        loc, conf, landmarks = session.run(outname, {inname: TestData})
        print('net time', time.time() - start2)

        # decode boxes and landmarks back to pixel coordinates
        start1 = time.time()
        boxes = decode(np.squeeze(loc, axis=0), priors, cfg['variance'])
        boxes = boxes * scale / resize_level
        scores = np.squeeze(conf, axis=0)[:, 1]
        landmarks = decode_landm(np.squeeze(landmarks, axis=0), priors, cfg['variance'])
        landmarks = landmarks * scale1 / resize_level

        # ignore low scores
        inds = np.where(scores > args.confidence_threshold)[0]
        boxes = boxes[inds]
        landmarks = landmarks[inds]
        scores = scores[inds]

        # keep top-K before NMS
        order = scores.argsort()[::-1][:args.top_k]
        boxes = boxes[order]
        landmarks = landmarks[order]
        scores = scores[order]

        # do NMS
        dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
        keep = py_cpu_nms(dets, args.nms_threshold)
        dets = dets[keep, :]
        landmarks = landmarks[keep, :]
        print('time', time.time() - start, start1 - start)
        dets = np.concatenate((dets, landmarks), axis=1)

        if args.show_image:
            for box in dets:
                # drop or clip boxes that fall outside the frame
                if (box[3] < -box[1]) or (box[2] < -box[0]):
                    continue
                elif box[0] < 0 or box[1] < 0:
                    box[0] = max(0, box[0])
                    box[1] = max(0, box[1])
                if (box[3] + box[1] > 2 * img_raw.shape[0]) or (box[0] + box[2] > 2 * img_raw.shape[1]):
                    continue
                elif box[3] > img_raw.shape[0] or box[2] > img_raw.shape[1]:
                    box[3] = min(img_raw.shape[0], box[3])
                    box[2] = min(img_raw.shape[1], box[2])
                # drop boxes with an implausible aspect ratio
                if (box[2] - box[0]) > 4 * (box[3] - box[1]) or (box[2] - box[0]) * 4 < (box[3] - box[1]):
                    continue
                # if box[3]*resize_level > img_raw.shape[0] + 5 or box[2]*resize_level > img_raw.shape[1] + 5:
                #     # print('out_show', img_raw.shape, int(box[2]*resize_level), int(box[3]*resize_level))
                #     continue
                text = "{:.2f}".format(box[4])
                box = list(map(int, box))
                # face box
                cv2.rectangle(img_raw, (box[0] * resize_level, box[1] * resize_level),
                              (box[2] * resize_level, box[3] * resize_level), (0, 0, 255), 1)
                # five facial landmarks
                cv2.circle(img_raw, (box[5], box[6]), 1, (0, 0, 255), 4)
                cv2.circle(img_raw, (box[7], box[8]), 1, (0, 255, 255), 4)
                cv2.circle(img_raw, (box[9], box[10]), 1, (255, 0, 255), 4)
                cv2.circle(img_raw, (box[11], box[12]), 1, (0, 255, 0), 4)
                cv2.circle(img_raw, (box[13], box[14]), 1, (255, 0, 0), 4)
                cx = box[0] * resize_level + 18
                cy = box[1] * resize_level + 18
                # cv2.putText(img_raw, text, (cx, cy), cv2.FONT_HERSHEY_DUPLEX, 0.3, (0, 255, 0))

            # shrink the frame for display if it is taller than 1080 px
            if img_raw.shape[0] > 1080:
                fy = 1070 / img_raw.shape[0]
                img_raw = cv2.resize(img_raw, (0, 0), fx=fy, fy=fy, interpolation=cv2.INTER_NEAREST)
            cv2.imshow("sdf", img_raw)
            cv2.waitKey(1)
            # print(time.time() - start, "inputs name:", inname, "outputs name:", outname, prediction)
            # drawBox(boxes, img, img_shape)


if __name__ == '__main__':
    main()
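Before feeding a model into the script above (or into one of the TensorRT converters linked at the top), it is worth confirming that the ONNX file really exposes one input and three outputs in the order (loc, conf, landmarks), since session.run relies on that order. A small sanity-check sketch, assuming the same mobileretina.onnx file:

# Sanity check: print the ONNX model's inputs/outputs with onnxruntime.
import onnxruntime

sess = onnxruntime.InferenceSession("mobileretina.onnx")
for i in sess.get_inputs():
    print("input :", i.name, i.shape, i.type)
for o in sess.get_outputs():
    print("output:", o.name, o.shape, o.type)
# The script above unpacks session.run(...) as (loc, conf, landmarks),
# so the model must produce exactly three outputs in that order.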

