这是虚拟视点图像生成的第一篇博客,希望自己能坚持下来。

1.      实验环境

Win8-----vs2013-----opencv2.4.11

2.      实验材料

微软亚洲研究院多视点视频序列

下载地址:

http://research.microsoft.com/en-us/downloads/5e4675af-03f4-4b16-b3bc-a85c5bafb21d/

本文使用微软亚洲研究院的多视点视频序列,介绍由视点0和视点2生成虚拟视点1的过程,并采用OpenCV库辅助开发,主要的成果有:

1,由参考视点0的彩色图像和深度图像生成虚拟视点1的彩色图像和深度图像,中间用z-buffer算法解决遮挡问题;

2,在1的基础上,由参考视点2的彩色图像和深度图像生成虚拟视点1的彩色图像和深度图像。

3,为解决裂缝问题,用中值滤波来滤波彩色图像和深度图像。

4,采用的是双向映射。

原始图像

生成的图像

源代码

#ifndef _wrapingOf3D1
#define _wrapingOf3D1
#include<iostream>
#include<opencv2\opencv.hpp>
/*
** 1-D grayscale histogram helper (256 bins), used as a debugging aid to
** inspect the depth images.
*/
class Histogram1D{
private:
    int histSize[1];        // number of bins
    float hranges[2];       // value range [low, high) as required by cv::calcHist
    const float *ranges[1]; // cv::calcHist wants an array of range pointers
    int channels[1];        // channel to analyse (0 = first/only channel)
public:
    Histogram1D()
    {
        histSize[0] = 256;
        hranges[0] = 0.0;
        // cv::calcHist treats the upper bound of a uniform range as EXCLUSIVE,
        // so it must be 256.0 — with 255.0 the pixel value 255 is never counted.
        hranges[1] = 256.0;
        ranges[0] = hranges;
        channels[0] = 0;
    }

    // Compute the histogram of a single-channel 8-bit image.
    cv::MatND getHistogram(const cv::Mat &image)
    {
        cv::MatND hist;
        cv::calcHist(&image, 1, channels, cv::Mat(), hist, 1, histSize, ranges);
        return hist;
    }

    // Render the histogram as a 256x256 8-bit image; the tallest bar is
    // scaled to 90% of the image height.
    cv::Mat getHistogramImage(const cv::Mat &image)
    {
        cv::MatND hist = getHistogram(image);
        double maxVal = 0;
        double minVal = 0;
        cv::minMaxLoc(hist, &minVal, &maxVal, 0, 0);
        cv::Mat histImg(histSize[0], histSize[0], CV_8U, cv::Scalar(255));
        int hpt = static_cast<int>(0.9 * histSize[0]);
        for (int h = 0; h < histSize[0]; h++) {
            float binVal = hist.at<float>(h);
            int intensity = static_cast<int>(binVal * hpt / maxVal);
            cv::line(histImg,
                     cv::Point(h, histSize[0]),
                     cv::Point(h, histSize[0] - intensity),
                     cv::Scalar::all(100), 1);
        }
        return histImg;
    }
};
/*
** Struct holding one camera's intrinsic and extrinsic calibration parameters.
*/
// NOTE: in the collapsed original, the "// 3x3 intrinsic matrix" comment
// swallowed the three following member declarations; they are restored here.
typedef struct {
    double m_K[3][3];          // 3x3 intrinsic matrix
    double m_RotMatrix[3][3];  // rotation matrix
    double m_Trans[3];         // translation vector
    double m_ProjMatrix[4][4]; // projection matrix K*[R|t], bottom row (0,0,0,1)
} CalibStruct;
/*
** Global calibration state shared by all functions below.
*/
CalibStruct m_CalibParams[8];       // calibration data for the 8 cameras
int m_NumCameras = 8;
int m_Width = 1024, m_Height = 768; // camera resolution is 1024x768
double pts[8][3];                   // per-camera (u, v, depth-level) of the last projected point
/*
** Forward declarations.
** NOTE: the original declarations did not match the definitions
** (projUVZtoXY had a phantom pt[8][2] parameter, wrapingImage was missing
** imageDepthOut, pointProject_from_ref_to_otherView declared pts[8][2]
** instead of pts[8][3]); they are corrected here.
*/
void InitializeFromFile(char *fileName);
double DepthLevelToZ(unsigned char d);
unsigned char ZToDepthLever(double z);
double projXYZtoUV(double projMatrix[4][4], double x, double y, double z, double *u, double *v);
void projUVZtoXY(double projMatrix[4][4], double u, double v, double z, double *x, double *y);
void wrapingImage(int ref, int proj, cv::Mat &imageColor, cv::Mat &imageDepth, cv::Mat &imageColorOut, cv::Mat &imageDepthOut);
void pointProject_from_ref_to_otherView(double pts[8][3], int ref, int u, int v, unsigned char d);
/*
**define function
*//*
** read text file and write args to struct of globle variable
*/
void readCalibrationFile(char *fileName)
{int i, j, k;FILE *pIn;double dIn; // dummy variableint camIdx;if (pIn = fopen(fileName, "r")){for (k = 0; k<m_NumCameras; k++){// camera indexfscanf(pIn, "%d", &camIdx);//std::cout << camIdx << std::endl;// camera intrinsicsfor (i = 0; i < 3; i++){fscanf(pIn, "%lf\t%lf\t%lf", &(m_CalibParams[camIdx].m_K[i][0]), &(m_CalibParams[camIdx].m_K[i][1]), &(m_CalibParams[camIdx].m_K[i][2]));//std::cout << (m_CalibParams[camIdx].m_K[i][0])<<"\t"<<(m_CalibParams[camIdx].m_K[i][1]) <<"\t"<< (m_CalibParams[camIdx].m_K[i][2]) << std::endl;}// read barrel distortion params (assume 0)fscanf(pIn, "%lf", &dIn);fscanf(pIn, "%lf", &dIn);// read extrinsicsfor (i = 0; i<3; i++){for (j = 0; j<3; j++){fscanf(pIn, "%lf", &dIn);m_CalibParams[camIdx].m_RotMatrix[i][j] = dIn;//std::cout << m_CalibParams[camIdx].m_RotMatrix[i][j] << std::endl;}fscanf(pIn, "%lf", &dIn);m_CalibParams[camIdx].m_Trans[i] = dIn;}}fclose(pIn);}
}// readCalibrationFile/*
**calcular all projectionMatrices depended on struct variables
*/
void computeProjectionMatrices()
{int i, j, k, camIdx;double(*inMat)[3];double exMat[3][4];for (camIdx = 0; camIdx<m_NumCameras; camIdx++){// The intrinsic matrixinMat = m_CalibParams[camIdx].m_K;// The extrinsic matrixfor (i = 0; i<3; i++){for (j = 0; j<3; j++){exMat[i][j] = m_CalibParams[camIdx].m_RotMatrix[i][j];}}for (i = 0; i<3; i++){exMat[i][3] = m_CalibParams[camIdx].m_Trans[i];}// Multiply the intrinsic matrix by the extrinsic matrix to find our projection matrixfor (i = 0; i<3; i++){for (j = 0; j<4; j++){m_CalibParams[camIdx].m_ProjMatrix[i][j] = 0.0;for (k = 0; k<3; k++){m_CalibParams[camIdx].m_ProjMatrix[i][j] += inMat[i][k] * exMat[k][j];}}}m_CalibParams[camIdx].m_ProjMatrix[3][0] = 0.0;m_CalibParams[camIdx].m_ProjMatrix[3][1] = 0.0;m_CalibParams[camIdx].m_ProjMatrix[3][2] = 0.0;m_CalibParams[camIdx].m_ProjMatrix[3][3] = 1.0;}
}/**
**init projection matrix
*/
/*
** Load camera calibration from `fileName`, then precompute every
** camera's projection matrix.
*/
void InitializeFromFile(char *fileName)
{
    readCalibrationFile(fileName);
    computeProjectionMatrices();
}
/*
** Convert a depth-image level d (0..255) into metric depth z.
*/
double DepthLevelToZ(unsigned char d)
{
    // Depth range used by this sequence.
    const double MinZ = 44.0, MaxZ = 120.0;
    // Linear interpolation in inverse depth: d = 255 -> MinZ, d = 0 -> MaxZ.
    return 1.0 / ((d / 255.0) * (1.0 / MinZ - 1.0 / MaxZ) + 1.0 / MaxZ);
}
/*
** Convert metric depth z back into a depth-image level d (0..255).
*/
// Inverse of DepthLevelToZ. The cast truncates the fractional part.
// (Name "Lever" is a historical typo of "Level", kept for compatibility.)
unsigned char ZToDepthLever(double z)
{
    const double MinZ = 44.0, MaxZ = 120.0;
    double level = 255.0 * (1.0 / z - 1.0 / MaxZ) / (1.0 / MinZ - 1.0 / MaxZ);
    return (unsigned char)level;
}
/*
** Back-project pixel (u, v) at depth z to the 3-D point (x, y, z) using the
** given projection matrix, e.g. m_CalibParams[i].m_ProjMatrix for camera i.
*/
void projUVZtoXY(double projMatrix[4][4], double u, double v, double z, double *x, double *y)
{double c0, c1, c2;// image (0,0) is bottom lefthand cornerv = (double)m_Height - v - 1.0;c0 = z*projMatrix[0][2] + projMatrix[0][3];c1 = z*projMatrix[1][2] + projMatrix[1][3];c2 = z*projMatrix[2][2] + projMatrix[2][3];*y = u*(c1*projMatrix[2][0] - projMatrix[1][0] * c2) +v*(c2*projMatrix[0][0] - projMatrix[2][0] * c0) +projMatrix[1][0] * c0 - c1*projMatrix[0][0];*y /= v*(projMatrix[2][0] * projMatrix[0][1] - projMatrix[2][1] * projMatrix[0][0]) +u*(projMatrix[1][0] * projMatrix[2][1] - projMatrix[1][1] * projMatrix[2][0]) +projMatrix[0][0] * projMatrix[1][1] - projMatrix[1][0] * projMatrix[0][1];*x = (*y)*(projMatrix[0][1] - projMatrix[2][1] * u) + c0 - c2*u;*x /= projMatrix[2][0] * u - projMatrix[0][0];
} // projUVZtoXY/**
**calcular u,v,z1 depended on x,y,z
**z1 is after projection and z is before projection.z1 is return value
**/double projXYZtoUV(double projMatrix[4][4], double x, double y, double z, double *u, double *v)
{double w;*u = projMatrix[0][0] * x +projMatrix[0][1] * y +projMatrix[0][2] * z +projMatrix[0][3];*v = projMatrix[1][0] * x +projMatrix[1][1] * y +projMatrix[1][2] * z +projMatrix[1][3];w = projMatrix[2][0] * x +projMatrix[2][1] * y +projMatrix[2][2] * z +projMatrix[2][3];*u /= w;*v /= w;// image (0,0) is bottom lefthand corner*v = (double)m_Height - *v - 1.0;return w;} // projXYZtoUV/**
**/void pointProject_from_ref_to_otherView(double pts[8][3], int ref, int u, int v, unsigned char d)
{double x, y, z = DepthLevelToZ(d);//printf("Testing projection of pt (%d,%d) in camera 0 with d = %d (z = %f) to other cameras\n", u, v, d, z);projUVZtoXY(m_CalibParams[ref].m_ProjMatrix, (double)u, (double)v, z, &x, &y);//printf("3D pt = (%f, %f, %f) [coordinates wrt reference camera]\n", x, y, z);for (int cam = 0; cam<8; cam++){double *pt = pts[cam];pt[2]=projXYZtoUV(m_CalibParams[cam].m_ProjMatrix, x, y, z, &pt[0], &pt[1]);//printf("Camera %d: (%f, %f)\n", cam, pt[0], pt[1]);pt[2] = ZToDepthLever(pt[2]);}
}/**
**wraping image,ref represent reference cam,proj represent projection cam
**the kernal code
**/
void wrapingImage(int ref, int proj, cv::Mat &imageColor, cv::Mat &imageDepth, cv::Mat &imageColorOut, cv::Mat &imageDepthOut)
{for (int v = 0; v < imageColor.rows; v++)for (int u = 0; u < imageColor.cols; u++){double d = imageDepth.at<cv::Vec3b>(v, u)[0];pointProject_from_ref_to_otherView(pts, ref, u, v, d);int u1 = (int)pts[proj][0];int v1 = (int)pts[proj][1];int k1 = (int)pts[proj][2];if (u1 < 0 || u1 >= imageColor.cols - 1 || v1 < 0 || v1 >= imageColor.rows - 1)continue;if (k1 < imageDepthOut.at<cv::Vec3b>(v1, u1)[0])continue;imageColorOut.at<cv::Vec3b>(v1, u1) = imageColor.at<cv::Vec3b>(v, u);imageDepthOut.at<cv::Vec3b>(v1, u1)[0] = k1;imageDepthOut.at<cv::Vec3b>(v1, u1)[1] = k1;imageDepthOut.at<cv::Vec3b>(v1, u1)[2] = k1;}
}void dipslay(char *calibParam, char *refColor, char *refDepth, char *refColor2, char *refDepth2, char *actColor)
{//initialize projection_MatrixInitializeFromFile(calibParam);//display projection_Matrixfor (int i = 0; i < m_NumCameras; i++){for (int j = 0; j < 3; j++){for (int k = 0; k < 3; k++)std::cout << m_CalibParams[i].m_K[j][k] << "\t";std::cout << std::endl;}for (int j = 0; j < 3; j++){for (int k = 0; k < 3; k++)std::cout << m_CalibParams[i].m_RotMatrix[j][k] << "\t";std::cout << std::endl;}for (int k = 0; k < 3; k++)std::cout << m_CalibParams[i].m_Trans[k] << "\t";std::cout << std::endl;std::cout << std::endl;std::cout << std::endl;std::cout << std::endl;}//suspend and users enter a digitint aa;std::cin >> aa;//read color image and depth image of refrencecv::Mat imageColor = cv::imread(refColor);cv::Mat imageDepth = cv::imread(refDepth);cv::Mat imageColor2 = cv::imread(refColor2);cv::Mat imageDepth2 = cv::imread(refDepth2);//read true image used to comparecv::Mat imageColor_actual = cv::imread(actColor);//set reference camint ref = 0;int ref2 = 2;//set projection camint proj = 1;//test codepointProject_from_ref_to_otherView(pts, ref, 700, 700, imageDepth.at<cv::Vec3b>(700, 700)[0]);for (int i = 0; i < 8; i++){std::cout << pts[i][0] << "\t" << pts[i][1] << std::endl;}std::cin >> aa;//define two variable of outputcv::Mat imageColorOut;cv::Mat imageColorOut2;cv::Mat imageDepthOut;imageColorOut.create(imageColor.rows, imageColor.cols, imageColor.type());imageColorOut2.create(imageColor.rows, imageColor.cols, imageColor.type());imageDepthOut.create(imageColor.rows, imageColor.cols, imageColor.type());for (int v = 0; v < imageColor.rows; v++){for (int u = 0; u < imageColor.cols; u++){imageColorOut.at<cv::Vec3b>(v, u)[0] = 0;imageColorOut.at<cv::Vec3b>(v, u)[1] = 0;imageColorOut2.at<cv::Vec3b>(v, u)[2] = 0;imageColorOut2.at<cv::Vec3b>(v, u)[0] = 0;imageColorOut2.at<cv::Vec3b>(v, u)[1] = 0;imageColorOut.at<cv::Vec3b>(v, u)[2] = 0;imageDepthOut.at<cv::Vec3b>(v, u)[0] = 0;imageDepthOut.at<cv::Vec3b>(v, u)[1] = 0;imageDepthOut.at<cv::Vec3b>(v, u)[2] = 0;}}//save_dirchar 
*save_dir = "C:\\Users\\jiang\\Desktop\\experientdata\\experiencePictrue\\";//wraping from reference view to virtruel view wrapingImage(ref, proj, imageColor, imageDepth, imageColorOut,imageDepthOut);wrapingImage(ref2, proj, imageColor2, imageDepth2, imageColorOut, imageDepthOut);//display reference_image/*cv::imshow("reference_image", imageColor);cv::imwrite("C:\\Users\\jiang\\Desktop\\experientdata\\experiencePictrue\\reference_image.jpg", imageColor);*///display virtruel_imagecv::medianBlur(imageColorOut, imageColorOut, 3);cv::imshow("virtruel_Color_image01", imageColorOut);cv::imwrite("C:\\Users\\jiang\\Desktop\\experientdata\\experiencePictrue\\virtruel_Color_image01.jpg", imageColorOut);display virtruel_image//cv::imshow("virtruel_Color_image21", imageColorOut2);//cv::imwrite("C:\\Users\\jiang\\Desktop\\experientdata\\experiencePictrue\\virtruel_Color_image21.jpg", imageColorOut2);//display virtruel_imagecv::medianBlur(imageDepthOut, imageDepthOut, 3);cv::imshow("virtruel_Depth_image", imageDepthOut);cv::imwrite("C:\\Users\\jiang\\Desktop\\experientdata\\experiencePictrue\\virtruel_Depth_image.jpg", imageDepthOut);//display actruel_imagecv::imshow("actruel_image", imageColor_actual);cv::imwrite("C:\\Users\\jiang\\Desktop\\experientdata\\experiencePictrue\\actruel_image.jpg", imageColor_actual);mix reference and virtruel and check changes//cv::Mat imageColorRefVSProj;//cv::addWeighted(imageColor, 0.5, imageColorOut, 0.5, 0.0, imageColorRefVSProj);//cv::imshow("imageColorRefVSProj", imageColorRefVSProj);//cv::imwrite("C:\\Users\\jiang\\Desktop\\experientdata\\experiencePictrue\\imageColorRefVSProj.jpg", imageColorRefVSProj);mix actual and virtruel and check changes//cv::Mat imageColorActVSProj;//cv::addWeighted(imageColor_actual, 0.5, imageColorOut, 0.5, 0.0, imageColorActVSProj);//cv::imshow("imageColorActVSProj", imageColorActVSProj);//cv::imwrite("C:\\Users\\jiang\\Desktop\\experientdata\\experiencePictrue\\imageColorActVSProj.jpg", 
imageColorActVSProj);cv::waitKey();}void main()
{char *refColor = "C:\\Users\\jiang\\Desktop\\experientdata\\3DVideos-distrib\\MSR3DVideo-Breakdancers\\cam0\\color-cam0-f000.jpg";char *refDepth = "C:\\Users\\jiang\\Desktop\\experientdata\\3DVideos-distrib\\MSR3DVideo-Breakdancers\\cam0\\depth-cam0-f000.png";char *refColor2 = "C:\\Users\\jiang\\Desktop\\experientdata\\3DVideos-distrib\\MSR3DVideo-Breakdancers\\cam2\\color-cam2-f000.jpg";char *refDepth2 = "C:\\Users\\jiang\\Desktop\\experientdata\\3DVideos-distrib\\MSR3DVideo-Breakdancers\\cam2\\depth-cam2-f000.png";char *calibParam = "C:\\Users\\jiang\\Desktop\\experientdata\\3DVideos-distrib\\MSR3DVideo-Breakdancers\\calibParams-breakdancers.txt";char *actColor = "C:\\Users\\jiang\\Desktop\\experientdata\\3DVideos-distrib\\MSR3DVideo-Breakdancers\\cam1\\color-cam1-f000.jpg";dipslay(calibParam, refColor, refDepth, refColor2, refDepth2, actColor);
}
#endif

了解更多关于《计算机视觉与图形学》相关知识,请关注公众号:


下载上面论文和代码,请关注上方公众号并回复:传统虚拟视点生成

传统基于DIBR的虚拟视点生成理论+代码相关推荐

  1. 关于虚拟视点生成(Depth Based Image Rendering)问题

    一篇简要介绍 原理 的中文Blog 虚拟视点绘制 该Blog作者Github上对应的一个项目:Multi_View_System 一些Blog 真实场景的虚拟视点合成(View Synthsis)详解 ...

  2. matlab同步发电机外特性仿真,基于Matlab-Simulink的虚拟同步发电机控制方法的仿真研究...

    分 类 号: TM341 单位代码:10183 研究生学号: 201255E025 密 级:公开 吉 林 大 学硕士学位论文 (专业学位) 基于 Matlab-Simulink 的虚拟同步发电机控制方 ...

  3. CVPR 2020 | ACGPN: 基于图像的虚拟换装新思路

    点击上方"机器学习与生成对抗网络",关注"星标" 获取有趣.好玩的前沿干货! CVPR 2020之117篇GAN论文分类清单 编辑  AI科技评论 本文介绍的是 ...

  4. 一个基于HTML5的虚拟实验室

    第十一届国际计算机科学教育会议 2016.8.23-25 日本名古屋大学 一个基于HTML5的虚拟实验室 余笙,黄娟,张飞,安应,钟平 中南大学信息科学与工程学院 中国长沙 shengyu@csu.e ...

  5. 基于LabView的虚拟仪器实现频率稳定度的测量(图)

    2006-04-27 21:45:55 基于LabView的虚拟仪器实现频率稳定度的测量(图) align=right marginWidth=0 marginHeight=0 src="h ...

  6. 基于网络的虚拟仪器测试系统

    引 言 著名科学家门捷列夫说:"没有测量,就没有科学".测量科学的先驱凯尔文又说,一个事物你如果能够测量它,并且能用数字来表达它,你对它就有了深刻的了解:但如果你不知道如何测量它, ...

  7. 项目记录: 3DOF+实时渲染 之 虚拟视点合成

    项目记录: 3DOF+实时渲染 之 虚拟视点合成 总结一下实现3DOF+渲染的一些细节. 虚拟视点合成 3DOF+实时渲染方案 0. 3DOF+概念 关于 3DOF+ 概念 可以参考我之前的博文. 3 ...

  8. nginx基于IP的虚拟主机

    知识点: server的语法: upstream语法: upstream中192.168.100.1不是ip只是个标识,只要和下面的proxy_pass 对应即可. 基于IP的虚拟主机: listen ...

  9. unity3d 虚拟博物馆_基于Unity3D的虚拟博物馆信息可视化系统①

    基于 Unity3D 的虚拟博物馆信息可视化系统① 任国栋 1, 陈林华 1, 陶学锋 2, 方先行 2 [摘 要] 摘 要 : 信息可视化技术可以帮助我们加深对大规模复杂数据和信息间 相互关系的理解 ...

最新文章

  1. JDBC-ODBC桥乱码问题解决方案
  2. 【杂谈】有三AI限量定制版书签来了,你准备好入手了吗?
  3. 实现option上下移动_Perona-Malik方程(各向同性非线性扩散实现图像滤波)
  4. qt使用自带的日志输出实例输出日志时,在日志中显示行数
  5. perl 正则表达式 引擎_Perl 5引擎概述
  6. php中find的函数_filter()、find()函数的区别
  7. (24)打两拍verilog与VHDL编码(学无止境)
  8. 在Emacs下用C/C++编程(转载)
  9. Arcgis数据集如何以线性单位做投影
  10. python中多态是什么意思_python类的多态是什么
  11. iOS 相册多选 相机选择图片
  12. Rabbitmq基于插件的延迟队列
  13. MySQL事务原子性、一致性和持久性以及回滚是如何实现的?
  14. html5ify插件,漂亮的jQuery对话框插件Dialogify
  15. 学习可爱彩色线条PS极简马克笔简笔画:鞋子篇
  16. 撤销博士学位!取消硕导资格!科技部最新通报一批科研不端案件...
  17. 数据结构-广义表详解(类C语言版)
  18. 性能测试能力提升-JVM GC原理
  19. Python结合MySQL数据库编写简单信息管理系统
  20. 一句话定义:什么是长尾分布?(数据的长尾标签)

热门文章

  1. unity捕鱼达人----炮台的旋转的实现以及水波纹动画的制作
  2. 【概率论】随机选出一对小于等于N的互异正整数
  3. 关于acrobat登录弹窗解决办法
  4. HTML+CSS+JS 生鲜水果蔬菜商城网站设计——天天生鲜水果蔬菜商城(10页) web前端设计与开发期末作品_期末大作业
  5. 2.1剖析一个Jsp页面
  6. 【公告】xx 网络白皮书和Praxxis 技术文件正式发布
  7. 树叶贴画机器人_学生手工论文,关于对学前教育手工课教学相关参考文献资料-免费论文范文...
  8. 安全圈#遍地是大王,短暂又辉煌
  9. 条款3:理解decltype
  10. zip压缩包读写操作zipfile