Before diving into the real-time beautification algorithm, you may want to read 程序员杠把子's blog post:

http://blog.csdn.net/oshunz/article/details/50536031

Real-time beautification is constrained by performance: many of the beautification algorithms used on PC are simply too heavy to run while rendering a phone's camera preview. In practice, real-time smoothing is therefore usually built on top of a Gaussian blur, a compromise between performance and quality. The result produced by the algorithm described in 程序员杠把子's blog is not very good; his GLSL looks like this:

precision mediump float;

varying mediump vec2 textureCoordinate;

uniform sampler2D inputImageTexture;
uniform vec2 singleStepOffset;
uniform mediump float params;

const highp vec3 W = vec3(0.299, 0.587, 0.114);

vec2 blurCoordinates[20];

// Hard-light blend of a value with itself; boosts the contrast of the high-pass term
float hardLight(float color)
{
    if (color <= 0.5)
        color = color * color * 2.0;
    else
        color = 1.0 - ((1.0 - color) * (1.0 - color) * 2.0);
    return color;
}

void main() {
    vec3 centralColor = texture2D(inputImageTexture, textureCoordinate).rgb;

    // 20 fixed sampling offsets around the current fragment
    blurCoordinates[0]  = textureCoordinate.xy + singleStepOffset * vec2(0.0, -10.0);
    blurCoordinates[1]  = textureCoordinate.xy + singleStepOffset * vec2(0.0, 10.0);
    blurCoordinates[2]  = textureCoordinate.xy + singleStepOffset * vec2(-10.0, 0.0);
    blurCoordinates[3]  = textureCoordinate.xy + singleStepOffset * vec2(10.0, 0.0);
    blurCoordinates[4]  = textureCoordinate.xy + singleStepOffset * vec2(5.0, -8.0);
    blurCoordinates[5]  = textureCoordinate.xy + singleStepOffset * vec2(5.0, 8.0);
    blurCoordinates[6]  = textureCoordinate.xy + singleStepOffset * vec2(-5.0, 8.0);
    blurCoordinates[7]  = textureCoordinate.xy + singleStepOffset * vec2(-5.0, -8.0);
    blurCoordinates[8]  = textureCoordinate.xy + singleStepOffset * vec2(8.0, -5.0);
    blurCoordinates[9]  = textureCoordinate.xy + singleStepOffset * vec2(8.0, 5.0);
    blurCoordinates[10] = textureCoordinate.xy + singleStepOffset * vec2(-8.0, 5.0);
    blurCoordinates[11] = textureCoordinate.xy + singleStepOffset * vec2(-8.0, -5.0);
    blurCoordinates[12] = textureCoordinate.xy + singleStepOffset * vec2(0.0, -6.0);
    blurCoordinates[13] = textureCoordinate.xy + singleStepOffset * vec2(0.0, 6.0);
    blurCoordinates[14] = textureCoordinate.xy + singleStepOffset * vec2(6.0, 0.0);
    blurCoordinates[15] = textureCoordinate.xy + singleStepOffset * vec2(-6.0, 0.0);
    blurCoordinates[16] = textureCoordinate.xy + singleStepOffset * vec2(-4.0, -4.0);
    blurCoordinates[17] = textureCoordinate.xy + singleStepOffset * vec2(-4.0, 4.0);
    blurCoordinates[18] = textureCoordinate.xy + singleStepOffset * vec2(4.0, -4.0);
    blurCoordinates[19] = textureCoordinate.xy + singleStepOffset * vec2(4.0, 4.0);

    // Weighted blur of the green channel: center x20, outer samples x1, inner samples x2
    float sampleColor = centralColor.g * 20.0;
    sampleColor += texture2D(inputImageTexture, blurCoordinates[0]).g;
    sampleColor += texture2D(inputImageTexture, blurCoordinates[1]).g;
    sampleColor += texture2D(inputImageTexture, blurCoordinates[2]).g;
    sampleColor += texture2D(inputImageTexture, blurCoordinates[3]).g;
    sampleColor += texture2D(inputImageTexture, blurCoordinates[4]).g;
    sampleColor += texture2D(inputImageTexture, blurCoordinates[5]).g;
    sampleColor += texture2D(inputImageTexture, blurCoordinates[6]).g;
    sampleColor += texture2D(inputImageTexture, blurCoordinates[7]).g;
    sampleColor += texture2D(inputImageTexture, blurCoordinates[8]).g;
    sampleColor += texture2D(inputImageTexture, blurCoordinates[9]).g;
    sampleColor += texture2D(inputImageTexture, blurCoordinates[10]).g;
    sampleColor += texture2D(inputImageTexture, blurCoordinates[11]).g;
    sampleColor += texture2D(inputImageTexture, blurCoordinates[12]).g * 2.0;
    sampleColor += texture2D(inputImageTexture, blurCoordinates[13]).g * 2.0;
    sampleColor += texture2D(inputImageTexture, blurCoordinates[14]).g * 2.0;
    sampleColor += texture2D(inputImageTexture, blurCoordinates[15]).g * 2.0;
    sampleColor += texture2D(inputImageTexture, blurCoordinates[16]).g * 2.0;
    sampleColor += texture2D(inputImageTexture, blurCoordinates[17]).g * 2.0;
    sampleColor += texture2D(inputImageTexture, blurCoordinates[18]).g * 2.0;
    sampleColor += texture2D(inputImageTexture, blurCoordinates[19]).g * 2.0;
    sampleColor = sampleColor / 48.0; // 20 + 12 * 1 + 8 * 2 = 48

    // High pass: original minus blurred, re-centered around 0.5
    float highPass = centralColor.g - sampleColor + 0.5;

    // Apply hard light five times to strengthen the high-pass response
    for (int i = 0; i < 5; i++)
    {
        highPass = hardLight(highPass);
    }

    float luminance = dot(centralColor, W);
    float alpha = pow(luminance, params);

    vec3 smoothColor = centralColor + (centralColor - vec3(highPass)) * alpha * 0.1;

    gl_FragColor = vec4(mix(smoothColor.rgb, max(smoothColor, centralColor), alpha), 1.0);
}
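In this shader, singleStepOffset is the per-step sampling offset in texture coordinates. A minimal Java-side sketch of how it might be fed (my own illustration, not code from the original blog; one texel, i.e. 1/width and 1/height, is a common choice, though some implementations scale the step differently):

import android.opengl.GLES20;

public class SingleStepOffsetHelper {
    // Upload the per-step sampling offset to the shader. One texel per step,
    // i.e. (1/width, 1/height), is assumed here; call while the program is
    // in use (after glUseProgram) and again whenever the preview size changes.
    public static void setSingleStepOffset(int program, int width, int height) {
        int location = GLES20.glGetUniformLocation(program, "singleStepOffset");
        GLES20.glUniform2fv(location, 1, new float[] {1.0f / width, 1.0f / height}, 0);
    }
}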

The real-time result is far from ideal: after heavy smoothing, obvious artifacts show up along some edges, looking like shadows, as in the following image:

[Figure: MagicCamera's skin-smoothing result]

As the image shows, heavy smoothing looks very unnatural, and under strong lighting the edges exhibit a pronounced translucent, shadow-like artifact.

After some investigation, I found that the core Gaussian blur is not the problem; the sampling offsets are. They should be scaled according to the texture's width and height. After debugging and tuning, I ended up with the following GLSL:

precision lowp float;

uniform sampler2D inputTexture;
varying lowp vec2 textureCoordinate;

uniform int width;
uniform int height;

// Smoothing strength (from low to high: 0.5 ~ 0.99)
uniform float opacity;

void main() {
    vec3 centralColor;
    centralColor = texture2D(inputTexture, textureCoordinate).rgb;
    if (opacity < 0.01) {
        gl_FragColor = vec4(centralColor, 1.0);
    } else {
        // Sampling offsets scaled by the actual texture size,
        // with the scale shrinking from the outer ring inwards
        float x_a = float(width);
        float y_a = float(height);

        float mul_x = 2.0 / x_a;
        float mul_y = 2.0 / y_a;
        vec2 blurCoordinates0 = textureCoordinate + vec2(0.0 * mul_x, -10.0 * mul_y);
        vec2 blurCoordinates2 = textureCoordinate + vec2(8.0 * mul_x, -5.0 * mul_y);
        vec2 blurCoordinates4 = textureCoordinate + vec2(8.0 * mul_x, 5.0 * mul_y);
        vec2 blurCoordinates6 = textureCoordinate + vec2(0.0 * mul_x, 10.0 * mul_y);
        vec2 blurCoordinates8 = textureCoordinate + vec2(-8.0 * mul_x, 5.0 * mul_y);
        vec2 blurCoordinates10 = textureCoordinate + vec2(-8.0 * mul_x, -5.0 * mul_y);

        mul_x = 1.8 / x_a;
        mul_y = 1.8 / y_a;
        vec2 blurCoordinates1 = textureCoordinate + vec2(5.0 * mul_x, -8.0 * mul_y);
        vec2 blurCoordinates3 = textureCoordinate + vec2(10.0 * mul_x, 0.0 * mul_y);
        vec2 blurCoordinates5 = textureCoordinate + vec2(5.0 * mul_x, 8.0 * mul_y);
        vec2 blurCoordinates7 = textureCoordinate + vec2(-5.0 * mul_x, 8.0 * mul_y);
        vec2 blurCoordinates9 = textureCoordinate + vec2(-10.0 * mul_x, 0.0 * mul_y);
        vec2 blurCoordinates11 = textureCoordinate + vec2(-5.0 * mul_x, -8.0 * mul_y);

        mul_x = 1.6 / x_a;
        mul_y = 1.6 / y_a;
        vec2 blurCoordinates12 = textureCoordinate + vec2(0.0 * mul_x, -6.0 * mul_y);
        vec2 blurCoordinates14 = textureCoordinate + vec2(-6.0 * mul_x, 0.0 * mul_y);
        vec2 blurCoordinates16 = textureCoordinate + vec2(0.0 * mul_x, 6.0 * mul_y);
        vec2 blurCoordinates18 = textureCoordinate + vec2(6.0 * mul_x, 0.0 * mul_y);

        mul_x = 1.4 / x_a;
        mul_y = 1.4 / y_a;
        vec2 blurCoordinates13 = textureCoordinate + vec2(-4.0 * mul_x, -4.0 * mul_y);
        vec2 blurCoordinates15 = textureCoordinate + vec2(-4.0 * mul_x, 4.0 * mul_y);
        vec2 blurCoordinates17 = textureCoordinate + vec2(4.0 * mul_x, 4.0 * mul_y);
        vec2 blurCoordinates19 = textureCoordinate + vec2(4.0 * mul_x, -4.0 * mul_y);

        float central;
        float gaussianWeightTotal;
        float sum;
        float sampler;
        float distanceFromCentralColor;
        float gaussianWeight;

        // Edge-preserving weighting on the green channel: the larger the difference
        // from the central sample, the smaller the weight (bilateral-style)
        float distanceNormalizationFactor = 3.6;

        central = texture2D(inputTexture, textureCoordinate).g;
        gaussianWeightTotal = 0.2;
        sum = central * 0.2;

        sampler = texture2D(inputTexture, blurCoordinates0).g;
        distanceFromCentralColor = min(abs(central - sampler) * distanceNormalizationFactor, 1.0);
        gaussianWeight = 0.09 * (1.0 - distanceFromCentralColor);
        gaussianWeightTotal += gaussianWeight;
        sum += sampler * gaussianWeight;

        sampler = texture2D(inputTexture, blurCoordinates1).g;
        distanceFromCentralColor = min(abs(central - sampler) * distanceNormalizationFactor, 1.0);
        gaussianWeight = 0.09 * (1.0 - distanceFromCentralColor);
        gaussianWeightTotal += gaussianWeight;
        sum += sampler * gaussianWeight;

        sampler = texture2D(inputTexture, blurCoordinates2).g;
        distanceFromCentralColor = min(abs(central - sampler) * distanceNormalizationFactor, 1.0);
        gaussianWeight = 0.09 * (1.0 - distanceFromCentralColor);
        gaussianWeightTotal += gaussianWeight;
        sum += sampler * gaussianWeight;

        sampler = texture2D(inputTexture, blurCoordinates3).g;
        distanceFromCentralColor = min(abs(central - sampler) * distanceNormalizationFactor, 1.0);
        gaussianWeight = 0.09 * (1.0 - distanceFromCentralColor);
        gaussianWeightTotal += gaussianWeight;
        sum += sampler * gaussianWeight;

        sampler = texture2D(inputTexture, blurCoordinates4).g;
        distanceFromCentralColor = min(abs(central - sampler) * distanceNormalizationFactor, 1.0);
        gaussianWeight = 0.09 * (1.0 - distanceFromCentralColor);
        gaussianWeightTotal += gaussianWeight;
        sum += sampler * gaussianWeight;

        sampler = texture2D(inputTexture, blurCoordinates5).g;
        distanceFromCentralColor = min(abs(central - sampler) * distanceNormalizationFactor, 1.0);
        gaussianWeight = 0.09 * (1.0 - distanceFromCentralColor);
        gaussianWeightTotal += gaussianWeight;
        sum += sampler * gaussianWeight;

        sampler = texture2D(inputTexture, blurCoordinates6).g;
        distanceFromCentralColor = min(abs(central - sampler) * distanceNormalizationFactor, 1.0);
        gaussianWeight = 0.09 * (1.0 - distanceFromCentralColor);
        gaussianWeightTotal += gaussianWeight;
        sum += sampler * gaussianWeight;

        sampler = texture2D(inputTexture, blurCoordinates7).g;
        distanceFromCentralColor = min(abs(central - sampler) * distanceNormalizationFactor, 1.0);
        gaussianWeight = 0.09 * (1.0 - distanceFromCentralColor);
        gaussianWeightTotal += gaussianWeight;
        sum += sampler * gaussianWeight;

        sampler = texture2D(inputTexture, blurCoordinates8).g;
        distanceFromCentralColor = min(abs(central - sampler) * distanceNormalizationFactor, 1.0);
        gaussianWeight = 0.09 * (1.0 - distanceFromCentralColor);
        gaussianWeightTotal += gaussianWeight;
        sum += sampler * gaussianWeight;

        sampler = texture2D(inputTexture, blurCoordinates9).g;
        distanceFromCentralColor = min(abs(central - sampler) * distanceNormalizationFactor, 1.0);
        gaussianWeight = 0.09 * (1.0 - distanceFromCentralColor);
        gaussianWeightTotal += gaussianWeight;
        sum += sampler * gaussianWeight;

        sampler = texture2D(inputTexture, blurCoordinates10).g;
        distanceFromCentralColor = min(abs(central - sampler) * distanceNormalizationFactor, 1.0);
        gaussianWeight = 0.09 * (1.0 - distanceFromCentralColor);
        gaussianWeightTotal += gaussianWeight;
        sum += sampler * gaussianWeight;

        sampler = texture2D(inputTexture, blurCoordinates11).g;
        distanceFromCentralColor = min(abs(central - sampler) * distanceNormalizationFactor, 1.0);
        gaussianWeight = 0.09 * (1.0 - distanceFromCentralColor);
        gaussianWeightTotal += gaussianWeight;
        sum += sampler * gaussianWeight;

        // The inner samples get a slightly higher base weight
        sampler = texture2D(inputTexture, blurCoordinates12).g;
        distanceFromCentralColor = min(abs(central - sampler) * distanceNormalizationFactor, 1.0);
        gaussianWeight = 0.1 * (1.0 - distanceFromCentralColor);
        gaussianWeightTotal += gaussianWeight;
        sum += sampler * gaussianWeight;

        sampler = texture2D(inputTexture, blurCoordinates13).g;
        distanceFromCentralColor = min(abs(central - sampler) * distanceNormalizationFactor, 1.0);
        gaussianWeight = 0.1 * (1.0 - distanceFromCentralColor);
        gaussianWeightTotal += gaussianWeight;
        sum += sampler * gaussianWeight;

        sampler = texture2D(inputTexture, blurCoordinates14).g;
        distanceFromCentralColor = min(abs(central - sampler) * distanceNormalizationFactor, 1.0);
        gaussianWeight = 0.1 * (1.0 - distanceFromCentralColor);
        gaussianWeightTotal += gaussianWeight;
        sum += sampler * gaussianWeight;

        sampler = texture2D(inputTexture, blurCoordinates15).g;
        distanceFromCentralColor = min(abs(central - sampler) * distanceNormalizationFactor, 1.0);
        gaussianWeight = 0.1 * (1.0 - distanceFromCentralColor);
        gaussianWeightTotal += gaussianWeight;
        sum += sampler * gaussianWeight;

        sampler = texture2D(inputTexture, blurCoordinates16).g;
        distanceFromCentralColor = min(abs(central - sampler) * distanceNormalizationFactor, 1.0);
        gaussianWeight = 0.1 * (1.0 - distanceFromCentralColor);
        gaussianWeightTotal += gaussianWeight;
        sum += sampler * gaussianWeight;

        sampler = texture2D(inputTexture, blurCoordinates17).g;
        distanceFromCentralColor = min(abs(central - sampler) * distanceNormalizationFactor, 1.0);
        gaussianWeight = 0.1 * (1.0 - distanceFromCentralColor);
        gaussianWeightTotal += gaussianWeight;
        sum += sampler * gaussianWeight;

        sampler = texture2D(inputTexture, blurCoordinates18).g;
        distanceFromCentralColor = min(abs(central - sampler) * distanceNormalizationFactor, 1.0);
        gaussianWeight = 0.1 * (1.0 - distanceFromCentralColor);
        gaussianWeightTotal += gaussianWeight;
        sum += sampler * gaussianWeight;

        sampler = texture2D(inputTexture, blurCoordinates19).g;
        distanceFromCentralColor = min(abs(central - sampler) * distanceNormalizationFactor, 1.0);
        gaussianWeight = 0.1 * (1.0 - distanceFromCentralColor);
        gaussianWeightTotal += gaussianWeight;
        sum += sampler * gaussianWeight;

        sum = sum / gaussianWeightTotal;

        // High-contrast preserve (high pass): original minus blurred, re-centered at 0.5
        sampler = centralColor.g - sum + 0.5;

        // Apply hard light five times to strengthen the high-pass response
        for (int i = 0; i < 5; ++i) {
            if (sampler <= 0.5) {
                sampler = sampler * sampler * 2.0;
            } else {
                sampler = 1.0 - ((1.0 - sampler) * (1.0 - sampler) * 2.0);
            }
        }

        // Amplify the base image slightly and subtract the boosted high pass
        float aa = 1.0 + pow(sum, 0.3) * 0.09;
        vec3 smoothColor = centralColor * aa - vec3(sampler) * (aa - 1.0);
        smoothColor = clamp(smoothColor, vec3(0.0), vec3(1.0));

        // Blend back toward the original in darker regions (green channel as a luma proxy)
        smoothColor = mix(centralColor, smoothColor, pow(centralColor.g, 0.33));
        smoothColor = mix(centralColor, smoothColor, pow(centralColor.g, 0.39));

        smoothColor = mix(centralColor, smoothColor, opacity);

        gl_FragColor = vec4(pow(smoothColor, vec3(0.96)), 1.0);
    }
}
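Because the offsets now depend on the real texture size, the width and height uniforms (along with opacity) have to be refreshed whenever the preview size changes. A minimal sketch of the Java-side wiring (illustrative only, not copied from CainCamera; only the uniform names come from the shader above):

import android.opengl.GLES20;

public class BeautyUniformHelper {
    // Upload the texture size and smoothing strength to the improved shader.
    // Must be called while the shader program is in use (after glUseProgram),
    // and again whenever the preview/texture size changes.
    public static void updateUniforms(int program, int texWidth, int texHeight, float opacity) {
        GLES20.glUniform1i(GLES20.glGetUniformLocation(program, "width"), texWidth);
        GLES20.glUniform1i(GLES20.glGetUniformLocation(program, "height"), texHeight);
        GLES20.glUniform1f(GLES20.glGetUniformLocation(program, "opacity"), opacity);
    }
}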

The smoothing level (0% ~ 100%) can be computed like this:

/**
 * Set the smoothing strength
 * @param percent percentage (0.0 ~ 1.0)
 */
public void setSmoothOpacity(float percent) {
    float opacity;
    if (percent <= 0) {
        opacity = 0.0f;
    } else {
        opacity = calculateOpacity(percent);
    }
    setFloat(mOpacityLoc, opacity);
}

/**
 * Map the percentage to the actual smoothing strength
 * @param percent percentage (0.0 ~ 1.0)
 * @return the opacity value passed to the shader
 */
private float calculateOpacity(float percent) {
    float result = 0.0f;
    // TODO: a piecewise function could be added to treat different smoothing levels differently
    result = (float) (1.0f - (1.0f - percent + 0.02) / 2.0f);
    return result;
}
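As a quick sanity check, the endpoints of this linear mapping line up with the 0.5 ~ 0.99 range documented next to the opacity uniform in the shader (the mBeautyFilter variable below is a hypothetical usage example, standing for whatever object exposes setSmoothOpacity()):

// Hypothetical usage of the mapping above
mBeautyFilter.setSmoothOpacity(0.0f); // opacity = 0.0, the shader passes the input through
mBeautyFilter.setSmoothOpacity(0.5f); // opacity = 1 - (1 - 0.5 + 0.02) / 2 = 0.74
mBeautyFilter.setSmoothOpacity(1.0f); // opacity = 1 - (1 - 1.0 + 0.02) / 2 = 0.99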

The optimized smoothing algorithm produces the following result:

[Figure: skin-smoothing result after optimization]

As you can see, the smoothing is considerably finer, and the shadow-like artifacts along the edges are much less pronounced. What remains can be masked further by applying sharpening or layering other effects over the edges.

For the full effect, see the camera project I am currently developing:

https://github.com/CainKernel/CainCamera

I have been busy with other things lately, so the camera project still lacks many features and has plenty of room for optimization; I will keep updating it from time to time to implement what remains.
