Date: July to December 2020
Last revised: 2020-12-13
Disclaimer: these are personal study notes, written for my own review and for discussion. If anything here infringes on someone's rights, is wrong, or reads differently to you, please leave a comment and point it out; I will correct it promptly. With sincere respect!
This PBR note series has three parts; this is part three: an analysis of the BRDF in Unity URP.
Contents
Overview
URP Lit mind map
Vertex shader: LitPassVertex analysis
Fragment shader: LitPassFragment
Summary
References
Overview: these notes skip most of the basics and only examine the pieces that come up frequently in day-to-day look development.
Environment: Unity 2019.4 with URP 7.3.1; the analysis covers that pipeline's standard Lit.shader.
Formula note: the fragment-stage DirectBDRF function contains a compact fit of the Disney BRDF (annotated in detail further down).
Vertex shader: LitPassVertex
In day-to-day look development, the handling of normals and the use of SH have a large impact on the art result, and they are also the two places where mistakes creep in most easily. Getting both right gives a qualitative jump in visual quality; see points 6 and 8 below.
1、GetVertexPositionInputs
VertexPositionInputs vertexInput = GetVertexPositionInputs(input.positionOS.xyz);
VertexPositionInputs GetVertexPositionInputs(float3 positionOS)
{
VertexPositionInputs input;
input.positionWS = TransformObjectToWorld(positionOS);
input.positionVS = TransformWorldToView(input.positionWS);
input.positionCS = TransformWorldToHClip(input.positionWS);
float4 ndc = input.positionCS * 0.5f;
input.positionNDC.xy = float2(ndc.x, ndc.y * _ProjectionParams.x) + ndc.w;
input.positionNDC.zw = input.positionCS.zw;
return input;
}
struct VertexPositionInputs
{
float3 positionWS; // World space position
float3 positionVS; // View space position
float4 positionCS; // Homogeneous clip space position
float4 positionNDC;// Homogeneous normalized device coordinates
};
2、GetVertexNormalInputs
VertexNormalInputs normalInput = GetVertexNormalInputs(input.normalOS, input.tangentOS);
VertexNormalInputs GetVertexNormalInputs(float3 normalOS)
{
VertexNormalInputs tbn;
tbn.tangentWS = real3(1.0, 0.0, 0.0);
tbn.bitangentWS = real3(0.0, 1.0, 0.0);
tbn.normalWS = TransformObjectToWorldNormal(normalOS);
return tbn;
}
VertexNormalInputs GetVertexNormalInputs(float3 normalOS, float4 tangentOS)
{
VertexNormalInputs tbn;
// mikkts space compliant. only normalize when extracting the normal at the fragment stage.
real sign = tangentOS.w * GetOddNegativeScale();
tbn.normalWS = TransformObjectToWorldNormal(normalOS);
tbn.tangentWS = TransformObjectToWorldDir(tangentOS.xyz);
tbn.bitangentWS = cross(tbn.normalWS, tbn.tangentWS) * sign;
return tbn;
}
struct VertexNormalInputs
{
real3 tangentWS;
real3 bitangentWS;
float3 normalWS;
};
3、GetCameraPositionWS
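The note only names the call here; for reference, this is the helper as I remember it from URP's ShaderVariablesFunctions include (treat it as a sketch rather than a verbatim quote). LitPassVertex uses it to build the world-space view direction:
float3 GetCameraPositionWS()
{
    return _WorldSpaceCameraPos;
}
// Used in LitPassVertex roughly as: viewDirWS = GetCameraPositionWS() - vertexInput.positionWS;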
4、VertexLighting
half3 vertexLight = VertexLighting(vertexInput.positionWS, normalInput.normalWS);
half3 VertexLighting(float3 positionWS, half3 normalWS)
{
half3 vertexLightColor = half3(0.0, 0.0, 0.0);
#ifdef _ADDITIONAL_LIGHTS_VERTEX
uint lightsCount = GetAdditionalLightsCount();
for (uint lightIndex = 0u; lightIndex < lightsCount; ++lightIndex)
{
Light light = GetAdditionalLight(lightIndex, positionWS);
half3 lightColor = light.color * light.distanceAttenuation;
vertexLightColor += LightingLambert(lightColor, light.direction, normalWS);
}
#endif
return vertexLightColor;
}
5、ComputeFogFactor
half fogFactor = ComputeFogFactor(vertexInput.positionCS.z);
real ComputeFogFactor(float z)
{
float clipZ_01 = UNITY_Z_0_FAR_FROM_CLIPSPACE(z);
#if defined(FOG_LINEAR)
// factor = (end-z)/(end-start) = z * (-1/(end-start)) + (end/(end-start))
float fogFactor = saturate(clipZ_01 * unity_FogParams.z + unity_FogParams.w);
return real(fogFactor);
#elif defined(FOG_EXP) || defined(FOG_EXP2)
// factor = exp(-(density*z)^2)
// -density * z computed at vertex
return real(unity_FogParams.x * clipZ_01);
#else
return 0.0h;
#endif
}
6、NormalizeNormalPerVertex (normal normalization)
output.normalWS = NormalizeNormalPerVertex(normalInput.normalWS);
real3 NormalizeNormalPerVertex(real3 normalWS)
{
#if defined(SHADER_QUALITY_LOW) && defined(_NORMALMAP)
return normalWS;
#else
return normalize(normalWS);
#endif
}
Why this normalization matters: for best quality, normals should be normalized both before and after interpolation.
At the vertex, skinning or blend shapes can change the length of the normal significantly.
At the pixel, even unit-length output normals become non-unit after interpolation, and when a normal map is used mikktspace builds a non-orthonormal basis.
URP balances performance against quality here and lets you choose through shader quality tiers:
Low quality tier: normalize per vertex or per pixel, depending on whether a normal map is sampled.
Medium quality tier: always normalize per vertex; normalize per pixel only when a normal map is used.
High quality tier: normalize in both the vertex and the pixel shader.
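The per-pixel counterpart, NormalizeNormalPerPixel, is called later in InitializeInputData (fragment section, point 2). A minimal sketch consistent with the quality tiers above; the exact preprocessor condition in the package may differ, so check Lighting.hlsl:
real3 NormalizeNormalPerPixel(real3 normalWS)
{
    // High tier always normalizes; otherwise only when a normal map made the
    // interpolated normal non-unit (sketch; the shipped condition may differ).
#if defined(SHADER_QUALITY_HIGH) || defined(_NORMALMAP)
    return normalize(normalWS);
#else
    return normalWS;
#endif
}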
7、OUTPUT_LIGHTMAP_UV
OUTPUT_LIGHTMAP_UV(input.lightmapUV, unity_LightmapST, output.lightmapUV);
#ifdef LIGHTMAP_ON
#define DECLARE_LIGHTMAP_OR_SH(lmName, shName, index) float2 lmName : TEXCOORD##index
#define OUTPUT_LIGHTMAP_UV(lightmapUV, lightmapScaleOffset, OUT) OUT.xy = lightmapUV.xy * lightmapScaleOffset.xy + lightmapScaleOffset.zw;
#define OUTPUT_SH(normalWS, OUT)
#else
#define DECLARE_LIGHTMAP_OR_SH(lmName, shName, index) half3 shName : TEXCOORD##index
#define OUTPUT_LIGHTMAP_UV(lightmapUV, lightmapScaleOffset, OUT)
#define OUTPUT_SH(normalWS, OUT) OUT.xyz = SampleSHVertex(normalWS)
#endif
8、OUTPUT_SH
The per-vertex SH color is used all the time for characters and scenes. A macro decides whether the plain sample version or the linear version is used; detailed tests follow later in these notes.
OUTPUT_SH(output.normalWS.xyz, output.vertexSH);
(Same LIGHTMAP_ON macro block as listed under point 7: when LIGHTMAP_ON is not defined, OUTPUT_SH(normalWS, OUT) expands to OUT.xyz = SampleSHVertex(normalWS).)
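For reference, SampleSHVertex (which OUTPUT_SH expands to) chooses between fully-per-vertex, mixed and fully-per-pixel evaluation. Reproduced from memory of the URP 7.x source, so treat it as a sketch:
half3 SampleSHVertex(half3 normalWS)
{
#if defined(EVALUATE_SH_VERTEX)
    // Fully per vertex
    return max(half3(0, 0, 0), SampleSH(normalWS));
#elif defined(EVALUATE_SH_MIXED)
    // No max since this is only the L2 contribution; L0/L1 are added per pixel
    return SHEvalLinearL2(normalWS, unity_SHBr, unity_SHBg, unity_SHBb, unity_SHC);
#endif

    // Fully per pixel: nothing to compute in the vertex stage
    return half3(0.0, 0.0, 0.0);
}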
9、GetShadowCoord
#if defined(REQUIRES_VERTEX_SHADOW_COORD_INTERPOLATOR)
output.shadowCoord = GetShadowCoord(vertexInput);
#endif
float4 GetShadowCoord(VertexPositionInputs vertexInput)
{
return TransformWorldToShadowCoord(vertexInput.positionWS);
}
half ComputeCascadeIndex(float3 positionWS)
{
float3 fromCenter0 = positionWS - _CascadeShadowSplitSpheres0.xyz;
float3 fromCenter1 = positionWS - _CascadeShadowSplitSpheres1.xyz;
float3 fromCenter2 = positionWS - _CascadeShadowSplitSpheres2.xyz;
float3 fromCenter3 = positionWS - _CascadeShadowSplitSpheres3.xyz;
float4 distances2 = float4(dot(fromCenter0, fromCenter0), dot(fromCenter1, fromCenter1), dot(fromCenter2, fromCenter2), dot(fromCenter3, fromCenter3));
half4 weights = half4(distances2 < _CascadeShadowSplitSphereRadii);
weights.yzw = saturate(weights.yzw - weights.xyz);
return 4 - dot(weights, half4(4, 3, 2, 1));
}
float4 TransformWorldToShadowCoord(float3 positionWS)
{
#ifdef _MAIN_LIGHT_SHADOWS_CASCADE
half cascadeIndex = ComputeCascadeIndex(positionWS);
#else
half cascadeIndex = 0;
#endif
return mul(_MainLightWorldToShadow[cascadeIndex], float4(positionWS, 1.0));
}
Fragment shader: LitPassFragment
Struct SurfaceData
1、InitializeStandardLitSurfaceData: packing the texture inputs
Input: the float2 UV passed down from the vertex stage
Output: the SurfaceData struct
Purpose: initialize the surface data by sampling and packing every texture input.
1.1 Sample the albedo map first and take its alpha channel.
1.2 Use that alpha for early alpha-test clipping (a performance win: it skips all the work that follows for discarded pixels).
1.3 Depending on the workflow, build the specGloss value: metallic/smoothness values or the metallic (or specular) gloss map, each with its own intensity control.
1.4 Sample the normal map, unpack it and apply the _BumpScale control.
1.5 Sample the occlusion map (behind a shader-quality check), with strength control.
1.6 Sample the emission map; the HDR _EmissionColor carries its own intensity.
output.uv = TRANSFORM_TEX(input.texcoord, _BaseMap); // UV transform, done in the vertex stage
InitializeStandardLitSurfaceData(input.uv, surfaceData); // done in the fragment stage
inline void InitializeStandardLitSurfaceData(float2 uv, out SurfaceData outSurfaceData)
{
half4 albedoAlpha = SampleAlbedoAlpha(uv, TEXTURE2D_ARGS(_BaseMap, sampler_BaseMap));
outSurfaceData.alpha = Alpha(albedoAlpha.a, _BaseColor, _Cutoff);
half4 specGloss = SampleMetallicSpecGloss(uv, albedoAlpha.a);
outSurfaceData.albedo = albedoAlpha.rgb * _BaseColor.rgb;
#if _SPECULAR_SETUP
outSurfaceData.metallic = 1.0h;
outSurfaceData.specular = specGloss.rgb;
#else
outSurfaceData.metallic = specGloss.r;
outSurfaceData.specular = half3(0.0h, 0.0h, 0.0h);
#endif
outSurfaceData.smoothness = specGloss.a;
outSurfaceData.normalTS = SampleNormal(uv, TEXTURE2D_ARGS(_BumpMap, sampler_BumpMap), _BumpScale);
outSurfaceData.occlusion = SampleOcclusion(uv);
outSurfaceData.emission = SampleEmission(uv, _EmissionColor.rgb, TEXTURE2D_ARGS(_EmissionMap, sampler_EmissionMap));
}
1.1、SampleAlbedoAlpha
half4 albedoAlpha = SampleAlbedoAlpha(uv, TEXTURE2D_ARGS(_BaseMap, sampler_BaseMap));
half4 SampleAlbedoAlpha(float2 uv, TEXTURE2D_PARAM(albedoAlphaMap, sampler_albedoAlphaMap))
{
return SAMPLE_TEXTURE2D(albedoAlphaMap, sampler_albedoAlphaMap, uv);
}
1.2、Alpha
outSurfaceData.alpha = Alpha(albedoAlpha.a, _BaseColor, _Cutoff);
half Alpha(half albedoAlpha, half4 color, half cutoff)
{
#if !defined(_SMOOTHNESS_TEXTURE_ALBEDO_CHANNEL_A) && !defined(_GLOSSINESS_FROM_BASE_ALPHA)
half alpha = albedoAlpha * color.a;
#else
half alpha = color.a;
#endif
#if defined(_ALPHATEST_ON)
clip(alpha - cutoff);
#endif
return alpha;
}
1.3、SampleMetallicSpecGloss
half4 specGloss = SampleMetallicSpecGloss(uv, albedoAlpha.a);
#ifdef _SPECULAR_SETUP
#define SAMPLE_METALLICSPECULAR(uv) SAMPLE_TEXTURE2D(_SpecGlossMap, sampler_SpecGlossMap, uv)
#else
#define SAMPLE_METALLICSPECULAR(uv) SAMPLE_TEXTURE2D(_MetallicGlossMap, sampler_MetallicGlossMap, uv)
#endif
half4 SampleMetallicSpecGloss(float2 uv, half albedoAlpha)
{
half4 specGloss;
#ifdef _METALLICSPECGLOSSMAP
specGloss = SAMPLE_METALLICSPECULAR(uv);
#ifdef _SMOOTHNESS_TEXTURE_ALBEDO_CHANNEL_A
specGloss.a = albedoAlpha * _Smoothness;
#else
specGloss.a *= _Smoothness;
#endif
#else // _METALLICSPECGLOSSMAP
#if _SPECULAR_SETUP
specGloss.rgb = _SpecColor.rgb;
#else
specGloss.rgb = _Metallic.rrr;
#endif
#ifdef _SMOOTHNESS_TEXTURE_ALBEDO_CHANNEL_A
specGloss.a = albedoAlpha * _Smoothness;
#else
specGloss.a = _Smoothness;
#endif
#endif
return specGloss;
}
1.4、Normal handling
Generally speaking, for normal maps the call chain ultimately ends up in the UnpackNormalAG function.
outSurfaceData.normalTS = SampleNormal(uv, TEXTURE2D_ARGS(_BumpMap, sampler_BumpMap), _BumpScale);
half3 SampleNormal(float2 uv, TEXTURE2D_PARAM(bumpMap, sampler_bumpMap), half scale = 1.0h)
{
#ifdef _NORMALMAP
half4 n = SAMPLE_TEXTURE2D(bumpMap, sampler_bumpMap, uv);
#if BUMP_SCALE_NOT_SUPPORTED
return UnpackNormal(n);
#else
return UnpackNormalScale(n, scale);
#endif
#else
return half3(0.0h, 0.0h, 1.0h);
#endif
}
1.5、SampleOcclusion
outSurfaceData.occlusion = SampleOcclusion(uv);
half SampleOcclusion(float2 uv)
{
#ifdef _OCCLUSIONMAP
// TODO: Controls things like these by exposing SHADER_QUALITY levels (low, medium, high)
#if defined(SHADER_API_GLES)
return SAMPLE_TEXTURE2D(_OcclusionMap, sampler_OcclusionMap, uv).g;
#else
half occ = SAMPLE_TEXTURE2D(_OcclusionMap, sampler_OcclusionMap, uv).g;
return LerpWhiteTo(occ, _OcclusionStrength);
#endif
#else
return 1.0;
#endif
}
1.6、SampleEmission
outSurfaceData.emission = SampleEmission(uv, _EmissionColor.rgb, TEXTURE2D_ARGS(_EmissionMap, sampler_EmissionMap));
half3 SampleEmission(float2 uv, half3 emissionColor, TEXTURE2D_PARAM(emissionMap, sampler_emissionMap))
{
#ifndef _EMISSION
return 0;
#else
return SAMPLE_TEXTURE2D(emissionMap, sampler_emissionMap, uv).rgb * emissionColor;
#endif
}
Struct InputData
2、InitializeInputData: initialize the PBR input data from what the vertex stage passes down
Input: the Varyings struct and the tangent-space normal from the normal map
Output: the InputData struct
Purpose: prepare the inputs for the PBR evaluation that follows.
2.1 Transform the normal from tangent space to world space.
2.2 Re-normalize the normal (see point 6 in the vertex-shader section for why).
2.3 Guard against a zero-length view vector (SafeNormalize).
2.4 The shadow coordinate.
2.5 Simple global illumination: a macro chooses between the lightmap and per-pixel SH (not the vertex SH). In practice, characters usually use SH and scenes use lightmaps.
InitializeInputData(input, surfaceData.normalTS, inputData);
void InitializeInputData(Varyings input, half3 normalTS, out InputData inputData)
{
inputData = (InputData)0;
#if defined(REQUIRES_WORLD_SPACE_POS_INTERPOLATOR)
inputData.positionWS = input.positionWS;
#endif
#ifdef _NORMALMAP
half3 viewDirWS = half3(input.normalWS.w, input.tangentWS.w, input.bitangentWS.w);
inputData.normalWS = TransformTangentToWorld(normalTS,
half3x3(input.tangentWS.xyz, input.bitangentWS.xyz, input.normalWS.xyz));
#else
half3 viewDirWS = input.viewDirWS;
inputData.normalWS = input.normalWS;
#endif
inputData.normalWS = NormalizeNormalPerPixel(inputData.normalWS);
viewDirWS = SafeNormalize(viewDirWS);
inputData.viewDirectionWS = viewDirWS;
#if defined(REQUIRES_VERTEX_SHADOW_COORD_INTERPOLATOR)
inputData.shadowCoord = input.shadowCoord;
#elif defined(MAIN_LIGHT_CALCULATE_SHADOWS)
inputData.shadowCoord = TransformWorldToShadowCoord(inputData.positionWS);
#else
inputData.shadowCoord = float4(0, 0, 0, 0);
#endif
inputData.fogCoord = input.fogFactorAndVertexLight.x;
inputData.vertexLighting = input.fogFactorAndVertexLight.yzw;
inputData.bakedGI = SAMPLE_GI(input.lightmapUV, input.vertexSH, inputData.normalWS);
}
2.1-2.4 are handled directly in the code above: the normal-map transform (2.1), re-normalization (2.2), the safe view direction (2.3) and the shadow coordinate (2.4).
2.5、SAMPLE_GI: simple global-illumination lookup
inputData.bakedGI = SAMPLE_GI(input.lightmapUV, input.vertexSH, inputData.normalWS);
// We sample GI from a baked lightmap or from probes.
// If lightmap: sampleData.xy = lightmapUV
// If probe: sampleData.xyz = the L2 SH terms
#ifdef LIGHTMAP_ON
#define SAMPLE_GI(lmName, shName, normalWSName) SampleLightmap(lmName, normalWSName)
#else
#define SAMPLE_GI(lmName, shName, normalWSName) SampleSHPixel(shName, normalWSName)
#endif
2.5.1、SampleLightmap
// Sample the baked lightmap. Non-directional, or directional if available.
// Realtime GI is not supported.
half3 SampleLightmap(float2 lightmapUV, half3 normalWS)
{
#ifdef UNITY_LIGHTMAP_FULL_HDR
bool encodedLightmap = false;
#else
bool encodedLightmap = true;
#endif
half4 decodeInstructions = half4(LIGHTMAP_HDR_MULTIPLIER, LIGHTMAP_HDR_EXPONENT, 0.0h, 0.0h);
// The shader library's lightmap sampling functions transform the lightmap UVs to apply bias and scale.
// However, the Universal pipeline already transformed those coordinates in the vertex stage. We pass half4(1, 1, 0, 0)
// and the compiler optimizes the transform away.
half4 transformCoords = half4(1, 1, 0, 0);
#ifdef DIRLIGHTMAP_COMBINED
return SampleDirectionalLightmap(TEXTURE2D_ARGS(unity_Lightmap, samplerunity_Lightmap),
TEXTURE2D_ARGS(unity_LightmapInd, samplerunity_Lightmap),
lightmapUV, transformCoords, normalWS, encodedLightmap, decodeInstructions);
#elif defined(LIGHTMAP_ON)
return SampleSingleLightmap(TEXTURE2D_ARGS(unity_Lightmap, samplerunity_Lightmap), lightmapUV, transformCoords, encodedLightmap, decodeInstructions);
#else
return half3(0.0, 0.0, 0.0);
#endif
}
2.5.2、SampleSHPixel
Test comparison (four rows in the original images): row 1 is the simplest vertex SH; row 2 is the vertex linear SH (which has to be blended with the per-pixel SH term); row 3 is the fully per-pixel SH; row 4 is the per-pixel linear SH blended with the vertex linear result (in testing it differs little from the vertex SH). Judging from these tests, pure per-pixel SH looks best. Used flexibly, SH has a wide range of uses.
// SH pixel evaluation. Depending on the target, SH sampling may be done
// mixed per pixel or fully per pixel. See SampleSHVertex.
half3 SampleSHPixel(half3 L2Term, half3 normalWS)
{
#if defined(EVALUATE_SH_VERTEX)
return L2Term;
#elif defined(EVALUATE_SH_MIXED)
half3 L0L1Term = SHEvalLinearL0L1(normalWS, unity_SHAr, unity_SHAg, unity_SHAb);
return max(half3(0, 0, 0), L2Term + L0L1Term);
#endif
// Default: fully evaluate SH per pixel
return SampleSH(normalWS);
}
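In the fully per-pixel path, SampleSH gathers the seven SH coefficient vectors and hands them to SampleSH9 below. Reproduced from memory of the URP source, so treat it as a sketch:
half3 SampleSH(half3 normalWS)
{
    // LPPV is not supported in the Universal pipeline
    real4 SHCoefficients[7];
    SHCoefficients[0] = unity_SHAr;
    SHCoefficients[1] = unity_SHAg;
    SHCoefficients[2] = unity_SHAb;
    SHCoefficients[3] = unity_SHBr;
    SHCoefficients[4] = unity_SHBg;
    SHCoefficients[5] = unity_SHBb;
    SHCoefficients[6] = unity_SHC;

    return max(half3(0, 0, 0), SampleSH9(SHCoefficients, normalWS));
}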
#if HAS_HALF
half3 SampleSH9(half4 SHCoefficients[7], half3 N)
{
half4 shAr = SHCoefficients[0];
half4 shAg = SHCoefficients[1];
half4 shAb = SHCoefficients[2];
half4 shBr = SHCoefficients[3];
half4 shBg = SHCoefficients[4];
half4 shBb = SHCoefficients[5];
half4 shCr = SHCoefficients[6];
// Linear + constant polynomial terms
half3 res = SHEvalLinearL0L1(N, shAr, shAg, shAb);
// Quadratic polynomials
res += SHEvalLinearL2(N, shBr, shBg, shBb, shCr);
return res;
}
#endif
float3 SampleSH9(float4 SHCoefficients[7], float3 N)
{
float4 shAr = SHCoefficients[0];
float4 shAg = SHCoefficients[1];
float4 shAb = SHCoefficients[2];
float4 shBr = SHCoefficients[3];
float4 shBg = SHCoefficients[4];
float4 shBb = SHCoefficients[5];
float4 shCr = SHCoefficients[6];
// Linear + constant polynomial terms
float3 res = SHEvalLinearL0L1(N, shAr, shAg, shAb);
// Quadratic polynomials
res += SHEvalLinearL2(N, shBr, shBg, shBb, shCr);
return res;
}
3、UniversalFragmentPBR: the BRDF/PBR evaluation
Input: the InputData struct and the SurfaceData struct
Output: the final BRDF color
Purpose: the BRDF/PBR evaluation, in five steps (a sketch of the top-level function follows below).
3.1 Initialize the BRDFData struct: derive the diffuse and specular colors from the metallic (or specular) workflow; compute the grazing term grazingTerm, the perceptual roughness perceptualRoughness, its square roughness, its fourth power roughness2, the specular normalization term normalizationTerm, and roughness2MinusOne (roughness2 - 1).
3.2 Initialize the Light struct: the main directional light's direction, the distance/probe-occlusion attenuation distanceAttenuation, the shadow term shadowAttenuation, and the main light color.
3.3 Mix realtime and baked GI (the lightmap blend).
3.4 Global illumination: the indirect BRDF term.
3.5 Physically based lighting: the direct BRDF term.
Once you are fluent with function fitting you no longer need these illustrative tests; you can explain things with the fitted curves directly.
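The top-level function itself is not quoted in these notes; here is its URP 7.x shape as I remember it (a sketch, the package version may differ in detail). It simply chains steps 3.1 to 3.5; the Light-taking overload of LightingPhysicallyBased just unpacks the light and forwards to the version quoted in 3.5:
half4 UniversalFragmentPBR(InputData inputData, half3 albedo, half metallic, half3 specular,
    half smoothness, half occlusion, half3 emission, half alpha)
{
    BRDFData brdfData;
    InitializeBRDFData(albedo, metallic, specular, smoothness, alpha, brdfData); // 3.1

    Light mainLight = GetMainLight(inputData.shadowCoord); // 3.2
    MixRealtimeAndBakedGI(mainLight, inputData.normalWS, inputData.bakedGI, half4(0, 0, 0, 0)); // 3.3

    half3 color = GlobalIllumination(brdfData, inputData.bakedGI, occlusion,
                                     inputData.normalWS, inputData.viewDirectionWS); // 3.4
    color += LightingPhysicallyBased(brdfData, mainLight,
                                     inputData.normalWS, inputData.viewDirectionWS); // 3.5

#ifdef _ADDITIONAL_LIGHTS
    uint pixelLightCount = GetAdditionalLightsCount();
    for (uint lightIndex = 0u; lightIndex < pixelLightCount; ++lightIndex)
    {
        Light light = GetAdditionalLight(lightIndex, inputData.positionWS);
        color += LightingPhysicallyBased(brdfData, light, inputData.normalWS, inputData.viewDirectionWS);
    }
#endif

#ifdef _ADDITIONAL_LIGHTS_VERTEX
    color += inputData.vertexLighting * brdfData.diffuse;
#endif

    color += emission;
    return half4(color, alpha);
}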
Struct BRDFData
struct BRDFData
{
half3 diffuse;
half3 specular;
half perceptualRoughness;
half roughness;
half roughness2;
half grazingTerm;
// We save some light-invariant BRDF terms so we don't have to recompute
// them in the light loop. See the DirectBRDF function for a detailed explanation.
half normalizationTerm; // roughness * 4.0 + 2.0
half roughness2MinusOne; // roughness^2 - 1.0
};
3.1、InitializeBRDFData
inline void InitializeBRDFData(half3 albedo, half metallic, half3 specular, half smoothness, half alpha, out BRDFData outBRDFData)
{
#ifdef _SPECULAR_SETUP
half reflectivity = ReflectivitySpecular(specular);
half oneMinusReflectivity = 1.0 - reflectivity;
outBRDFData.diffuse = albedo * (half3(1.0h, 1.0h, 1.0h) - specular);
outBRDFData.specular = specular;
#else
half oneMinusReflectivity = OneMinusReflectivityMetallic(metallic);
half reflectivity = 1.0 - oneMinusReflectivity;
outBRDFData.diffuse = albedo * oneMinusReflectivity;
outBRDFData.specular = lerp(kDieletricSpec.rgb, albedo, metallic);
#endif
outBRDFData.grazingTerm = saturate(smoothness + reflectivity);
outBRDFData.perceptualRoughness = PerceptualSmoothnessToPerceptualRoughness(smoothness);
outBRDFData.roughness = max(PerceptualRoughnessToRoughness(outBRDFData.perceptualRoughness), HALF_MIN);
outBRDFData.roughness2 = outBRDFData.roughness * outBRDFData.roughness;
outBRDFData.normalizationTerm = outBRDFData.roughness * 4.0h + 2.0h;
outBRDFData.roughness2MinusOne = outBRDFData.roughness2 - 1.0h;
#ifdef _ALPHAPREMULTIPLY_ON
outBRDFData.diffuse *= alpha;
alpha = alpha * oneMinusReflectivity + reflectivity;
#endif
}
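InitializeBRDFData uses two helpers that are not quoted above: the dielectric F0 constant kDieletricSpec (Unity's own spelling) and OneMinusReflectivityMetallic. A sketch from memory of the URP/built-in source:
#define kDieletricSpec half4(0.04, 0.04, 0.04, 1.0 - 0.04) // standard dielectric reflectivity at normal incidence (4%)

half OneMinusReflectivityMetallic(half metallic)
{
    // 1 - reflectivity = 1 - lerp(dielectricSpec, 1, metallic)
    //                  = lerp(1 - dielectricSpec, 0, metallic)
    // (1 - dielectricSpec) is stored in kDieletricSpec.a, so this collapses to a single MAD.
    half oneMinusDielectricSpec = kDieletricSpec.a;
    return oneMinusDielectricSpec - metallic * oneMinusDielectricSpec;
}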
Struct Light
3.2、GetMainLight
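Neither the Light struct nor GetMainLight is quoted in these notes; here is a sketch of the URP 7.x versions from memory (note how the probe occlusion mentioned in 3.2 is folded into distanceAttenuation):
struct Light
{
    half3 direction;
    half3 color;
    half  distanceAttenuation;
    half  shadowAttenuation;
};

Light GetMainLight()
{
    Light light;
    light.direction = _MainLightPosition.xyz;
    // unity_LightData.z is 1 when the light is not culled for this object, otherwise 0
    light.distanceAttenuation = unity_LightData.z;
#if defined(LIGHTMAP_ON) || defined(_MIXED_LIGHTING_SUBTRACTIVE)
    // Baked probe occlusion for the main light
    light.distanceAttenuation *= unity_ProbesOcclusion.x;
#endif
    light.shadowAttenuation = 1.0;
    light.color = _MainLightColor.rgb;
    return light;
}

Light GetMainLight(float4 shadowCoord)
{
    Light light = GetMainLight();
    light.shadowAttenuation = MainLightRealtimeShadow(shadowCoord);
    return light;
}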
3.3、MixRealtimeAndBakedGI
MixRealtimeAndBakedGI(mainLight, inputData.normalWS, inputData.bakedGI, half4(0, 0, 0, 0));
void MixRealtimeAndBakedGI(inout Light light, half3 normalWS, inout half3 bakedGI, half4 shadowMask)
{
#if defined(_MIXED_LIGHTING_SUBTRACTIVE) && defined(LIGHTMAP_ON)
bakedGI = SubtractDirectMainLightFromLightmap(light, normalWS, bakedGI);
#endif
}
half3 SubtractDirectMainLightFromLightmap(Light mainLight, half3 normalWS, half3 bakedGI)
{
// Let's try to make realtime shadows work on a surface that already contains
// baked lighting and shadowing from the main sun light.
// Summary:
// 1) Compute a plausible shadowed value by subtracting the estimated light contribution
//    from the places occluded by the realtime shadow:
//    a) preserves other baked lights and bounce light
//    b) eliminates shadows on geometry facing away from the light
// 2) Clamp against the user-defined ShadowColor.
// 3) Pick the original lightmap value if it is the darkest one.
// 1) Gives a good estimate of illumination as if the light had been shadowed during the bake.
//    We only subtract the main directional light; this is accounted for in the contribution term below.
half shadowStrength = GetMainLightShadowStrength();
half contributionTerm = saturate(dot(mainLight.direction, normalWS));
half3 lambert = mainLight.color * contributionTerm;
half3 estimatedLightContributionMaskedByInverseOfShadow = lambert * (1.0 - mainLight.shadowAttenuation);
half3 subtractedLightmap = bakedGI - estimatedLightContributionMaskedByInverseOfShadow;
// 2) Allows the user to define the overall ambient of the scene and control the case where the realtime shadow becomes too dark.
half3 realtimeShadow = max(subtractedLightmap, _SubtractiveShadowColor.xyz);
realtimeShadow = lerp(bakedGI, realtimeShadow, shadowStrength);
// 3) Pick the darkest color
return min(bakedGI, realtimeShadow);
}
3.4、GlobalIllumination
half3 color = GlobalIllumination(brdfData, inputData.bakedGI, occlusion, inputData.normalWS, inputData.viewDirectionWS);
half3 GlobalIllumination(BRDFData brdfData, half3 bakedGI, half occlusion, half3 normalWS, half3 viewDirectionWS)
{
half3 reflectVector = reflect(-viewDirectionWS, normalWS);
half fresnelTerm = Pow4(1.0 - saturate(dot(normalWS, viewDirectionWS)));
half3 indirectDiffuse = bakedGI * occlusion;
half3 indirectSpecular = GlossyEnvironmentReflection(reflectVector, brdfData.perceptualRoughness, occlusion);
return EnvironmentBRDF(brdfData, indirectDiffuse, indirectSpecular, fresnelTerm);
}
GlossyEnvironmentReflection
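GlobalIllumination gets its indirect specular from GlossyEnvironmentReflection, which samples a reflection-probe mip chosen from the perceptual roughness. Reproduced from memory of the URP 7.x source, so treat it as a sketch (the exact branches may differ):
half3 GlossyEnvironmentReflection(half3 reflectVector, half perceptualRoughness, half occlusion)
{
#if !defined(_ENVIRONMENTREFLECTIONS_OFF)
    // Rougher surfaces read a blurrier (higher) mip of the probe
    half mip = PerceptualRoughnessToMipmapLevel(perceptualRoughness);
    half4 encodedIrradiance = SAMPLE_TEXTURECUBE_LOD(unity_SpecCube0, samplerunity_SpecCube0, reflectVector, mip);

#if !defined(UNITY_USE_NATIVE_HDR)
    half3 irradiance = DecodeHDREnvironment(encodedIrradiance, unity_SpecCube0_HDR);
#else
    half3 irradiance = encodedIrradiance.rgb;
#endif

    return irradiance * occlusion;
#endif

    // Reflections disabled: fall back to the flat ambient reflection color
    return _GlossyEnvironmentColor.rgb * occlusion;
}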
EnvironmentBRDF
return EnvironmentBRDF(brdfData, indirectDiffuse, indirectSpecular, fresnelTerm);
half3 EnvironmentBRDF(BRDFData brdfData, half3 indirectDiffuse, half3 indirectSpecular, half fresnelTerm)
{
half3 c = indirectDiffuse * brdfData.diffuse;
float surfaceReduction = 1.0 / (brdfData.roughness2 + 1.0);
c += surfaceReduction * indirectSpecular * lerp(brdfData.specular, brdfData.grazingTerm, fresnelTerm);
return c;
}
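Restating the code above as a formula (nothing new, just the code), with $k_d$ = brdfData.diffuse, $F_0$ = brdfData.specular, $F_{90}$ = grazingTerm, $\alpha^2$ = roughness2, and the fresnelTerm $(1 - n\cdot v)^4$ coming from GlobalIllumination:
$$c_{indirect} = k_d \cdot GI_{diffuse} + \frac{GI_{specular}}{\alpha^2 + 1}\cdot \mathrm{lerp}\big(F_0,\ F_{90},\ (1 - n\cdot v)^4\big)$$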
3.5、LightingPhysicallyBased
half3 LightingPhysicallyBased(BRDFData brdfData, half3 lightColor, half3 lightDirectionWS, half lightAttenuation, half3 normalWS, half3 viewDirectionWS)
{
half NdotL = saturate(dot(normalWS, lightDirectionWS));
half3 radiance = lightColor * (lightAttenuation * NdotL);
return DirectBDRF(brdfData, normalWS, lightDirectionWS, viewDirectionWS) * radiance;
}
DirectBDRF (with the fitted-formula notes)
// Based on a minimalist CookTorrance BRDF
// The implementation differs slightly from the original derivation: http://www.thetenthplanet.de/archives/255
// * NDF [Modified] GGX
// * Modified Kelemen and Szirmay-Kalos visibility term
// * Fresnel approximated with 1/LdotH
half3 DirectBDRF(BRDFData brdfData, half3 normalWS, half3 lightDirectionWS, half3 viewDirectionWS)
{
#ifndef _SPECULARHIGHLIGHTS_OFF
float3 halfDir = SafeNormalize(float3(lightDirectionWS) + float3(viewDirectionWS));
float NoH = saturate(dot(normalWS, halfDir));
half LoH = saturate(dot(lightDirectionWS, halfDir));
// GGX distribution multiplied by a combined approximation of visibility and Fresnel
// BRDFspec = (D * V * F) / 4.0
// D = roughness^2 / ( NoH^2 * (roughness^2 - 1) + 1 )^2
// V * F = 1.0 / ( LoH^2 * (roughness + 0.5) )
// See "Optimizing PBR for Mobile" from the Siggraph 2015 Moving Mobile Graphics course
// https://community.arm.com/events/1155
// Final BRDFspec = roughness^2 / ( ( NoH^2 * (roughness^2 - 1) + 1 )^2 * LoH^2 * (roughness + 0.5) * 4.0 )
// We further optimize a few light-invariant terms:
// brdfData.normalizationTerm = (roughness + 0.5) * 4.0, rewritten as roughness * 4.0 + 2.0 to fit a MAD.
float d = NoH * NoH * brdfData.roughness2MinusOne + 1.00001f;
half LoH2 = LoH * LoH;
half specularTerm = brdfData.roughness2 / ((d * d) * max(0.1h, LoH2) * brdfData.normalizationTerm);
// On platforms where half actually means something, the denominator risks overflowing.
// The clamp below was added specifically to "fix" that, but the DX compiler (whose bytecode we convert to Metal/GLES)
// sees that specularTerm has only non-negative terms, so it skips max(0, ..) in the clamp (leaving only min(100, ...)).
#if defined (SHADER_API_MOBILE) || defined (SHADER_API_SWITCH)
specularTerm = specularTerm - HALF_MIN;
specularTerm = clamp(specularTerm, 0.0, 100.0); // Prevent FP16 overflow on mobiles
#endif
half3 color = specularTerm * brdfData.specular + brdfData.diffuse;
return color;
#else
return brdfData.diffuse;
#endif
}
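Putting the fitted formula from the comments into one expression, with $\alpha$ = brdfData.roughness (so $4\alpha + 2$ is the normalizationTerm and $\alpha^2 - 1$ is roughness2MinusOne):
$$specularTerm = \frac{\alpha^2}{\big((n\cdot h)^2(\alpha^2 - 1) + 1\big)^2\cdot \max\big(0.1,\ (l\cdot h)^2\big)\cdot (4\alpha + 2)}$$
The per-light color is then specularTerm * brdfData.specular + brdfData.diffuse, multiplied by the radiance (lightColor * lightAttenuation * NdotL) computed in LightingPhysicallyBased.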
3.6、GetAdditionalLightsCount (additional lights are skipped for now)
4、MixFog
real ComputeFogIntensity(real fogFactor)
{
real fogIntensity = 0.0h;
#if defined(FOG_LINEAR) || defined(FOG_EXP) || defined(FOG_EXP2)
#if defined(FOG_EXP)
// factor = exp(-density*z)
// fogFactor = density*z compute at vertex
fogIntensity = saturate(exp2(-fogFactor));
#elif defined(FOG_EXP2)
// factor = exp(-(density*z)^2)
// fogFactor = density*z compute at vertex
fogIntensity = saturate(exp2(-fogFactor * fogFactor));
#elif defined(FOG_LINEAR)
fogIntensity = fogFactor;
#endif
#endif
return fogIntensity;
}
half3 MixFogColor(real3 fragColor, real3 fogColor, real fogFactor)
{
#if defined(FOG_LINEAR) || defined(FOG_EXP) || defined(FOG_EXP2)
real fogIntensity = ComputeFogIntensity(fogFactor);
fragColor = lerp(fogColor, fragColor, fogIntensity);
#endif
return fragColor;
}
half3 MixFog(real3 fragColor, real fogFactor)
{
return MixFogColor(fragColor, unity_FogColor.rgb, fogFactor);
}
Closing thoughts: these notes grew rather long, so they are split into three parts. Part one, "An Analysis of the Disney BRDF", records my study of the Disney equations; part two, "An Analysis of the Unity Built-in BRDF", works through the principles of Unity's built-in pipeline; part three, this one, "An Analysis of the Unity URP BRDF", covers PBR under Unity's URP. If rendering were building a house, the foundation would be a workable standard PBR material, the frame would be lighting, the finishing touch post-processing, and everything else (atmospheric fog, water, VFX) the bricks added on top.
Unity ships its own solid PBR fit that balances quality and performance, so there is no need to agonize over the fact that the full Disney BRDF was not ported over; for mobile platforms it is extremely friendly. (With only half-learned curve-fitting skills, it is best not to compete with Unity's Aras on fits that balance quality and performance; if you want to deeply optimize the PBR look, master fitting first. Aras Pranckevičius does his fitting in Excel :P.) Unity's shader source is worth studying in depth; it is a huge treasure trove and, so far, my best teacher by example. The fundamentals of graphics and its algorithms are equally good teachers, and books will teach you even more. I often compare shader effect work to martial-arts training: being able to write shaders is like external kung fu (much of the time you are lifting other people's effects and adding a small twist of your own, which is perfectly workable); writing shaders well, creating your own style and artistic results, is internal kung fu (fluently applying the mathematics of graphics instead of copying, on a solid mathematical footing). Train the external while keeping the internal solid, and over time the results will show.
In most Unity projects, the reason the art quality never comes together is that the full Unity feature set is not used correctly. Often most things are set up right, but a few places are not, and those mistakes tend to be fatal.
These three articles still leave a lot uncovered: shadows are not explored in depth, nor is the lightmap part, nor function fitting, and so on.
A simplified URP PBR shader that I use for personal study and testing, with the complicated keyword logic removed so it is easy to maintain. It is for testing only; I do not recommend dropping it straight into a mobile project, because most of Unity's mobile optimizations were stripped out (:P).
Repository:
https://github.com/MasterWangdaoyong/Shader-Graph/tree/main/Algorithm/Unity%20URP%20BRDF
Essential references:
Substance PBR guide
Substance Academy: academy.substance3d.com
Marmoset PBR guide
https://marmoset.co/posts/basic-theory-of-physically-based-rendering/
Original SIGGRAPH 2012 paper: Physically-Based Shading at Disney
My organized Chinese version:
github.com/MasterWangdaoyong/Shader-Graph
2017 lecture notes: Reflectance Models (BRDF)
https://cgg.mff.cuni.cz/~pepca/lectures/pdf/pg2-05-brdf.en.pdf
Yan Lingqi:
GAMES: Graphics And Mixed Environment Seminar (games-cn.org)
Hoping to connect with others studying GAMES 101, 102, 201 and 202 :)
My annotated study notes:
https://github.com/MasterWangdaoyong/Shader-Graph/tree/main/Unity_SourceCode
Mao Xingyun:
Mao Xingyun: "PBR White Paper, Part 3: a summary of the Disney-principled BRDF and BSDF" (zhuanlan.zhihu.com)
Similar material from Yuxuan:
Yuxuan: "Unity PBR Standard Shader Explained, Part 4: the BRDF calculation" (zhuanlan.zhihu.com)
Xiong Xinke: source-code analysis, chapters 10 and 11
Feng Lele: "Unity Shader 入门精要", chapter 18