注:學習實踐階段,參考了unity shader 入門精要一書。
模糊效果
在進入慢速度狀態時希望在視覺上呈現模糊效果,類似那種錄影機高速模糊,有殘影的效果。
試了兩種方案,都是基于後處理。一種采用不斷疊加上一幀渲染紋理的方式,一種利用深度紋理重建世界坐标計算像素速度的方式進行渲染。但第二種方法在該遊戲上的效果並不適合,抖動太過劇烈。如圖
![](https://img.laitimes.com/img/_0nNw4CM6IyYiwiM6ICdiwiIyVGduV2YfNWawNCM38FdsYkRGZkRG9lcvx2bjxiNx8VZ6l2cs0TPn5ENRpmTykERPpHOsJGcohVYsR2MMBjVtJWd0ckW65UbM5WOHJWa5kHT20ESjBjUIF2X0hXZ0xCMx81dvRWYoNHLrdEZwZ1Rh5WNXp1bwNjW1ZUba9VZwlHdssmch1mclRXY39CXldWYtlWPzNXZj9mcw1ycz9WL49zZuBnL1UDOxMjMzEjMzIjNwAjMwIzLc52YucWbp5GZzNmLn9Gbi1yZtl2Lc9CX6MHc0RHaiojIsJye.png)
而第一種實作的效果比較理想,如圖(截圖原因可能并不明顯,仔細看能看到拖尾效果):
而且第一種方法實作更加簡單,即利用一張渲染紋理來儲存上一幀的處理結果,每一次處理通過将上一幀儲存的渲染紋理和目前的渲染紋理進行混合疊加。
步驟
- 定義一個高斯模糊系數:
// Blur coefficient in [0, 0.9]; higher values keep more of the previous
// frame in the blend, producing a longer motion trail.
[Range(0.0f, 0.9f)]
public float blurAmount = 0.5f;
- 定義一個渲染紋理來儲存前一幀的混合結果:
// Render texture that accumulates the previous frames' blended result.
private RenderTexture accumulationTexture;
// Release the accumulation texture when the effect is disabled so it is
// recreated (and re-seeded) in OnRenderImage the next time it is enabled.
void OnDisable()
{
DestroyImmediate(accumulationTexture);// destroy immediately rather than waiting for GC
}
- 實作OnRenderImage函數:
// Frame-accumulation motion blur: blend the current frame into the
// accumulation texture with the material's pass 0, then copy the
// accumulated result to the destination, leaving a trailing-ghost effect.
void OnRenderImage(RenderTexture source, RenderTexture destination)
{
    if (material == null)
    {
        // No material available: pass the image through untouched.
        Graphics.Blit(source, destination);
        return;
    }

    // (Re)create the accumulation texture when it is missing or the
    // screen size changed, and seed it with the current frame.
    bool needsRebuild = accumulationTexture == null
        || accumulationTexture.width != source.width
        || accumulationTexture.height != source.height;
    if (needsRebuild)
    {
        DestroyImmediate(accumulationTexture);
        accumulationTexture = new RenderTexture(source.width, source.height, 0);
        accumulationTexture.hideFlags = HideFlags.HideAndDontSave;
        Graphics.Blit(source, accumulationTexture);
    }

    // We intentionally render onto the texture without clearing it first;
    // this call suppresses Unity's "restore" warning about that.
    accumulationTexture.MarkRestoreExpected();

    material.SetFloat("_BlurAmount", 1.0f - blurAmount);
    Graphics.Blit(source, accumulationTexture, material, 0);
    Graphics.Blit(accumulationTexture, destination);
}
- 實作shader
//兩個pass,一個混合,隻取RGB通道,一個隻取A通道,避免A通道由于混合被改變
Shader "Unlit/motionBlur"
{
Properties
{
_MainTex ("Texture", 2D) = "white" {}
_BlurAmount("Blur Amount",Float) = 1.0
}
SubShader
{
Tags { "RenderType"="Opaque" }
LOD 100
CGINCLUDE
#include "UnityCG.cginc"
sampler2D _MainTex;
fixed _BlurAmount;
struct appdata
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
struct v2f
{
float2 uv : TEXCOORD0;
float4 vertex : SV_POSITION;
};
v2f vert (appdata v)
{
v2f o;
o.vertex = UnityObjectToClipPos(v.vertex);
o.uv = v.uv;
return o;
}
fixed4 fragRGB (v2f i) : SV_Target
{
return fixed4(tex2D(_MainTex, i.uv).rgb, _BlurAmount);
}
half4 fragA (v2f i) : SV_Target
{
return tex2D(_MainTex, i.uv);
}
ENDCG
ZTest Always Cull Off ZWrite Off
Pass {
Blend SrcAlpha OneMinusSrcAlpha
ColorMask RGB //隻取RBG通道
CGPROGRAM
#pragma vertex vert
#pragma fragment fragRGB
ENDCG
}
Pass {
Blend One Zero
ColorMask A //隻取A通道
CGPROGRAM
#pragma vertex vert
#pragma fragment fragA
ENDCG
}
}
FallBack Off
}
附帶第二種方案
- 利用像素速度方向進行模糊
利用深度紋理擷取像素的深度資訊并結合UI坐标,得到該幀的(歸一化的裝置坐标,或稱其次裁剪空間坐标,或稱規範立方體坐标)坐标,通過視角裁剪矩陣(視角矩陣和其次裁剪矩陣的乘積)的逆矩陣,将像素轉換到世界空間獲得世界坐标,再利用上一幀緩存的視角裁剪矩陣,将該坐标轉換到NDC坐标,與該幀的NDC坐标比較,得到像素的移動速度,然後通過混合移動速度方向上的幾個(3個左右)像素(利用UV偏移)來達到模糊效果。
- 定義一個儲存上一幀錄影機的視角投影矩陣變量
- 定義一個模糊系數
// Blur strength in [0, 0.9]; passed to the shader as _BlurSize, where it
// scales the per-sample UV offset along the pixel velocity.
[Range(0.0f, 0.9f)]
public float blurAmount = 0.5f;
- 設定擷取深度紋理及初始化上一幀視口-投影變換矩陣
// Enable the camera depth texture and seed the previous-frame
// view-projection matrix so the very first frame has a valid value.
void OnEnable()
{
    // With this mode enabled, Unity binds the depth data to the
    // shader's sampler2D _CameraDepthTexture.
    // NOTE: original used an undefined `camera` here while every other
    // snippet uses `cam`; use `cam` consistently.
    cam.depthTextureMode |= DepthTextureMode.Depth;
    previousViewProjectionMatrix = cam.projectionMatrix * cam.worldToCameraMatrix;
}
- 實作OnRenderImage函數:
// Depth-based motion blur: hand the material the matrices needed to
// reconstruct world positions and compute per-pixel velocity, then blit
// through the blur shader.
void OnRenderImage(RenderTexture source, RenderTexture destination)
{
    if (material == null)
    {
        // No material: just copy the image through.
        Graphics.Blit(source, destination);
        return;
    }

    material.SetFloat("_BlurSize", blurAmount);
    // Last frame's view-projection matrix, used by the shader to find
    // where each reconstructed world position projected previously.
    material.SetMatrix("_PreviousViewProjectionMatrix", previousViewProjectionMatrix);

    // Current view-projection matrix; its inverse rebuilds world
    // positions from the depth texture in the shader.
    Matrix4x4 viewProj = cam.projectionMatrix * cam.worldToCameraMatrix;
    material.SetMatrix("_CurrentViewProjectionInverseMatrix", viewProj.inverse);

    // Cache this frame's matrix for the next frame.
    previousViewProjectionMatrix = viewProj;

    Graphics.Blit(source, destination, material);
}
- shader實作
Shader "Unlit/motionBlurbyDepth"
{
Properties
{
_MainTex ("Texture", 2D) = "white" {}
_BlurSize ("Blur Size", Float) = 1.0
}
SubShader
{
//Tags { "RenderType"="Opaque" }
//LOD 100
CGINCLUDE
#include "UnityCG.cginc"
sampler2D _MainTex;
fixed _BlurSize;
sampler2D _CameraDepthTexture;
float4x4 _CurrentViewProjectionInverseMatrix;
float4x4 _PreviousViewProjectionMatrix;
struct appdata
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
struct v2f
{
float2 uv : TEXCOORD0;
float2 uv_depth : TEXCOORD1;
float4 vertex : SV_POSITION;
};
v2f vert(appdata v)
{
v2f o;
o.vertex = UnityObjectToClipPos(v.vertex);
o.uv = v.uv;
o.uv_depth = v.uv;
return o;
}
fixed4 frag(v2f i) : SV_Target
{
//擷取深度紋理
float depth = SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture, i.uv_depth);//這個深度值是NDC空間的,是非線性的
//轉換坐标空間,由紋理坐标空間0-1轉換到NDC
float4 NDCPos = float4(i.uv.x * 2 - 1, i.uv.y * 2 - 1, depth * 2 - 1, 1);
//變換到世界坐标
float4 worldPos = mul(_CurrentViewProjectionInverseMatrix, NDCPos);
worldPos /= worldPos.w;
//獲得該點在上一次視角空間下的NDC坐标
float4 preNDCPos = mul(_PreviousViewProjectionMatrix, worldPos);
preNDCPos /= preNDCPos.w;
//求出像素速度,即內插補點
float2 velocity = (NDCPos.xy - preNDCPos.xy)/2.0f;
float2 uv = i.uv;
float4 c = tex2D(_MainTex, uv);
//求得偏移UV坐标
uv -= velocity * _BlurSize;
for (int it = 1; it < 3; it++, uv -= velocity * _BlurSize)
{
float4 currentColor = tex2D(_MainTex, uv);
c += currentColor;
}
//三次偏移坐标對應顔色求平均來混合顔色
c /= 3;
return fixed4(c.rgb, 1.0);
}
ENDCG
ZTest Always Cull Off ZWrite Off
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
ENDCG
}
}
FallBack Off
}