1. UV Frame Animation
Take a four-frame sprite sheet laid out horizontally as an example:
1. UV scaling: first, show only a quarter of the texture by compressing the UVs along the x axis, so the original u range of (0, 1) becomes (0, 0.25). In code: spriteUV.x *= 1.0 / 4.
2. UV offsetting: take the bottom-left corner of the texture as an example (uv = (0, 0)). As time passes, its uv should step through (0.25, 0), (0.5, 0), (0.75, 0), (1, 0); each step adds 1.0 / 4.
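For example, when the offset has advanced to 0.5 (the second step), a pixel whose original u value is 0.5 samples the sheet at u = 0.5 * (1.0 / 4) + 0.5 = 0.625, i.e. the middle of that frame. The surface shader below implements exactly these two steps, with _CellAmount frames instead of a hard-coded four.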
Shader "Custom/NewSurfaceShader" {
Properties {
_MainTex ("幀序列圖", 2D) = "white" {}
_CellAmount("精靈個數", float) = 0.0
_Speed("移動速度", float) = 0.0
}
SubShader {
Tags { "RenderType"="Opaque" }
CGPROGRAM
#pragma surface surf Lambert
sampler2D _MainTex;
half _CellAmount;
half _Speed;
struct Input {
float2 uv_MainTex;
};
void surf (Input IN, inout SurfaceOutput o) {
float2 spriteUV = IN.uv_MainTex;
float percent = 1.0 / _CellAmount;
float timeVal = fmod(_Time.y * _Speed, _CellAmount);//取餘數,得到一個小于_CellAmount的數
timeVal = ceil(timeVal);//取整
spriteUV.x = spriteUV.x * percent + timeVal * percent;
fixed4 c = tex2D(_MainTex, spriteUV);
o.Albedo = c.rgb;
o.Alpha = c.a;
}
ENDCG
}
FallBack "Diffuse"
}

P.S. Because the sprites in the texture used here are not evenly spaced, the animation shows some error.
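If the frames in the sheet are evenly spaced, a floor-based frame index avoids the off-by-one of ceil and never pushes u past 1. The variant below is a sketch, not the book's listing; it reuses the same properties and only changes the surf function:

//Variant sketch (evenly spaced frames assumed): floor() yields frame indices 0 .. _CellAmount-1
void surf (Input IN, inout SurfaceOutput o) {
    float2 spriteUV = IN.uv_MainTex;
    float percent = 1.0 / _CellAmount;
    float frame = floor(fmod(_Time.y * _Speed, _CellAmount));
    spriteUV.x = spriteUV.x * percent + frame * percent;
    fixed4 c = tex2D(_MainTex, spriteUV);
    o.Albedo = c.rgb;
    o.Alpha = c.a;
}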
2. Texture Blending
lerp(a, b, f) is equivalent to a * (1 - f) + b * f.
Shader "CookbookShaders/Chapter02/TextureBlending"
{
Properties
{
_MainTint ("Diffuse Tint", Color) = (1,1,1,1)
//Add the properties below so we can input all of our textures
_ColorA ("Terrain Color A", Color) = (1,1,1,1)
_ColorB ("Terrain Color B", Color) = (1,1,1,1)
_RTexture ("Red Channel Texture", 2D) = ""{}
_GTexture ("Green Channel Texture", 2D) = ""{}
_BTexture ("Blue Channel Texture", 2D) = ""{}
//_ATexture ("Alpha Channel Texture", 2D) = ""{}
_BlendTex ("Blend Texture", 2D) = ""{}
}
SubShader
{
Tags { "RenderType"="Opaque" }
LOD 200
CGPROGRAM
#pragma surface surf Lambert
float4 _MainTint;
float4 _ColorA;
float4 _ColorB;
sampler2D _RTexture;
sampler2D _GTexture;
sampler2D _BTexture;
sampler2D _BlendTex;
//sampler2D _ATexture;
struct Input
{
float2 uv_RTexture;
float2 uv_GTexture;
float2 uv_BTexture;
//float2 uv_ATexture;
float2 uv_BlendTex;
};
void surf (Input IN, inout SurfaceOutput o)
{
//Get the pixel data from the blend texture
//we need a float 4 here because the texture
//will return R,G,B,and A or X,Y,Z, and W
float4 blendData = tex2D(_BlendTex, IN.uv_BlendTex);
//Get the data from the textures we want to blend
float4 rTexData = tex2D(_RTexture, IN.uv_RTexture);
float4 gTexData = tex2D(_GTexture, IN.uv_GTexture);
float4 bTexData = tex2D(_BTexture, IN.uv_BTexture);
//float4 aTexData = tex2D(_ATexture, IN.uv_ATexture);
//No we need to contruct a new RGBA value and add all
//the different blended texture back together
float4 finalColor;
finalColor = lerp(rTexData, gTexData, blendData.g);
finalColor = lerp(finalColor, bTexData, blendData.b);
//finalColor = lerp(finalColor, aTexData, blendData.a);
finalColor.a = 1.0;
//Add on our terrain tinting colors
float4 terrainLayers = lerp(_ColorA, _ColorB, blendData.r);
finalColor *= terrainLayers;
finalColor = saturate(finalColor);
o.Albedo = finalColor.rgb * _MainTint.rgb;
o.Alpha = finalColor.a;
}
ENDCG
}
FallBack "Diffuse"
}
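The shader expects three terrain textures plus an RGB blend map painted by hand. The script below is a minimal sketch (not from the original material) showing how the textures could be assigned from C#; the field names are assumptions, while the property names match the shader above.

using UnityEngine;

public class TextureBlendSetup : MonoBehaviour
{
    //Texture assets to assign in the Inspector (hypothetical examples)
    public Texture2D redTexture;   //e.g. grass
    public Texture2D greenTexture; //e.g. dirt
    public Texture2D blueTexture;  //e.g. rock
    public Texture2D blendMap;     //RGB mask painted in an image editor

    void Start()
    {
        //Push the textures into the material properties declared by the shader above
        Material mat = GetComponent<Renderer>().material;
        mat.SetTexture("_RTexture", redTexture);
        mat.SetTexture("_GTexture", greenTexture);
        mat.SetTexture("_BTexture", blueTexture);
        mat.SetTexture("_BlendTex", blendMap);
    }
}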
3. Creating Procedural Textures
Sometimes you want to create textures dynamically at runtime and modify their pixels to produce an effect. This is usually called a procedural texture effect. Instead of painting a texture in an image-editing program, you compute a set of pixels over a 2D grid, write them into a new texture, and then pass that texture to a shader for its calculations.
This technique is very practical: by blending a dynamically generated texture with existing ones, it can create interaction between the player and the game environment; it can also serve as a clipping-style effect or as a procedurally generated shape. In most cases you create a brand-new texture like this, fill it with your program logic, and then use it in a shader as usual.
using UnityEngine;
using System.Collections;

public class ProceduralTexture : MonoBehaviour
{
    #region Public Variables
    //These values will let us control the width/height
    //and see the generated texture
    public int widthHeight = 512;
    public Texture2D generatedTexture;
    #endregion

    #region Private Variables
    //These variables will be internal to this script
    private Material currentMaterial;
    private Vector2 centerPosition;
    #endregion

    // Use this for initialization
    void Start ()
    {
        //Simple check to make sure we have a material on this transform.
        //This will determine if we can make a texture or not.
        if(!currentMaterial)
        {
            currentMaterial = transform.GetComponent<Renderer>().sharedMaterial;
            if(!currentMaterial)
            {
                Debug.LogWarning("Cannot find a material on: " + transform.name);
            }
        }

        //Generate the procedural texture
        if(currentMaterial)
        {
            //Generate the parabola texture
            centerPosition = new Vector2(0.5f, 0.5f);
            generatedTexture = GenerateParabola();
            //Assign it to this transform's material
            currentMaterial.SetTexture("_MainTex", generatedTexture);
        }

        //Sanity check of the normalized-distance calculation used below
        Debug.Log(Vector2.Distance(new Vector2(256,256), new Vector2(32,32))/256.0f);
    }

    private Texture2D GenerateParabola()
    {
        //Create a new Texture2D
        Texture2D proceduralTexture = new Texture2D(widthHeight, widthHeight);

        //Get the center of the texture
        Vector2 centerPixelPosition = centerPosition * widthHeight;

        //Loop through each pixel of the new texture, determine its
        //distance from the center, and assign a pixel value based on that.
        for(int x = 0; x < widthHeight; x++)
        {
            for(int y = 0; y < widthHeight; y++)
            {
                //Get the distance from the center of the texture to
                //our currently selected pixel
                Vector2 currentPosition = new Vector2(x,y);
                float pixelDistance = Vector2.Distance(currentPosition, centerPixelPosition)/(widthHeight*0.5f);
                pixelDistance = Mathf.Abs(1-Mathf.Clamp(pixelDistance, 0f,1f));
                pixelDistance = (Mathf.Sin(pixelDistance * 30.0f) * pixelDistance);

                //You can also do some more advanced vector calculations to derive
                //other types of data about the model itself, its UVs, and its pixels
                Vector2 pixelDirection = centerPixelPosition - currentPosition;
                pixelDirection.Normalize();
                float rightDirection = Vector2.Angle(pixelDirection, Vector3.right)/360;
                float leftDirection = Vector2.Angle(pixelDirection, Vector3.left)/360;
                float upDirection = Vector2.Angle(pixelDirection, Vector3.up)/360;

                //Invert the values and make sure we don't get any negative values
                //or values above 1.
                //Create a new color value based on our calculations above.
                //Color pixelColor = new Color(Mathf.Max(0.0f, rightDirection), Mathf.Max(0.0f, leftDirection), Mathf.Max(0.0f, upDirection), 1f);
                Color pixelColor = new Color(pixelDistance, pixelDistance, pixelDistance, 1.0f);
                //Color pixelColor = new Color(rightDirection, leftDirection, upDirection, 1.0f);
                proceduralTexture.SetPixel(x,y,pixelColor);
            }
        }

        //Finally, force the application of the new pixels to the texture
        proceduralTexture.Apply();

        //Return the texture to the main program
        return proceduralTexture;
    }
}
4. Transparency
To use transparency in a surface shader, add alpha to the #pragma directive.
Shader "Cookbook/Chapter06/SimpleAlpha"
{
Properties
{
_MainTex ("Base (RGB)", 2D) = "white" {}
_TransVal ("Transparency Value", Range(0,1)) = 0.5
}
SubShader
{
Tags { "RenderType"="Opaque"}
LOD 200
CGPROGRAM
#pragma surface surf Lambert alpha
sampler2D _MainTex;
float _TransVal;
struct Input
{
float2 uv_MainTex;
};
void surf (Input IN, inout SurfaceOutput o)
{
half4 c = tex2D (_MainTex, IN.uv_MainTex);
o.Albedo = c.rgb;
o.Alpha = c.b * _TransVal;
}
ENDCG
}
FallBack "Diffuse"
}
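The listing above keeps the RenderType=Opaque tag. In practice, alpha-blended surface shaders are usually also assigned to the transparent render queue so they draw after opaque geometry; a commonly used tag line (an addition, not part of this listing) is:

Tags { "Queue"="Transparent" "RenderType"="Transparent" "IgnoreProjector"="True" }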
5. Cutoff Transparency
To use cutoff transparency in a surface shader, add alphatest: to the #pragma directive.
This kind of transparency uses a single value to decide, pixel by pixel, what gets drawn to the screen: each pixel ends up either fully transparent or fully opaque.
Our cutoff shader uses a new parameter in the #pragma directive, alphatest:VariableName. It sets the shader up for a simplified form of transparency: unlike semi-transparency, only the _Cutoff variable controls the final result. This means that if _Cutoff is 0.4, any gray value below 0.4 is treated as transparent, and any value above 0.4 is treated as opaque.
The advantage of this type of transparency is performance: the semi-transparent effect from the previous section costs more than the cutoff type. On mobile devices, however, it is the other way around, because checking every pixel of a texture is expensive for those small GPUs. So if you are building a mobile application with Unity, prefer the semi-transparent technique and use the cutoff transparency described in this section sparingly.
Shader "Cookbook/Chapter06/CutoffShader"
{
Properties
{
_MainTex ("Base (RGB)", 2D) = "white" {}
_Cutoff ("Cutoff Value", Range(0,1)) = 0.5
}
SubShader
{
Tags { "RenderType"="Opaque" }
LOD 200
CGPROGRAM
#pragma surface surf Lambert alphatest:_Cutoff
sampler2D _MainTex;
struct Input
{
float2 uv_MainTex;
};
void surf (Input IN, inout SurfaceOutput o)
{
half4 c = tex2D (_MainTex, IN.uv_MainTex);
o.Albedo = c.rgb;
o.Alpha = c.r;
}
ENDCG
}
FallBack "Diffuse"
}
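Roughly the same behaviour can be written by hand instead of relying on alphatest:_Cutoff. The sketch below is an assumption rather than the book's listing: it declares _Cutoff itself and calls clip(), which discards any fragment whose argument is negative.

//Hand-written sketch of the cutoff test, assuming the same _MainTex and _Cutoff properties
CGPROGRAM
#pragma surface surf Lambert

sampler2D _MainTex;
float _Cutoff;

struct Input
{
    float2 uv_MainTex;
};

void surf (Input IN, inout SurfaceOutput o)
{
    half4 c = tex2D (_MainTex, IN.uv_MainTex);
    //clip() discards fragments whose gray value falls below the cutoff
    clip(c.r - _Cutoff);
    o.Albedo = c.rgb;
    o.Alpha = 1.0;
}
ENDCG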