Original post: http://www.manew.com/forum.php?mod=viewthread&tid=91191&page=1&authorid=119383
Create a new project and drag an ARCamera and an ImageTarget into the scene.
Create two layers, maliao and maliao01. Put the panel on the maliao layer and the Mario model on the maliao01 layer. Remove both layers from Camera_A's culling mask, and set Camera_B's culling mask so that it renders only the maliao layer (a script version of this setup is sketched just below).
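If you prefer to do the layer and culling-mask setup from code instead of the Inspector, here is a minimal sketch of mine. It assumes the two extra cameras are named Camera_A and Camera_B and that the maliao and maliao01 layers already exist in the Tags & Layers settings:
using UnityEngine;

public class SceneLayerSetup : MonoBehaviour
{
    public GameObject panel;        // the panel placed on the ImageTarget
    public GameObject marioModel;   // the Mario model
    public Camera cameraA;          // drag Camera_A here in the Inspector
    public Camera cameraB;          // drag Camera_B here in the Inspector

    void Start()
    {
        int maliao = LayerMask.NameToLayer("maliao");
        int maliao01 = LayerMask.NameToLayer("maliao01");

        panel.layer = maliao;
        marioModel.layer = maliao01;

        // Camera_A renders everything except the two layers.
        cameraA.cullingMask &= ~((1 << maliao) | (1 << maliao01));
        // Camera_B renders only the maliao layer.
        cameraB.cullingMask = 1 << maliao;
    }
}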
Next comes the script that computes the panel's MVP matrix every frame and passes it to the panel's material:
using UnityEngine;
using System.Collections;
using System;
using Vuforia;

public class NewBehaviourScript : MonoBehaviour {

    // This is the panel we created earlier (placed on the ImageTarget).
    public GameObject panel;

    void Update()
    {
        // Projection matrix of the main (AR) camera, adjusted for the active graphics API.
        Matrix4x4 P = GL.GetGPUProjectionMatrix(Camera.main.projectionMatrix, false);
        // View matrix: world space -> camera space.
        Matrix4x4 V = Camera.main.worldToCameraMatrix;
        // Model matrix of the panel sitting on the recognition image: object space -> world space.
        Matrix4x4 M = panel.GetComponent<Renderer>().localToWorldMatrix;
        // Combined MVP matrix; the shader uses it to project the panel into screen space
        // and sample the captured image there.
        Matrix4x4 MVP = P * V * M;
        panel.GetComponent<Renderer>().material.SetMatrix("_MATRIX_MVP", MVP);
    }
}
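Calling GetComponent twice every frame works, but it is a little cleaner to cache the Renderer once. A minimal variant with the same behaviour (the class name FrozenUVMatrixUpdater is my own; use it instead of, not alongside, the script above):
using UnityEngine;

public class FrozenUVMatrixUpdater : MonoBehaviour {

    public GameObject panel;
    private Renderer panelRenderer;

    void Start()
    {
        // Cache the Renderer so we don't look it up every frame.
        panelRenderer = panel.GetComponent<Renderer>();
    }

    void Update()
    {
        Matrix4x4 P = GL.GetGPUProjectionMatrix(Camera.main.projectionMatrix, false);
        Matrix4x4 V = Camera.main.worldToCameraMatrix;
        Matrix4x4 M = panelRenderer.localToWorldMatrix;
        panelRenderer.material.SetMatrix("_MATRIX_MVP", P * V * M);
    }
}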
The shader is borrowed from another author and used here as-is. Assign it to the panel:
Shader "Custom/Test" {
Properties {
_MainTex("Texture", 2D) = "white" { }
}
SubShader{
Pass{
Cull Back
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
sampler2D _MainTex;
float4x4 _MATRIX_MVP;
struct v2f{
float4 pos : SV_POSITION;
float2 uv : TEXCOORD0;
};
v2f vert(appdata_base v){
v2f o;
float2 screenSpacePos;
float4 clipPos;
//Convert position from world space to clip space.
//Only the UV coordinates should be frozen, so use a different matrix
clipPos = mul(_MATRIX_MVP, v.vertex);
//Convert position from clip space to screen space.
//Screen space has range x=-1 to x=1
screenSpacePos.x = clipPos.x / clipPos.w;
screenSpacePos.y = clipPos.y / clipPos.w;
//the screen space range (-1 to 1) has to be converted to
//the UV range 0 to 1
o.uv.x = (0.5f*screenSpacePos.x) + 0.5f;
o.uv.y = (0.5f*screenSpacePos.y) + 0.5f;
//The position of the vertex should not be frozen, so use
//the standard UNITY_MATRIX_MVP matrix
o.pos = mul(UNITY_MATRIX_MVP, v.vertex);
return o;
}
half4 frag(v2f i) : COLOR{
half4 texcol = tex2D(_MainTex, i.uv);
return texcol;
}
ENDCG
}
}
}
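The shader can be assigned by dragging it onto the panel's material in the Inspector. If you would rather do it from code, a minimal sketch of mine (it assumes the shader keeps the "Custom/Test" name used above):
using UnityEngine;

public class AssignFrozenUVShader : MonoBehaviour
{
    public GameObject panel;

    void Start()
    {
        // Shader.Find only succeeds if the shader is included in the build
        // (e.g. referenced by a material or listed under Always Included Shaders).
        Shader frozenUV = Shader.Find("Custom/Test");
        if (frozenUV != null)
        {
            panel.GetComponent<Renderer>().material = new Material(frozenUV);
        }
    }
}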
That completes the program. The main idea is to use the panel's position to work out where the recognition image appears in the camera view, cut that part of the image out, and map it onto the model. Quite simple. Below is a test screenshot; I was lazy and simply used a colourful picture as the recognition image.
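The clip-space-to-UV conversion in the vertex shader does the same job as Unity's viewport projection, so a quick way to sanity-check the setup is to log where the panel's centre lands on screen. This is just a debugging aid of mine, not part of the original tutorial:
using UnityEngine;

public class PanelProjectionCheck : MonoBehaviour
{
    public GameObject panel;

    void Update()
    {
        // Viewport coordinates run from (0,0) bottom-left to (1,1) top-right,
        // matching the 0..1 UV range computed in the vertex shader
        // (up to a possible y flip depending on the graphics API).
        Vector3 vp = Camera.main.WorldToViewportPoint(panel.transform.position);
        Debug.Log("Panel centre in viewport/UV space: " + vp);
    }
}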