@cybereality said:
Can you post the code you are using to do the conversion?
shader_type spatial; // 3D (Spatial) shader
render_mode cull_front; // render back faces only
uniform sampler2D vertexs; // polyline vertex positions packed into a texture, one texel per vertex (RGB = xyz)
uniform int vertexsTextureWidth; // texture width in texels; maps a linear vertex index to (x, y)
uniform int vertexsCount; // total vertex count — NOTE(review): declared but never read in this shader
varying vec3 color; // NOTE(review): declared but never written or read — unused
render_mode skip_vertex_transform; // vertex() must write VERTEX in view space itself; second render_mode statement, could be merged with the one above
vec2 ComputeOffset(vec2 prevP, vec2 currentP, vec2 nextP, vec2 flag){ // Screen-space extrusion offset for line widening (miter direction * miter length); flag is UV2 (the neighbour indices), used only to pick a side in the degenerate case
vec2 dir1 = vec2(0, 0);
vec2 dir2 = vec2(0, 0);
vec2 dir = vec2(0, 0);
// Endpoint: next coincides with current -> use the incoming segment direction only.
if(abs(nextP.x - currentP.x)<=0.000001 && abs(nextP.y - currentP.y)<=0.000001)
dir = normalize( currentP - prevP );
// Endpoint: prev coincides with current -> use the outgoing segment direction only.
else if( abs(prevP.x - currentP.x)<=0.000001 && abs(prevP.y - currentP.y) <=0.000001)
dir = normalize( nextP - currentP );
else
{
// Interior vertex: average the two segment directions (the miter direction).
dir1 = normalize( currentP - prevP );
dir2 = normalize( nextP - currentP );
dir = normalize( dir1 + dir2 );
}
// Degenerate interior case: the segments point in exactly opposite directions,
// so dir1 + dir2 cancels; pick a perpendicular of dir1, side chosen by the
// order of the neighbour indices in flag (UV2).
// NOTE(review): exact float equality here — normalize() of a zero vector is
// undefined in GLSL and typically yields NaN, not exact zero; confirm this
// branch is ever taken on the target driver.
if(dir == vec2(0, 0))
{
if(flag.x < flag.y)
dir = vec2(dir1.y, -dir1.x);
else
dir = vec2(dir1.y, dir1.x);
}
else
{
// Rotate the miter direction -90 degrees to get the extrusion normal.
dir = vec2(dir.y, -dir.x);
}
// Miter length compensation: widen by 1/sin(angle between the extrusion
// normal and the incoming segment). In the endpoint branches dir1 stays
// (0,0), so dot() is 0, angle is pi/2 and width is exactly 1 — the code
// relies on that. NOTE(review): sin(angle) can approach 0 for very sharp
// joins, making width blow up (no miter limit) — confirm acceptable.
float angle = acos(dot(dir, dir1));
float width = abs(1.0 / sin(angle));
return dir * width;
}
// Inverse of transform_screen_pos(): rebuilds a clip-space position from a
// screen position in pixels, the [0,1] depth z and the clip-space w.
vec4 unproject(vec2 screen, vec2 screen_size, float z, float w) {
	vec3 normalized = vec3(screen / screen_size, z); // pixels -> [0,1]
	vec3 ndc = normalized * 2.0 - 1.0;               // [0,1] -> NDC [-1,1]
	return vec4(ndc * w, w);                         // undo the perspective divide
}
// Projects a model-space position to screen space.
// Returns vec4(screen position in pixels, depth remapped to [0,1], clip-space w).
vec4 transform_screen_pos(mat4 project, mat4 MODELVIEW, vec3 coord, vec2 screen_size){ // Convert to screen coordinates
	vec4 clip = project * MODELVIEW * vec4(coord, 1.0);
	vec3 ndc01 = clip.xyz / clip.w * 0.5 + 0.5; // NDC [-1,1] -> [0,1]
	return vec4(ndc01.xy * screen_size, ndc01.z, clip.w);
}
// Reads the polyline vertex stored at linear `index` in the vertexs texture
// (one RGB texel per vertex, rows wrapping at vertexsTextureWidth).
vec3 getAdjacentVertex(int index)
{
	ivec2 texel = ivec2(index % vertexsTextureWidth, index / vertexsTextureWidth);
	return texelFetch(vertexs, texel, 0).xyz;
}
void vertex(){
	// FIX: use the real viewport size instead of the hard-coded vec2(1024, 600).
	// With the constant, the pixel<->clip conversions (and so the line width)
	// are only correct for one specific window size; VIEWPORT_SIZE is a
	// vertex-stage built-in in spatial shaders.
	vec2 view_size = VIEWPORT_SIZE;
	// UV2 carries the indices of the two neighbouring polyline vertices.
	vec3 prevP_3d = getAdjacentVertex(int(UV2.x)); // previous adjacent vertex
	vec3 nextP_3d = getAdjacentVertex(int(UV2.y)); // next adjacent vertex
	// Project the current vertex and both neighbours into screen space.
	vec4 prevP = transform_screen_pos(PROJECTION_MATRIX, MODELVIEW_MATRIX, prevP_3d, view_size);
	vec4 nextP = transform_screen_pos(PROJECTION_MATRIX, MODELVIEW_MATRIX, nextP_3d, view_size);
	vec4 currentP = transform_screen_pos(PROJECTION_MATRIX, MODELVIEW_MATRIX, VERTEX, view_size);
	// Extrude 5 px along the miter direction, then go back to view space.
	vec2 offset = ComputeOffset(prevP.xy, currentP.xy, nextP.xy, UV2);
	currentP = currentP + 5.0 * vec4(offset,0,0);
	// render_mode skip_vertex_transform: VERTEX must be left in view space here.
	VERTEX = (INV_PROJECTION_MATRIX * unproject(currentP.xy, view_size, currentP.z, currentP.w)).xyz;
}
// Solid green for the widened line.
void fragment(){
	ALBEDO = vec3(0.0, 1.0, 0.0);
}
The code is messy, but the coordinate-conversion round trip itself is correct. The purpose of the shader is to widen a line running from (0, 0, 0) to (10, 0, 0).
I hit this problem on Godot 3.4, but not on Godot 4.0.