Breakpoint 1 at 0x80cee03: file main/gauges.c, line 2789.
[Thread debugging using libthread_db enabled]
[New Thread 0xb7a7c6c0 (LWP 18434)]
[New Thread 0xb792fb90 (LWP 18439)]
[Switching to Thread 0xb7a7c6c0 (LWP 18434)]

Breakpoint 1, render_gauges () at main/gauges.c:2789
2789 draw_weapon_boxes();

Breakpoint 1, render_gauges () at main/gauges.c:2789
2789 draw_weapon_boxes();
Breakpoint 2 at 0x8163528: file arch/ogl/gr.c, line 436.

Breakpoint 2, ogl_urect (left=63, top=151, right=121, bot=193) at arch/ogl/gr.c:436
436 OGL_DISABLE(TEXTURE_2D);
Source directories searched: /import/lightcycle/home/jlm/code/d2x-rebirth_v0.55.1-src/../Mesa-7.3/src/mesa/drivers/dri/radeon:/import/lightcycle/home/jlm/code/d2x-rebirth_v0.55.1-src/../Mesa-7.3/src/mesa:/import/lightcycle/home/jlm/code/d2x-rebirth_v0.55.1-src/../Mesa-7.3:$cdir:$cwd
glDisable (cap=3553) at ../../../src/mesa/glapi/glapitemp.h:1155
1155 ../../../src/mesa/glapi/glapitemp.h: No such file or directory.
in ../../../src/mesa/glapi/glapitemp.h
_mesa_Disable (cap=3553) at main/enable.c:1023
1023 GET_CURRENT_CONTEXT(ctx);
1024 ASSERT_OUTSIDE_BEGIN_END(ctx);
1026 _mesa_set_enable( ctx, cap, GL_FALSE );
_mesa_set_enable (ctx=0x880faf0, cap=3553, state=0 '\0') at main/enable.c:258
258 {
265 switch (cap) {
258 {
265 switch (cap) {
622 if (!enable_texture(ctx, state, TEXTURE_2D_BIT)) {
enable_texture (ctx=0x880faf0, state=0 '\0', bit=2) at main/enable.c:231
231 struct gl_texture_unit *texUnit = &ctx->Texture.Unit[curr];
233 ? (texUnit->Enabled & ~bit) : (texUnit->Enabled | bit);
231 struct gl_texture_unit *texUnit = &ctx->Texture.Unit[curr];
233 ? (texUnit->Enabled & ~bit) : (texUnit->Enabled | bit);
235 if (!ctx->DrawBuffer->Visual.rgbMode || texUnit->Enabled == newenabled)
238 FLUSH_VERTICES(ctx, _NEW_TEXTURE);
vbo_exec_FlushVertices (ctx=0x880faf0, flags=1) at vbo/vbo_exec_api.c:745
745 struct vbo_exec_context *exec = &vbo_context(ctx)->exec;
744 {
745 struct vbo_exec_context *exec = &vbo_context(ctx)->exec;
747 if (exec->ctx->Driver.CurrentExecPrimitive != PRIM_OUTSIDE_BEGIN_END)
750 if (exec->vtx.vert_count) {
751 vbo_exec_vtx_flush( exec );
vbo_exec_vtx_flush (exec=0x88566c8) at vbo/vbo_exec_draw.c:228
228 if (exec->vtx.prim_count &&
223 {
228 if (exec->vtx.prim_count &&
71 GLuint sz = exec->vtx.vertex_size;
69 GLuint nr = exec->vtx.prim[exec->vtx.prim_count-1].count;
71 GLuint sz = exec->vtx.vertex_size;
69 GLuint nr = exec->vtx.prim[exec->vtx.prim_count-1].count;
71 GLuint sz = exec->vtx.vertex_size;
72 GLfloat *dst = exec->vtx.copied.buffer;
75 exec->vtx.vertex_size);
78 switch( exec->ctx->Driver.CurrentExecPrimitive )
75 exec->vtx.vertex_size);
78 switch( exec->ctx->Driver.CurrentExecPrimitive )
231 exec->vtx.copied.nr = vbo_copy_vertices( exec );
233 if (exec->vtx.copied.nr != exec->vtx.vert_count) {
231 exec->vtx.copied.nr = vbo_copy_vertices( exec );
233 if (exec->vtx.copied.nr != exec->vtx.vert_count) {
234 GLcontext *ctx = exec->ctx;
92 return (struct vbo_context *)(ctx->swtnl_im);
147 struct vbo_exec_context *exec = &vbo->exec;
149 GLuint count = exec->vtx.vert_count;
150 GLubyte *data = exec->vtx.buffer_map;
103 if (!ctx->VertexProgram._Current)
160 exec->vtx.inputs[attr] = &vbo->legacy_currval[attr];
159 for (attr = 0; attr < 16; attr++) {
160 exec->vtx.inputs[attr] = &vbo->legacy_currval[attr];
159 for (attr = 0; attr < 16; attr++) {
160 exec->vtx.inputs[attr] = &vbo->legacy_currval[attr];
159 for (attr = 0; attr < 16; attr++) {
160 exec->vtx.inputs[attr] = &vbo->legacy_currval[attr];
159 for (attr = 0; attr < 16; attr++) {
160 exec->vtx.inputs[attr] = &vbo->legacy_currval[attr];
159 for (attr = 0; attr < 16; attr++) {
160
exec->vtx.inputs[attr] = &vbo->legacy_currval[attr]; 159 for (attr = 0; attr < 16; attr++) { 160 exec->vtx.inputs[attr] = &vbo->legacy_currval[attr]; 159 for (attr = 0; attr < 16; attr++) { 160 exec->vtx.inputs[attr] = &vbo->legacy_currval[attr]; 159 for (attr = 0; attr < 16; attr++) { 160 exec->vtx.inputs[attr] = &vbo->legacy_currval[attr]; 159 for (attr = 0; attr < 16; attr++) { 160 exec->vtx.inputs[attr] = &vbo->legacy_currval[attr]; 159 for (attr = 0; attr < 16; attr++) { 160 exec->vtx.inputs[attr] = &vbo->legacy_currval[attr]; 159 for (attr = 0; attr < 16; attr++) { 160 exec->vtx.inputs[attr] = &vbo->legacy_currval[attr]; 159 for (attr = 0; attr < 16; attr++) { 160 exec->vtx.inputs[attr] = &vbo->legacy_currval[attr]; 159 for (attr = 0; attr < 16; attr++) { 160 exec->vtx.inputs[attr] = &vbo->legacy_currval[attr]; 159 for (attr = 0; attr < 16; attr++) { 160 exec->vtx.inputs[attr] = &vbo->legacy_currval[attr]; 159 for (attr = 0; attr < 16; attr++) { 160 exec->vtx.inputs[attr] = &vbo->legacy_currval[attr]; 159 for (attr = 0; attr < 16; attr++) { 160 exec->vtx.inputs[attr] = &vbo->legacy_currval[attr]; 159 for (attr = 0; attr < 16; attr++) { 163 exec->vtx.inputs[attr + 16] = &vbo->mat_currval[attr]; 162 for (attr = 0; attr < MAT_ATTRIB_MAX; attr++) { 163 exec->vtx.inputs[attr + 16] = &vbo->mat_currval[attr]; 162 for (attr = 0; attr < MAT_ATTRIB_MAX; attr++) { 163 exec->vtx.inputs[attr + 16] = &vbo->mat_currval[attr]; 162 for (attr = 0; attr < MAT_ATTRIB_MAX; attr++) { 163 exec->vtx.inputs[attr + 16] = &vbo->mat_currval[attr]; 162 for (attr = 0; attr < MAT_ATTRIB_MAX; attr++) { 163 exec->vtx.inputs[attr + 16] = &vbo->mat_currval[attr]; 162 for (attr = 0; attr < MAT_ATTRIB_MAX; attr++) { 163 exec->vtx.inputs[attr + 16] = &vbo->mat_currval[attr]; 162 for (attr = 0; attr < MAT_ATTRIB_MAX; attr++) { 163 exec->vtx.inputs[attr + 16] = &vbo->mat_currval[attr]; 162 for (attr = 0; attr < MAT_ATTRIB_MAX; attr++) { 163 exec->vtx.inputs[attr + 16] = &vbo->mat_currval[attr]; 162 for (attr = 0; attr < MAT_ATTRIB_MAX; attr++) { 163 exec->vtx.inputs[attr + 16] = &vbo->mat_currval[attr]; 162 for (attr = 0; attr < MAT_ATTRIB_MAX; attr++) { 163 exec->vtx.inputs[attr + 16] = &vbo->mat_currval[attr]; 162 for (attr = 0; attr < MAT_ATTRIB_MAX; attr++) { 163 exec->vtx.inputs[attr + 16] = &vbo->mat_currval[attr]; 162 for (attr = 0; attr < MAT_ATTRIB_MAX; attr++) { 163 exec->vtx.inputs[attr + 16] = &vbo->mat_currval[attr]; 162 for (attr = 0; attr < MAT_ATTRIB_MAX; attr++) { 165 map = vbo->map_vp_none; 185 const GLuint src = map[attr]; 187 if (exec->vtx.attrsz[src]) { 189 exec->vtx.inputs[attr] = &arrays[attr]; 191 if (exec->vtx.bufferobj->Name) { 201 arrays[attr].Ptr = (void *) data; 203 arrays[attr].Size = exec->vtx.attrsz[src]; 204 arrays[attr].StrideB = exec->vtx.vertex_size * sizeof(GLfloat); 205 arrays[attr].Stride = exec->vtx.vertex_size * sizeof(GLfloat); 206 arrays[attr].Type = GL_FLOAT; 207 arrays[attr].Enabled = 1; 205 arrays[attr].Stride = exec->vtx.vertex_size * sizeof(GLfloat); 208 _mesa_reference_buffer_object(ctx, _mesa_reference_buffer_object (ctx=0x880faf0, ptr=0x8857720, bufObj=0x87ceb80) at main/bufferobj.c:186 186 { 187 if (*ptr == bufObj) 242 } vbo_exec_vtx_flush (exec=0x88566c8) at vbo/vbo_exec_draw.c:211 211 arrays[attr]._MaxElement = count; /* ??? */ 213 data += exec->vtx.attrsz[src] * sizeof(GLfloat); 211 arrays[attr]._MaxElement = count; /* ??? 
*/ 213 data += exec->vtx.attrsz[src] * sizeof(GLfloat); 184 for (attr = 0; attr < VERT_ATTRIB_MAX ; attr++) { 185 const GLuint src = map[attr]; 187 if (exec->vtx.attrsz[src]) { 184 for (attr = 0; attr < VERT_ATTRIB_MAX ; attr++) { 185 const GLuint src = map[attr]; 187 if (exec->vtx.attrsz[src]) { 184 for (attr = 0; attr < VERT_ATTRIB_MAX ; attr++) { 185 const GLuint src = map[attr]; 187 if (exec->vtx.attrsz[src]) { 189 exec->vtx.inputs[attr] = &arrays[attr]; 191 if (exec->vtx.bufferobj->Name) { 201 arrays[attr].Ptr = (void *) data; 203 arrays[attr].Size = exec->vtx.attrsz[src]; 204 arrays[attr].StrideB = exec->vtx.vertex_size * sizeof(GLfloat); 205 arrays[attr].Stride = exec->vtx.vertex_size * sizeof(GLfloat); 206 arrays[attr].Type = GL_FLOAT; 207 arrays[attr].Enabled = 1; 205 arrays[attr].Stride = exec->vtx.vertex_size * sizeof(GLfloat); 208 _mesa_reference_buffer_object(ctx, _mesa_reference_buffer_object (ctx=0x880faf0, ptr=0x8857780, bufObj=0x87ceb80) at main/bufferobj.c:186 186 { 187 if (*ptr == bufObj) 242 } vbo_exec_vtx_flush (exec=0x88566c8) at vbo/vbo_exec_draw.c:211 211 arrays[attr]._MaxElement = count; /* ??? */ 213 data += exec->vtx.attrsz[src] * sizeof(GLfloat); 211 arrays[attr]._MaxElement = count; /* ??? */ 213 data += exec->vtx.attrsz[src] * sizeof(GLfloat); 184 for (attr = 0; attr < VERT_ATTRIB_MAX ; attr++) { 185 const GLuint src = map[attr]; 187 if (exec->vtx.attrsz[src]) { 184 for (attr = 0; attr < VERT_ATTRIB_MAX ; attr++) { 185 const GLuint src = map[attr]; 187 if (exec->vtx.attrsz[src]) { 184 for (attr = 0; attr < VERT_ATTRIB_MAX ; attr++) { 185 const GLuint src = map[attr]; 187 if (exec->vtx.attrsz[src]) { 184 for (attr = 0; attr < VERT_ATTRIB_MAX ; attr++) { 185 const GLuint src = map[attr]; 187 if (exec->vtx.attrsz[src]) { 184 for (attr = 0; attr < VERT_ATTRIB_MAX ; attr++) { 185 const GLuint src = map[attr]; 187 if (exec->vtx.attrsz[src]) { 189 exec->vtx.inputs[attr] = &arrays[attr]; 191 if (exec->vtx.bufferobj->Name) { 201 arrays[attr].Ptr = (void *) data; 203 arrays[attr].Size = exec->vtx.attrsz[src]; 204 arrays[attr].StrideB = exec->vtx.vertex_size * sizeof(GLfloat); 205 arrays[attr].Stride = exec->vtx.vertex_size * sizeof(GLfloat); 206 arrays[attr].Type = GL_FLOAT; 207 arrays[attr].Enabled = 1; 205 arrays[attr].Stride = exec->vtx.vertex_size * sizeof(GLfloat); 208 _mesa_reference_buffer_object(ctx, _mesa_reference_buffer_object (ctx=0x880faf0, ptr=0x8857820, bufObj=0x87ceb80) at main/bufferobj.c:186 186 { 187 if (*ptr == bufObj) 242 } vbo_exec_vtx_flush (exec=0x88566c8) at vbo/vbo_exec_draw.c:211 211 arrays[attr]._MaxElement = count; /* ??? */ 213 data += exec->vtx.attrsz[src] * sizeof(GLfloat); 211 arrays[attr]._MaxElement = count; /* ??? 
*/ 213 data += exec->vtx.attrsz[src] * sizeof(GLfloat); 184 for (attr = 0; attr < VERT_ATTRIB_MAX ; attr++) { 185 const GLuint src = map[attr]; 187 if (exec->vtx.attrsz[src]) { 184 for (attr = 0; attr < VERT_ATTRIB_MAX ; attr++) { 185 const GLuint src = map[attr]; 187 if (exec->vtx.attrsz[src]) { 184 for (attr = 0; attr < VERT_ATTRIB_MAX ; attr++) { 185 const GLuint src = map[attr]; 187 if (exec->vtx.attrsz[src]) { 184 for (attr = 0; attr < VERT_ATTRIB_MAX ; attr++) { 185 const GLuint src = map[attr]; 187 if (exec->vtx.attrsz[src]) { 184 for (attr = 0; attr < VERT_ATTRIB_MAX ; attr++) { 185 const GLuint src = map[attr]; 187 if (exec->vtx.attrsz[src]) { 184 for (attr = 0; attr < VERT_ATTRIB_MAX ; attr++) { 185 const GLuint src = map[attr]; 187 if (exec->vtx.attrsz[src]) { 184 for (attr = 0; attr < VERT_ATTRIB_MAX ; attr++) { 185 const GLuint src = map[attr]; 187 if (exec->vtx.attrsz[src]) { 184 for (attr = 0; attr < VERT_ATTRIB_MAX ; attr++) { 185 const GLuint src = map[attr]; 187 if (exec->vtx.attrsz[src]) { 184 for (attr = 0; attr < VERT_ATTRIB_MAX ; attr++) { 185 const GLuint src = map[attr]; 187 if (exec->vtx.attrsz[src]) { 184 for (attr = 0; attr < VERT_ATTRIB_MAX ; attr++) { 185 const GLuint src = map[attr]; 187 if (exec->vtx.attrsz[src]) { 184 for (attr = 0; attr < VERT_ATTRIB_MAX ; attr++) { 185 const GLuint src = map[attr]; 187 if (exec->vtx.attrsz[src]) { 184 for (attr = 0; attr < VERT_ATTRIB_MAX ; attr++) { 185 const GLuint src = map[attr]; 187 if (exec->vtx.attrsz[src]) { 184 for (attr = 0; attr < VERT_ATTRIB_MAX ; attr++) { 185 const GLuint src = map[attr]; 187 if (exec->vtx.attrsz[src]) { 184 for (attr = 0; attr < VERT_ATTRIB_MAX ; attr++) { 185 const GLuint src = map[attr]; 187 if (exec->vtx.attrsz[src]) { 184 for (attr = 0; attr < VERT_ATTRIB_MAX ; attr++) { 185 const GLuint src = map[attr]; 187 if (exec->vtx.attrsz[src]) { 184 for (attr = 0; attr < VERT_ATTRIB_MAX ; attr++) { 185 const GLuint src = map[attr]; 187 if (exec->vtx.attrsz[src]) { 184 for (attr = 0; attr < VERT_ATTRIB_MAX ; attr++) { 185 const GLuint src = map[attr]; 187 if (exec->vtx.attrsz[src]) { 184 for (attr = 0; attr < VERT_ATTRIB_MAX ; attr++) { 185 const GLuint src = map[attr]; 187 if (exec->vtx.attrsz[src]) { 184 for (attr = 0; attr < VERT_ATTRIB_MAX ; attr++) { 185 const GLuint src = map[attr]; 187 if (exec->vtx.attrsz[src]) { 184 for (attr = 0; attr < VERT_ATTRIB_MAX ; attr++) { 185 const GLuint src = map[attr]; 187 if (exec->vtx.attrsz[src]) { 189 exec->vtx.inputs[attr] = &arrays[attr]; 191 if (exec->vtx.bufferobj->Name) { 201 arrays[attr].Ptr = (void *) data; 203 arrays[attr].Size = exec->vtx.attrsz[src]; 204 arrays[attr].StrideB = exec->vtx.vertex_size * sizeof(GLfloat); 205 arrays[attr].Stride = exec->vtx.vertex_size * sizeof(GLfloat); 206 arrays[attr].Type = GL_FLOAT; 207 arrays[attr].Enabled = 1; 205 arrays[attr].Stride = exec->vtx.vertex_size * sizeof(GLfloat); 208 _mesa_reference_buffer_object(ctx, _mesa_reference_buffer_object (ctx=0x880faf0, ptr=0x8857aa0, bufObj=0x87ceb80) at main/bufferobj.c:186 186 { 187 if (*ptr == bufObj) 242 } vbo_exec_vtx_flush (exec=0x88566c8) at vbo/vbo_exec_draw.c:211 211 arrays[attr]._MaxElement = count; /* ??? */ 213 data += exec->vtx.attrsz[src] * sizeof(GLfloat); 211 arrays[attr]._MaxElement = count; /* ??? 
*/ 213 data += exec->vtx.attrsz[src] * sizeof(GLfloat); 184 for (attr = 0; attr < VERT_ATTRIB_MAX ; attr++) { 185 const GLuint src = map[attr]; 187 if (exec->vtx.attrsz[src]) { 184 for (attr = 0; attr < VERT_ATTRIB_MAX ; attr++) { 185 const GLuint src = map[attr]; 187 if (exec->vtx.attrsz[src]) { 184 for (attr = 0; attr < VERT_ATTRIB_MAX ; attr++) { 185 const GLuint src = map[attr]; 187 if (exec->vtx.attrsz[src]) { 189 exec->vtx.inputs[attr] = &arrays[attr]; 191 if (exec->vtx.bufferobj->Name) { 201 arrays[attr].Ptr = (void *) data; 203 arrays[attr].Size = exec->vtx.attrsz[src]; 204 arrays[attr].StrideB = exec->vtx.vertex_size * sizeof(GLfloat); 205 arrays[attr].Stride = exec->vtx.vertex_size * sizeof(GLfloat); 206 arrays[attr].Type = GL_FLOAT; 207 arrays[attr].Enabled = 1; 205 arrays[attr].Stride = exec->vtx.vertex_size * sizeof(GLfloat); 208 _mesa_reference_buffer_object(ctx, _mesa_reference_buffer_object (ctx=0x880faf0, ptr=0x8857b00, bufObj=0x87ceb80) at main/bufferobj.c:186 186 { 187 if (*ptr == bufObj) 242 } vbo_exec_vtx_flush (exec=0x88566c8) at vbo/vbo_exec_draw.c:211 211 arrays[attr]._MaxElement = count; /* ??? */ 213 data += exec->vtx.attrsz[src] * sizeof(GLfloat); 211 arrays[attr]._MaxElement = count; /* ??? */ 213 data += exec->vtx.attrsz[src] * sizeof(GLfloat); 184 for (attr = 0; attr < VERT_ATTRIB_MAX ; attr++) { 246 if (exec->vtx.bufferobj->Name) { 251 vbo_context(ctx)->draw_prims( ctx, _tnl_draw_prims (ctx=0x880faf0, arrays=0x8857c90, prim=0x88567ec, nr_prims=1, ib=0x0, min_index=0, max_index=3) at tnl/t_draw.c:347 347 TNLcontext *tnl = TNL_CONTEXT(ctx); 346 { 347 TNLcontext *tnl = TNL_CONTEXT(ctx); 362 if (min_index) { 347 TNLcontext *tnl = TNL_CONTEXT(ctx); 349 const GLint max = TEST_SPLIT ? 8 : tnl->vb.Size - MAX_CLIPPED_VERTICES; 362 if (min_index) { 370 else if (max_index > max) { 398 bind_inputs(ctx, arrays, max_index+1, bo, &nr_bo); 194 if (inputs[i]->BufferObj->Name) { 210 ptr = inputs[i]->Ptr; 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 103 GLuint stride = input->StrideB; 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 103 GLuint stride = input->StrideB; 105 if (input->Type != GL_FLOAT) { 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 142 VB->AttribPtr[attrib]->data = (GLfloat (*)[4])ptr; 143 VB->AttribPtr[attrib]->start = (GLfloat *)ptr; 144 VB->AttribPtr[attrib]->count = count; 145 VB->AttribPtr[attrib]->stride = stride; 146 VB->AttribPtr[attrib]->size = input->Size; 150 VB->AttribPtr[attrib]->flags = (((1<Size)-1) | 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 194 if (inputs[i]->BufferObj->Name) { 210 ptr = inputs[i]->Ptr; 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 103 GLuint stride = input->StrideB; 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 103 GLuint stride = input->StrideB; 105 if (input->Type != GL_FLOAT) { 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = 
&tnl->tmp_inputs[attrib]; 142 VB->AttribPtr[attrib]->data = (GLfloat (*)[4])ptr; 143 VB->AttribPtr[attrib]->start = (GLfloat *)ptr; 144 VB->AttribPtr[attrib]->count = count; 145 VB->AttribPtr[attrib]->stride = stride; 146 VB->AttribPtr[attrib]->size = input->Size; 150 VB->AttribPtr[attrib]->flags = (((1<Size)-1) | 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 194 if (inputs[i]->BufferObj->Name) { 210 ptr = inputs[i]->Ptr; 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 103 GLuint stride = input->StrideB; 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 103 GLuint stride = input->StrideB; 105 if (input->Type != GL_FLOAT) { 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 142 VB->AttribPtr[attrib]->data = (GLfloat (*)[4])ptr; 143 VB->AttribPtr[attrib]->start = (GLfloat *)ptr; 144 VB->AttribPtr[attrib]->count = count; 145 VB->AttribPtr[attrib]->stride = stride; 146 VB->AttribPtr[attrib]->size = input->Size; 150 VB->AttribPtr[attrib]->flags = (((1<Size)-1) | 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 194 if (inputs[i]->BufferObj->Name) { 210 ptr = inputs[i]->Ptr; 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 103 GLuint stride = input->StrideB; 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 103 GLuint stride = input->StrideB; 105 if (input->Type != GL_FLOAT) { 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 142 VB->AttribPtr[attrib]->data = (GLfloat (*)[4])ptr; 143 VB->AttribPtr[attrib]->start = (GLfloat *)ptr; 144 VB->AttribPtr[attrib]->count = count; 145 VB->AttribPtr[attrib]->stride = stride; 146 VB->AttribPtr[attrib]->size = input->Size; 150 VB->AttribPtr[attrib]->flags = (((1<Size)-1) | 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 194 if (inputs[i]->BufferObj->Name) { 210 ptr = inputs[i]->Ptr; 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 103 GLuint stride = input->StrideB; 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 103 GLuint stride = input->StrideB; 105 if (input->Type != GL_FLOAT) { 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 142 VB->AttribPtr[attrib]->data = (GLfloat (*)[4])ptr; 143 VB->AttribPtr[attrib]->start = (GLfloat *)ptr; 144 VB->AttribPtr[attrib]->count = count; 145 VB->AttribPtr[attrib]->stride = stride; 146 VB->AttribPtr[attrib]->size = input->Size; 150 VB->AttribPtr[attrib]->flags = (((1<Size)-1) | 154 VB->AttribPtr[attrib]->storage = 
NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 194 if (inputs[i]->BufferObj->Name) { 210 ptr = inputs[i]->Ptr; 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 103 GLuint stride = input->StrideB; 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 103 GLuint stride = input->StrideB; 105 if (input->Type != GL_FLOAT) { 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 142 VB->AttribPtr[attrib]->data = (GLfloat (*)[4])ptr; 143 VB->AttribPtr[attrib]->start = (GLfloat *)ptr; 144 VB->AttribPtr[attrib]->count = count; 145 VB->AttribPtr[attrib]->stride = stride; 146 VB->AttribPtr[attrib]->size = input->Size; 150 VB->AttribPtr[attrib]->flags = (((1<Size)-1) | 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 194 if (inputs[i]->BufferObj->Name) { 210 ptr = inputs[i]->Ptr; 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 103 GLuint stride = input->StrideB; 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 103 GLuint stride = input->StrideB; 105 if (input->Type != GL_FLOAT) { 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 142 VB->AttribPtr[attrib]->data = (GLfloat (*)[4])ptr; 143 VB->AttribPtr[attrib]->start = (GLfloat *)ptr; 144 VB->AttribPtr[attrib]->count = count; 145 VB->AttribPtr[attrib]->stride = stride; 146 VB->AttribPtr[attrib]->size = input->Size; 150 VB->AttribPtr[attrib]->flags = (((1<Size)-1) | 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 194 if (inputs[i]->BufferObj->Name) { 210 ptr = inputs[i]->Ptr; 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 103 GLuint stride = input->StrideB; 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 103 GLuint stride = input->StrideB; 105 if (input->Type != GL_FLOAT) { 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 142 VB->AttribPtr[attrib]->data = (GLfloat (*)[4])ptr; 143 VB->AttribPtr[attrib]->start = (GLfloat *)ptr; 144 VB->AttribPtr[attrib]->count = count; 145 VB->AttribPtr[attrib]->stride = stride; 146 VB->AttribPtr[attrib]->size = input->Size; 150 VB->AttribPtr[attrib]->flags = (((1<Size)-1) | 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 194 if (inputs[i]->BufferObj->Name) { 210 ptr = inputs[i]->Ptr; 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 103 
GLuint stride = input->StrideB; 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 103 GLuint stride = input->StrideB; 105 if (input->Type != GL_FLOAT) { 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 142 VB->AttribPtr[attrib]->data = (GLfloat (*)[4])ptr; 143 VB->AttribPtr[attrib]->start = (GLfloat *)ptr; 144 VB->AttribPtr[attrib]->count = count; 145 VB->AttribPtr[attrib]->stride = stride; 146 VB->AttribPtr[attrib]->size = input->Size; 150 VB->AttribPtr[attrib]->flags = (((1<Size)-1) | 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 194 if (inputs[i]->BufferObj->Name) { 210 ptr = inputs[i]->Ptr; 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 103 GLuint stride = input->StrideB; 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 103 GLuint stride = input->StrideB; 105 if (input->Type != GL_FLOAT) { 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 142 VB->AttribPtr[attrib]->data = (GLfloat (*)[4])ptr; 143 VB->AttribPtr[attrib]->start = (GLfloat *)ptr; 144 VB->AttribPtr[attrib]->count = count; 145 VB->AttribPtr[attrib]->stride = stride; 146 VB->AttribPtr[attrib]->size = input->Size; 150 VB->AttribPtr[attrib]->flags = (((1<Size)-1) | 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 194 if (inputs[i]->BufferObj->Name) { 210 ptr = inputs[i]->Ptr; 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 103 GLuint stride = input->StrideB; 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 103 GLuint stride = input->StrideB; 105 if (input->Type != GL_FLOAT) { 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 142 VB->AttribPtr[attrib]->data = (GLfloat (*)[4])ptr; 143 VB->AttribPtr[attrib]->start = (GLfloat *)ptr; 144 VB->AttribPtr[attrib]->count = count; 145 VB->AttribPtr[attrib]->stride = stride; 146 VB->AttribPtr[attrib]->size = input->Size; 150 VB->AttribPtr[attrib]->flags = (((1<Size)-1) | 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 194 if (inputs[i]->BufferObj->Name) { 210 ptr = inputs[i]->Ptr; 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 103 GLuint stride = input->StrideB; 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 103 GLuint stride = input->StrideB; 105 if (input->Type != GL_FLOAT) { 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 
VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 142 VB->AttribPtr[attrib]->data = (GLfloat (*)[4])ptr; 143 VB->AttribPtr[attrib]->start = (GLfloat *)ptr; 144 VB->AttribPtr[attrib]->count = count; 145 VB->AttribPtr[attrib]->stride = stride; 146 VB->AttribPtr[attrib]->size = input->Size; 150 VB->AttribPtr[attrib]->flags = (((1<Size)-1) | 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 194 if (inputs[i]->BufferObj->Name) { 210 ptr = inputs[i]->Ptr; 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 103 GLuint stride = input->StrideB; 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 103 GLuint stride = input->StrideB; 105 if (input->Type != GL_FLOAT) { 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 142 VB->AttribPtr[attrib]->data = (GLfloat (*)[4])ptr; 143 VB->AttribPtr[attrib]->start = (GLfloat *)ptr; 144 VB->AttribPtr[attrib]->count = count; 145 VB->AttribPtr[attrib]->stride = stride; 146 VB->AttribPtr[attrib]->size = input->Size; 150 VB->AttribPtr[attrib]->flags = (((1<Size)-1) | 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 194 if (inputs[i]->BufferObj->Name) { 210 ptr = inputs[i]->Ptr; 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 103 GLuint stride = input->StrideB; 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 103 GLuint stride = input->StrideB; 105 if (input->Type != GL_FLOAT) { 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 142 VB->AttribPtr[attrib]->data = (GLfloat (*)[4])ptr; 143 VB->AttribPtr[attrib]->start = (GLfloat *)ptr; 144 VB->AttribPtr[attrib]->count = count; 145 VB->AttribPtr[attrib]->stride = stride; 146 VB->AttribPtr[attrib]->size = input->Size; 150 VB->AttribPtr[attrib]->flags = (((1<Size)-1) | 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 194 if (inputs[i]->BufferObj->Name) { 210 ptr = inputs[i]->Ptr; 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 103 GLuint stride = input->StrideB; 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 103 GLuint stride = input->StrideB; 105 if (input->Type != GL_FLOAT) { 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 142 VB->AttribPtr[attrib]->data = (GLfloat (*)[4])ptr; 143 VB->AttribPtr[attrib]->start = (GLfloat *)ptr; 144 VB->AttribPtr[attrib]->count = count; 145 VB->AttribPtr[attrib]->stride = stride; 146 VB->AttribPtr[attrib]->size = input->Size; 150 VB->AttribPtr[attrib]->flags = (((1<Size)-1) | 154 
VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 194 if (inputs[i]->BufferObj->Name) { 210 ptr = inputs[i]->Ptr; 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 103 GLuint stride = input->StrideB; 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 103 GLuint stride = input->StrideB; 105 if (input->Type != GL_FLOAT) { 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 142 VB->AttribPtr[attrib]->data = (GLfloat (*)[4])ptr; 143 VB->AttribPtr[attrib]->start = (GLfloat *)ptr; 144 VB->AttribPtr[attrib]->count = count; 145 VB->AttribPtr[attrib]->stride = stride; 146 VB->AttribPtr[attrib]->size = input->Size; 150 VB->AttribPtr[attrib]->flags = (((1<Size)-1) | 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 194 if (inputs[i]->BufferObj->Name) { 210 ptr = inputs[i]->Ptr; 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 103 GLuint stride = input->StrideB; 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 103 GLuint stride = input->StrideB; 105 if (input->Type != GL_FLOAT) { 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 142 VB->AttribPtr[attrib]->data = (GLfloat (*)[4])ptr; 143 VB->AttribPtr[attrib]->start = (GLfloat *)ptr; 144 VB->AttribPtr[attrib]->count = count; 145 VB->AttribPtr[attrib]->stride = stride; 146 VB->AttribPtr[attrib]->size = input->Size; 150 VB->AttribPtr[attrib]->flags = (((1<Size)-1) | 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 194 if (inputs[i]->BufferObj->Name) { 210 ptr = inputs[i]->Ptr; 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 103 GLuint stride = input->StrideB; 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 103 GLuint stride = input->StrideB; 105 if (input->Type != GL_FLOAT) { 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 142 VB->AttribPtr[attrib]->data = (GLfloat (*)[4])ptr; 143 VB->AttribPtr[attrib]->start = (GLfloat *)ptr; 144 VB->AttribPtr[attrib]->count = count; 145 VB->AttribPtr[attrib]->stride = stride; 146 VB->AttribPtr[attrib]->size = input->Size; 150 VB->AttribPtr[attrib]->flags = (((1<Size)-1) | 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 194 if (inputs[i]->BufferObj->Name) { 210 ptr = inputs[i]->Ptr; 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 218 
_tnl_import_array(ctx, i, count, inputs[i], ptr); 103 GLuint stride = input->StrideB; 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 103 GLuint stride = input->StrideB; 105 if (input->Type != GL_FLOAT) { 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 142 VB->AttribPtr[attrib]->data = (GLfloat (*)[4])ptr; 143 VB->AttribPtr[attrib]->start = (GLfloat *)ptr; 144 VB->AttribPtr[attrib]->count = count; 145 VB->AttribPtr[attrib]->stride = stride; 146 VB->AttribPtr[attrib]->size = input->Size; 150 VB->AttribPtr[attrib]->flags = (((1<Size)-1) | 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 194 if (inputs[i]->BufferObj->Name) { 210 ptr = inputs[i]->Ptr; 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 103 GLuint stride = input->StrideB; 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 103 GLuint stride = input->StrideB; 105 if (input->Type != GL_FLOAT) { 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 142 VB->AttribPtr[attrib]->data = (GLfloat (*)[4])ptr; 143 VB->AttribPtr[attrib]->start = (GLfloat *)ptr; 144 VB->AttribPtr[attrib]->count = count; 145 VB->AttribPtr[attrib]->stride = stride; 146 VB->AttribPtr[attrib]->size = input->Size; 150 VB->AttribPtr[attrib]->flags = (((1<Size)-1) | 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 194 if (inputs[i]->BufferObj->Name) { 210 ptr = inputs[i]->Ptr; 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 103 GLuint stride = input->StrideB; 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 103 GLuint stride = input->StrideB; 105 if (input->Type != GL_FLOAT) { 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 142 VB->AttribPtr[attrib]->data = (GLfloat (*)[4])ptr; 143 VB->AttribPtr[attrib]->start = (GLfloat *)ptr; 144 VB->AttribPtr[attrib]->count = count; 145 VB->AttribPtr[attrib]->stride = stride; 146 VB->AttribPtr[attrib]->size = input->Size; 150 VB->AttribPtr[attrib]->flags = (((1<Size)-1) | 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 194 if (inputs[i]->BufferObj->Name) { 210 ptr = inputs[i]->Ptr; 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 103 GLuint stride = input->StrideB; 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 103 GLuint stride = input->StrideB; 105 if (input->Type != GL_FLOAT) { 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 
struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 142 VB->AttribPtr[attrib]->data = (GLfloat (*)[4])ptr; 143 VB->AttribPtr[attrib]->start = (GLfloat *)ptr; 144 VB->AttribPtr[attrib]->count = count; 145 VB->AttribPtr[attrib]->stride = stride; 146 VB->AttribPtr[attrib]->size = input->Size; 150 VB->AttribPtr[attrib]->flags = (((1<Size)-1) | 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 194 if (inputs[i]->BufferObj->Name) { 210 ptr = inputs[i]->Ptr; 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 103 GLuint stride = input->StrideB; 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 103 GLuint stride = input->StrideB; 105 if (input->Type != GL_FLOAT) { 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 142 VB->AttribPtr[attrib]->data = (GLfloat (*)[4])ptr; 143 VB->AttribPtr[attrib]->start = (GLfloat *)ptr; 144 VB->AttribPtr[attrib]->count = count; 145 VB->AttribPtr[attrib]->stride = stride; 146 VB->AttribPtr[attrib]->size = input->Size; 150 VB->AttribPtr[attrib]->flags = (((1<Size)-1) | 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 194 if (inputs[i]->BufferObj->Name) { 210 ptr = inputs[i]->Ptr; 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 103 GLuint stride = input->StrideB; 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 103 GLuint stride = input->StrideB; 105 if (input->Type != GL_FLOAT) { 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 142 VB->AttribPtr[attrib]->data = (GLfloat (*)[4])ptr; 143 VB->AttribPtr[attrib]->start = (GLfloat *)ptr; 144 VB->AttribPtr[attrib]->count = count; 145 VB->AttribPtr[attrib]->stride = stride; 146 VB->AttribPtr[attrib]->size = input->Size; 150 VB->AttribPtr[attrib]->flags = (((1<Size)-1) | 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 194 if (inputs[i]->BufferObj->Name) { 210 ptr = inputs[i]->Ptr; 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 103 GLuint stride = input->StrideB; 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 103 GLuint stride = input->StrideB; 105 if (input->Type != GL_FLOAT) { 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 142 VB->AttribPtr[attrib]->data = (GLfloat (*)[4])ptr; 143 VB->AttribPtr[attrib]->start = (GLfloat *)ptr; 144 VB->AttribPtr[attrib]->count = count; 145 VB->AttribPtr[attrib]->stride = stride; 146 VB->AttribPtr[attrib]->size = input->Size; 150 
VB->AttribPtr[attrib]->flags = (((1<Size)-1) | 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 194 if (inputs[i]->BufferObj->Name) { 210 ptr = inputs[i]->Ptr; 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 103 GLuint stride = input->StrideB; 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 103 GLuint stride = input->StrideB; 105 if (input->Type != GL_FLOAT) { 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 142 VB->AttribPtr[attrib]->data = (GLfloat (*)[4])ptr; 143 VB->AttribPtr[attrib]->start = (GLfloat *)ptr; 144 VB->AttribPtr[attrib]->count = count; 145 VB->AttribPtr[attrib]->stride = stride; 146 VB->AttribPtr[attrib]->size = input->Size; 150 VB->AttribPtr[attrib]->flags = (((1<Size)-1) | 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 194 if (inputs[i]->BufferObj->Name) { 210 ptr = inputs[i]->Ptr; 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 103 GLuint stride = input->StrideB; 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 103 GLuint stride = input->StrideB; 105 if (input->Type != GL_FLOAT) { 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 142 VB->AttribPtr[attrib]->data = (GLfloat (*)[4])ptr; 143 VB->AttribPtr[attrib]->start = (GLfloat *)ptr; 144 VB->AttribPtr[attrib]->count = count; 145 VB->AttribPtr[attrib]->stride = stride; 146 VB->AttribPtr[attrib]->size = input->Size; 150 VB->AttribPtr[attrib]->flags = (((1<Size)-1) | 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 194 if (inputs[i]->BufferObj->Name) { 210 ptr = inputs[i]->Ptr; 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 103 GLuint stride = input->StrideB; 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 103 GLuint stride = input->StrideB; 105 if (input->Type != GL_FLOAT) { 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 142 VB->AttribPtr[attrib]->data = (GLfloat (*)[4])ptr; 143 VB->AttribPtr[attrib]->start = (GLfloat *)ptr; 144 VB->AttribPtr[attrib]->count = count; 145 VB->AttribPtr[attrib]->stride = stride; 146 VB->AttribPtr[attrib]->size = input->Size; 150 VB->AttribPtr[attrib]->flags = (((1<Size)-1) | 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 194 if (inputs[i]->BufferObj->Name) { 210 ptr = inputs[i]->Ptr; 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 101 TNLcontext 
*tnl = TNL_CONTEXT(ctx); 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 103 GLuint stride = input->StrideB; 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 103 GLuint stride = input->StrideB; 105 if (input->Type != GL_FLOAT) { 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 142 VB->AttribPtr[attrib]->data = (GLfloat (*)[4])ptr; 143 VB->AttribPtr[attrib]->start = (GLfloat *)ptr; 144 VB->AttribPtr[attrib]->count = count; 145 VB->AttribPtr[attrib]->stride = stride; 146 VB->AttribPtr[attrib]->size = input->Size; 150 VB->AttribPtr[attrib]->flags = (((1<Size)-1) | 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 194 if (inputs[i]->BufferObj->Name) { 210 ptr = inputs[i]->Ptr; 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 103 GLuint stride = input->StrideB; 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 103 GLuint stride = input->StrideB; 105 if (input->Type != GL_FLOAT) { 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 142 VB->AttribPtr[attrib]->data = (GLfloat (*)[4])ptr; 143 VB->AttribPtr[attrib]->start = (GLfloat *)ptr; 144 VB->AttribPtr[attrib]->count = count; 145 VB->AttribPtr[attrib]->stride = stride; 146 VB->AttribPtr[attrib]->size = input->Size; 150 VB->AttribPtr[attrib]->flags = (((1<Size)-1) | 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 194 if (inputs[i]->BufferObj->Name) { 210 ptr = inputs[i]->Ptr; 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 103 GLuint stride = input->StrideB; 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 103 GLuint stride = input->StrideB; 105 if (input->Type != GL_FLOAT) { 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 142 VB->AttribPtr[attrib]->data = (GLfloat (*)[4])ptr; 143 VB->AttribPtr[attrib]->start = (GLfloat *)ptr; 144 VB->AttribPtr[attrib]->count = count; 145 VB->AttribPtr[attrib]->stride = stride; 146 VB->AttribPtr[attrib]->size = input->Size; 150 VB->AttribPtr[attrib]->flags = (((1<Size)-1) | 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 154 VB->AttribPtr[attrib]->storage = NULL; 191 for (i = 0; i < VERT_ATTRIB_MAX; i++) { 194 if (inputs[i]->BufferObj->Name) { 210 ptr = inputs[i]->Ptr; 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 218 _tnl_import_array(ctx, i, count, inputs[i], ptr); 103 GLuint stride = input->StrideB; 101 TNLcontext *tnl = TNL_CONTEXT(ctx); 103 GLuint stride = input->StrideB; 105 if (input->Type != GL_FLOAT) { 141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib]; 102 struct vertex_buffer *VB = &tnl->vb; 141 VB->AttribPtr[attrib] = 
&tnl->tmp_inputs[attrib];
102 struct vertex_buffer *VB = &tnl->vb;
141 VB->AttribPtr[attrib] = &tnl->tmp_inputs[attrib];
142 VB->AttribPtr[attrib]->data = (GLfloat (*)[4])ptr;
143 VB->AttribPtr[attrib]->start = (GLfloat *)ptr;
144 VB->AttribPtr[attrib]->count = count;
145 VB->AttribPtr[attrib]->stride = stride;
146 VB->AttribPtr[attrib]->size = input->Size;
150 VB->AttribPtr[attrib]->flags = (((1<<input->Size)-1) |
154 VB->AttribPtr[attrib]->storage = NULL;
191 for (i = 0; i < VERT_ATTRIB_MAX; i++) {
154 VB->AttribPtr[attrib]->storage = NULL;
191 for (i = 0; i < VERT_ATTRIB_MAX; i++) {
186 struct vertex_buffer *VB = &tnl->vb;
236 VB->FogCoordPtr = VB->AttribPtr[_TNL_ATTRIB_FOG];
223 VB->Count = count;
186 struct vertex_buffer *VB = &tnl->vb;
228 VB->ObjPtr = VB->AttribPtr[_TNL_ATTRIB_POS];
223 VB->Count = count;
231 VB->ColorPtr[1] = NULL;
233 VB->IndexPtr[1] = NULL;
228 VB->ObjPtr = VB->AttribPtr[_TNL_ATTRIB_POS];
229 VB->NormalPtr = VB->AttribPtr[_TNL_ATTRIB_NORMAL];
235 VB->SecondaryColorPtr[1] = NULL;
229 VB->NormalPtr = VB->AttribPtr[_TNL_ATTRIB_NORMAL];
230 VB->ColorPtr[0] = VB->AttribPtr[_TNL_ATTRIB_COLOR0];
232 VB->IndexPtr[0] = VB->AttribPtr[_TNL_ATTRIB_COLOR_INDEX];
234 VB->SecondaryColorPtr[0] = VB->AttribPtr[_TNL_ATTRIB_COLOR1];
236 VB->FogCoordPtr = VB->AttribPtr[_TNL_ATTRIB_FOG];
239 VB->TexCoordPtr[i] = VB->AttribPtr[_TNL_ATTRIB_TEX0 + i];
238 for (i = 0; i < ctx->Const.MaxTextureCoordUnits; i++) {
239 VB->TexCoordPtr[i] = VB->AttribPtr[_TNL_ATTRIB_TEX0 + i];
238 for (i = 0; i < ctx->Const.MaxTextureCoordUnits; i++) {
239 VB->TexCoordPtr[i] = VB->AttribPtr[_TNL_ATTRIB_TEX0 + i];
238 for (i = 0; i < ctx->Const.MaxTextureCoordUnits; i++) {
246 if (ctx->Polygon.FrontMode != GL_FILL ||
255 VB->EdgeFlag = NULL;
268 struct vertex_buffer *VB = &tnl->vb;
272 if (!ib) {
268 struct vertex_buffer *VB = &tnl->vb;
272 if (!ib) {
273 VB->Elts = NULL;
315 struct vertex_buffer *VB = &tnl->vb;
402 TNL_CONTEXT(ctx)->Driver.RunPipeline(ctx);
317 VB->Primitive = prim;
315 struct vertex_buffer *VB = &tnl->vb;
317 VB->Primitive = prim;
318 VB->PrimitiveCount = nr_prims;
402 TNL_CONTEXT(ctx)->Driver.RunPipeline(ctx);
radeonWrapRunPipeline (ctx=0x880faf0) at radeon_state.c:2341
2341 if (rmesa->NewGLState)
2342 radeonValidateState( ctx );
radeonValidateState (ctx=0x880faf0) at radeon_state.c:2255
2255 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
2254 {
2255 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
2256 GLuint new_state = rmesa->NewGLState;
2258 if (new_state & (_NEW_BUFFERS | _NEW_COLOR | _NEW_PIXEL)) {
2262 if (new_state & _NEW_TEXTURE) {
2263 radeonUpdateTextureState( ctx );
radeonUpdateTextureState (ctx=0x880faf0) at radeon_texstate.c:1364
1364 {
1368 ok = (radeonUpdateTextureUnit( ctx, 0 ) &&
1365 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
1368 ok = (radeonUpdateTextureUnit( ctx, 0 ) &&
radeonUpdateTextureUnit (ctx=0x880faf0, unit=0) at radeon_texstate.c:1342
1342 if ( texUnit->_ReallyEnabled & (TEXTURE_RECT_BIT) ) {
1339 {
1342 if ( texUnit->_ReallyEnabled & (TEXTURE_RECT_BIT) ) {
1346 else if ( texUnit->_ReallyEnabled & (TEXTURE_1D_BIT | TEXTURE_2D_BIT) ) {
1164 struct gl_texture_object *tObj = texUnit->_Current;
1162 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
1164 struct gl_texture_object *tObj = texUnit->_Current;
1165 radeonTexObjPtr t = (radeonTexObjPtr) tObj->DriverData;
1169 if (t->pp_txformat & RADEON_TXFORMAT_NON_POWER2) {
1176 if ( t->base.dirty_images[0] ) {
1351 return (enable_tex_cube( ctx, unit ) &&
update_tex_common (ctx=0x880faf0, unit=0) at radeon_texstate.c:1260
1260
radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
1259 {
1260 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
1261 struct gl_texture_unit *texUnit = &ctx->Texture.Unit[unit];
1262 struct gl_texture_object *tObj = texUnit->_Current;
1263 radeonTexObjPtr t = (radeonTexObjPtr) tObj->DriverData;
1267 if ( tObj->Image[0][tObj->BaseLevel]->Border > 0 ) {
1272 if (unit != 0 && (t->pp_txfilter & RADEON_YUV_TO_RGB))
1278 if ( rmesa->state.texture.unit[unit].texobj != t ) {
1279 if ( rmesa->state.texture.unit[unit].texobj != NULL ) {
1284 rmesa->state.texture.unit[unit].texobj->base.bound &=
1288 rmesa->state.texture.unit[unit].texobj = t;
1289 t->base.bound |= (1UL << unit);
1291 driUpdateTextureLRU( (driTextureObject *) t ); /* XXX: should be locked! */
1288 rmesa->state.texture.unit[unit].texobj = t;
1289 t->base.bound |= (1UL << unit);
1290 t->dirty_state |= 1<heap;
231 if ( heap != NULL ) {
233 start = t->memBlock->ofs >> shift;
234 end = (t->memBlock->ofs + t->memBlock->size - 1) >> shift;
237 heap->local_age = ++heap->global_age[0];
238 list = heap->global_regions;
244 move_to_head( & heap->texture_objects, t );
248 list[i].age = heap->local_age;
252 list[(unsigned)list[i].next].prev = list[i].prev;
248 list[i].age = heap->local_age;
252 list[(unsigned)list[i].next].prev = list[i].prev;
253 list[(unsigned)list[i].prev].next = list[i].next;
257 list[i].prev = heap->nrRegions;
258 list[i].next = list[heap->nrRegions].next;
259 list[(unsigned)list[heap->nrRegions].next].prev = i;
260 list[heap->nrRegions].next = i;
247 for (i = start ; i <= end ; i++) {
248 list[i].age = heap->local_age;
252 list[(unsigned)list[i].next].prev = list[i].prev;
248 list[i].age = heap->local_age;
252 list[(unsigned)list[i].next].prev = list[i].prev;
253 list[(unsigned)list[i].prev].next = list[i].next;
257 list[i].prev = heap->nrRegions;
258 list[i].next = list[heap->nrRegions].next;
259 list[(unsigned)list[heap->nrRegions].next].prev = i;
260 list[heap->nrRegions].next = i;
247 for (i = start ; i <= end ; i++) {
248 list[i].age = heap->local_age;
252 list[(unsigned)list[i].next].prev = list[i].prev;
248 list[i].age = heap->local_age;
252 list[(unsigned)list[i].next].prev = list[i].prev;
253 list[(unsigned)list[i].prev].next = list[i].next;
257 list[i].prev = heap->nrRegions;
258 list[i].next = list[heap->nrRegions].next;
259 list[(unsigned)list[heap->nrRegions].next].prev = i;
260 list[heap->nrRegions].next = i;
247 for (i = start ; i <= end ; i++) {
248 list[i].age = heap->local_age;
252 list[(unsigned)list[i].next].prev = list[i].prev;
248 list[i].age = heap->local_age;
252 list[(unsigned)list[i].next].prev = list[i].prev;
253 list[(unsigned)list[i].prev].next = list[i].next;
257 list[i].prev = heap->nrRegions;
258 list[i].next = list[heap->nrRegions].next;
259 list[(unsigned)list[heap->nrRegions].next].prev = i;
260 list[heap->nrRegions].next = i;
247 for (i = start ; i <= end ; i++) {
268 }
update_tex_common (ctx=0x880faf0, unit=0) at radeon_texstate.c:1297
1297 if ( !(rmesa->hw.ctx.cmd[CTX_PP_CNTL] & (RADEON_TEX_0_ENABLE<dirty_state & (1<hw.tex[unit].cmd[TEX_CMD_0];
910 GLuint se_coord_fmt = rmesa->hw.set.cmd[SET_SE_COORDFMT];
909 int *cmd = &rmesa->hw.tex[unit].cmd[TEX_CMD_0];
910 GLuint se_coord_fmt = rmesa->hw.set.cmd[SET_SE_COORDFMT];
912 RADEON_STATECHANGE( rmesa, tex[unit] );
914 cmd[TEX_PP_TXFILTER] &= ~TEXOBJ_TXFILTER_MASK;
915 cmd[TEX_PP_TXFILTER] |= texobj->pp_txfilter & TEXOBJ_TXFILTER_MASK;
916 cmd[TEX_PP_TXFORMAT] &= ~TEXOBJ_TXFORMAT_MASK;
917 cmd[TEX_PP_TXFORMAT] |= texobj->pp_txformat &
TEXOBJ_TXFORMAT_MASK;
918 cmd[TEX_PP_TXOFFSET] = texobj->pp_txoffset;
919 cmd[TEX_PP_BORDER_COLOR] = texobj->pp_border_color;
921 if (texobj->base.tObj->Target == GL_TEXTURE_RECTANGLE_NV) {
929 se_coord_fmt &= ~(RADEON_VTX_ST0_NONPARAMETRIC << unit);
931 if (texobj->base.tObj->Target == GL_TEXTURE_CUBE_MAP) {
950 if (se_coord_fmt != rmesa->hw.set.cmd[SET_SE_COORDFMT]) {
955 texobj->dirty_state &= ~(1<NewGLState |= _NEW_TEXTURE_MATRIX;
1315 if (rmesa->recheck_texgen[unit]) {
1322 format = tObj->Image[0][tObj->BaseLevel]->_BaseFormat;
1323 if ( rmesa->state.texture.unit[unit].format != format ||
1322 format = tObj->Image[0][tObj->BaseLevel]->_BaseFormat;
1323 if ( rmesa->state.texture.unit[unit].format != format ||
1325 rmesa->state.texture.unit[unit].format = format;
1326 rmesa->state.texture.unit[unit].envMode = texUnit->EnvMode;
506 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
507 const struct gl_texture_unit *texUnit = &ctx->Texture.Unit[unit];
506 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
507 const struct gl_texture_unit *texUnit = &ctx->Texture.Unit[unit];
506 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
520 assert( (texUnit->_ReallyEnabled == 0)
523 if ( RADEON_DEBUG & DEBUG_TEXTURE ) {
536 rmesa->state.texture.unit[unit].format = 0;
537 rmesa->state.texture.unit[unit].envMode = 0;
539 if ( !texUnit->_ReallyEnabled ) {
546 const GLuint numColorArgs = texUnit->_CurrentCombine->_NumArgsRGB;
549 GLuint Ashift = texUnit->_CurrentCombine->ScaleShiftA;
547 const GLuint numAlphaArgs = texUnit->_CurrentCombine->_NumArgsA;
548 GLuint RGBshift = texUnit->_CurrentCombine->ScaleShiftRGB;
549 GLuint Ashift = texUnit->_CurrentCombine->ScaleShiftA;
557 const GLuint srcRGBi = texUnit->_CurrentCombine->SourceRGB[i];
558 assert(op >= 0);
559 assert(op <= 3);
560 switch ( srcRGBi ) {
562 if (texUnit->_Current->Image[0][0]->_BaseFormat == GL_ALPHA)
565 color_arg[i] = radeon_texture_color[op][unit];
555 for ( i = 0 ; i < numColorArgs ; i++ ) {
557 const GLuint srcRGBi = texUnit->_CurrentCombine->SourceRGB[i];
558 assert(op >= 0);
559 assert(op <= 3);
560 switch ( srcRGBi ) {
574 color_arg[i] = radeon_previous_color[op];
555 for ( i = 0 ; i < numColorArgs ; i++ ) {
602 const GLuint srcAi = texUnit->_CurrentCombine->SourceA[i];
603 assert(op >= 0);
604 assert(op <= 1);
605 switch ( srcAi ) {
607 if (texUnit->_Current->Image[0][0]->_BaseFormat == GL_LUMINANCE)
610 alpha_arg[i] = radeon_texture_alpha[op][unit];
600 for ( i = 0 ; i < numAlphaArgs ; i++ ) {
602 const GLuint srcAi = texUnit->_CurrentCombine->SourceA[i];
603 assert(op >= 0);
604 assert(op <= 1);
605 switch ( srcAi ) {
619 alpha_arg[i] = radeon_previous_alpha[op];
600 for ( i = 0 ; i < numAlphaArgs ; i++ ) {
645 switch ( texUnit->_CurrentCombine->ModeRGB ) {
657 RADEON_COLOR_ARG( 0, A );
658 RADEON_COLOR_ARG( 1, B );
749 switch ( texUnit->_CurrentCombine->ModeA ) {
761 RADEON_ALPHA_ARG( 0, A );
762 RADEON_ALPHA_ARG( 1, B );
821 if ( (texUnit->_CurrentCombine->ModeRGB == GL_DOT3_RGB_EXT)
829 color_combine |= (RGBshift << RADEON_SCALE_SHIFT);
830 alpha_combine |= (Ashift << RADEON_SCALE_SHIFT);
829 color_combine |= (RGBshift << RADEON_SCALE_SHIFT);
830 alpha_combine |= (Ashift << RADEON_SCALE_SHIFT);
836 if ( rmesa->hw.tex[unit].cmd[TEX_PP_TXCBLEND] != color_combine ||
1332 FALLBACK( rmesa, RADEON_FALLBACK_BORDER_MODE, t->border_fallback );
radeonFallback (ctx=0x880faf0, bit=128, mode=0 '\0') at radeon_swtcl.c:798
798 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
797 {
802 if (mode) {
798 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
799
radeonFallback (ctx=0x880faf0, bit=128, mode=0 '\0') at radeon_swtcl.c:798 798 radeonContextPtr rmesa = RADEON_CONTEXT(ctx); 797 { 802 if (mode) { 798 radeonContextPtr rmesa = RADEON_CONTEXT(ctx); 799 TNLcontext *tnl = TNL_CONTEXT(ctx); 800 GLuint oldfallback = rmesa->Fallback; 802 if (mode) { 816 rmesa->Fallback &= ~bit; 817 if (oldfallback == bit) { 846 } update_tex_common (ctx=0x880faf0, unit=0) at radeon_texstate.c:1333 1333 return !t->border_fallback; 1334 } radeonUpdateTextureUnit (ctx=0x880faf0, unit=0) at radeon_texstate.c:1361 1361 } radeonUpdateTextureUnit (ctx=0x880faf0, unit=1) at radeon_texstate.c:1342 1342 if ( texUnit->_ReallyEnabled & (TEXTURE_RECT_BIT) ) { 1339 { 1342 if ( texUnit->_ReallyEnabled & (TEXTURE_RECT_BIT) ) { 1346 else if ( texUnit->_ReallyEnabled & (TEXTURE_1D_BIT | TEXTURE_2D_BIT) ) { 1350 else if ( texUnit->_ReallyEnabled & (TEXTURE_CUBE_BIT) ) { 1354 else if ( texUnit->_ReallyEnabled ) { 1106 radeonContextPtr rmesa = RADEON_CONTEXT(ctx); 1108 if (rmesa->hw.ctx.cmd[CTX_PP_CNTL] & (RADEON_TEX_0_ENABLE<<unit)) { [...] rmesa->NewGLState |= _NEW_TEXTURE_MATRIX; 1361 } radeonUpdateTextureUnit (ctx=0x880faf0, unit=2) at radeon_texstate.c:1342 1342 if ( texUnit->_ReallyEnabled & (TEXTURE_RECT_BIT) ) { 1339 { 1342 if ( texUnit->_ReallyEnabled & (TEXTURE_RECT_BIT) ) { 1346 else if ( texUnit->_ReallyEnabled & (TEXTURE_1D_BIT | TEXTURE_2D_BIT) ) { 1350 else if ( texUnit->_ReallyEnabled & (TEXTURE_CUBE_BIT) ) { 1354 else if ( texUnit->_ReallyEnabled ) { 1106 radeonContextPtr rmesa = RADEON_CONTEXT(ctx); 1108 if (rmesa->hw.ctx.cmd[CTX_PP_CNTL] & (RADEON_TEX_0_ENABLE<<unit)) { [...] rmesa->NewGLState |= _NEW_TEXTURE_MATRIX; 1361 } radeonUpdateTextureState (ctx=0x880faf0) at radeon_texstate.c:1372 1372 FALLBACK( rmesa, RADEON_FALLBACK_TEXTURE, !ok ); radeonFallback (ctx=0x880faf0, bit=1, mode=0 '\0') at radeon_swtcl.c:798 798 radeonContextPtr rmesa = RADEON_CONTEXT(ctx); 797 { 802 if (mode) { 798 radeonContextPtr rmesa = RADEON_CONTEXT(ctx); 799 TNLcontext *tnl = TNL_CONTEXT(ctx); 800 GLuint oldfallback = rmesa->Fallback; 802 if (mode) { 816 rmesa->Fallback &= ~bit; 817 if (oldfallback == bit) { 846 } radeonUpdateTextureState (ctx=0x880faf0) at radeon_texstate.c:1374 1374 if (rmesa->TclFallback) 1375 radeonChooseVertexState( ctx ); radeonChooseVertexState (ctx=0x880faf0) at radeon_swtcl.c:244 244 radeonContextPtr rmesa = RADEON_CONTEXT( ctx ); 245 TNLcontext *tnl = TNL_CONTEXT(ctx); 247 GLuint se_coord_fmt = rmesa->hw.set.cmd[SET_SE_COORDFMT]; 257 if (rmesa->Fallback != 0) 247 GLuint se_coord_fmt = rmesa->hw.set.cmd[SET_SE_COORDFMT]; 257 if (rmesa->Fallback != 0) 249 se_coord_fmt &= ~(RADEON_VTX_XY_PRE_MULT_1_OVER_W0 | 264 if ((!RENDERINPUTS_TEST_RANGE( tnl->render_inputs_bitset, _TNL_FIRST_TEX, _TNL_LAST_TEX ) && 273 se_coord_fmt |= (RADEON_VTX_W0_IS_NOT_1_OVER_W0); 272 rmesa->swtcl.needproj = GL_FALSE; 273 se_coord_fmt |= (RADEON_VTX_W0_IS_NOT_1_OVER_W0); 276 _tnl_need_projected_coords( ctx, rmesa->swtcl.needproj ); _tnl_need_projected_coords (ctx=0x880faf0, mode=0 '\0') at tnl/t_context.c:201 201 tnl->NeedNdcCoords = mode; 202 } radeonChooseVertexState (ctx=0x880faf0) at radeon_swtcl.c:278 278 if ( se_coord_fmt != rmesa->hw.set.cmd[SET_SE_COORDFMT] ) { 282 } radeonUpdateTextureState (ctx=0x880faf0) at radeon_texstate.c:1376 1376 } radeonValidateState (ctx=0x880faf0) at radeon_state.c:2264 2264 new_state |= rmesa->NewGLState; /* may add TEXTURE_MATRIX */ 2269 if (new_state & (_NEW_MODELVIEW|_NEW_PROJECTION)) 2270 upload_matrix( rmesa, ctx->_ModelProjectMatrix.m, MODEL_PROJ );
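Annotation: radeonValidateState() above accumulates _NEW_* dirty bits (new_state |= rmesa->NewGLState) and re-uploads only the state groups whose bits are set before clearing the accumulator. A rough sketch of that dirty-bit dispatch; the flag values and helper are illustrative, not Mesa's:

    #include <stdio.h>

    /* Illustrative dirty-bit values only; Mesa's _NEW_* flags differ. */
    #define NEW_MODELVIEW        0x1
    #define NEW_PROJECTION       0x2
    #define NEW_TEXTURE_MATRIX   0x4

    static unsigned new_gl_state;              /* stands in for rmesa->NewGLState */

    static void upload(const char *what) { printf("re-upload %s\n", what); }

    /* Accumulate queued dirty bits, touch only the affected state groups,
     * then clear the accumulator -- the shape of radeonValidateState(). */
    static void validate_state(unsigned new_state)
    {
        new_state |= new_gl_state;             /* "may add TEXTURE_MATRIX", as the comment at 2264 says */

        if (new_state & (NEW_MODELVIEW | NEW_PROJECTION))
            upload("combined model-projection matrix");
        if (new_state & NEW_MODELVIEW)
            upload("modelview matrix and its inverse transpose");
        if (new_state & NEW_TEXTURE_MATRIX)
            upload("texture-coordinate transforms");

        new_gl_state = 0;
    }

    int main(void)
    {
        new_gl_state = NEW_TEXTURE_MATRIX;     /* queued earlier, e.g. by a texture-unit change */
        validate_state(NEW_MODELVIEW);         /* this draw's direct changes */
        return 0;
    }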
upload_matrix (rmesa=0x8807fe0, src=0x8841950, idx=<value optimized out>) at radeon_state.c:2124 2124 float *dest = ((float *)RADEON_DB_STATE( mat[idx] ))+MAT_ELT_0; 2123 { 2124 float *dest = ((float *)RADEON_DB_STATE( mat[idx] ))+MAT_ELT_0; 2123 { 2124 float *dest = ((float *)RADEON_DB_STATE( mat[idx] ))+MAT_ELT_0; 2129 *dest++ = src[i]; 2130 *dest++ = src[i+4]; 2131 *dest++ = src[i+8]; 2132 *dest++ = src[i+12]; 2128 for (i = 0 ; i < 4 ; i++) { 2132 *dest++ = src[i+12]; 2128 for (i = 0 ; i < 4 ; i++) { 2129 *dest++ = src[i]; 2130 *dest++ = src[i+4]; 2131 *dest++ = src[i+8]; 2132 *dest++ = src[i+12]; 2128 for (i = 0 ; i < 4 ; i++) { 2132 *dest++ = src[i+12]; 2128 for (i = 0 ; i < 4 ; i++) { 2129 *dest++ = src[i]; 2130 *dest++ = src[i+4]; 2131 *dest++ = src[i+8]; 2132 *dest++ = src[i+12]; 2128 for (i = 0 ; i < 4 ; i++) { 2132 *dest++ = src[i+12]; 2128 for (i = 0 ; i < 4 ; i++) { 2129 *dest++ = src[i]; 2130 *dest++ = src[i+4]; 2131 *dest++ = src[i+8]; 2132 *dest++ = src[i+12]; 2128 for (i = 0 ; i < 4 ; i++) { 2132 *dest++ = src[i+12]; 2128 for (i = 0 ; i < 4 ; i++) { 2135 RADEON_DB_STATECHANGE( rmesa, &rmesa->hw.mat[idx] ); 2136 } 2135 RADEON_DB_STATECHANGE( rmesa, &rmesa->hw.mat[idx] ); 2136 } upload_matrix (rmesa=0x8807fe0, src=<value optimized out>, idx=-1073749876) at radeon_state.c:2135 2135 RADEON_DB_STATECHANGE( rmesa, &rmesa->hw.mat[idx] ); RADEON_DB_STATECHANGE (rmesa=0x8807fe0, atom=0x8808224) at radeon_ioctl.h:129 129 { 130 if (memcmp(atom->cmd, atom->lastcmd, atom->cmd_size*4)) { 129 { 130 if (memcmp(atom->cmd, atom->lastcmd, atom->cmd_size*4)) { 132 RADEON_NEWPRIM( rmesa ); 133 atom->dirty = GL_TRUE; 134 rmesa->hw.is_dirty = GL_TRUE; 135 tmp = atom->cmd; 136 atom->cmd = atom->lastcmd; 137 atom->lastcmd = tmp; 136 atom->cmd = atom->lastcmd; 142 } radeonValidateState (ctx=0x880faf0) at radeon_state.c:2274 2274 if (new_state & (_NEW_MODELVIEW)) { 2275 upload_matrix( rmesa, ctx->ModelviewMatrixStack.Top->m, MODEL ); upload_matrix (rmesa=0x8807fe0, src=0x8832730, idx=<value optimized out>) at radeon_state.c:2124 2124 float *dest = ((float *)RADEON_DB_STATE( mat[idx] ))+MAT_ELT_0; 2123 { 2124 float *dest = ((float *)RADEON_DB_STATE( mat[idx] ))+MAT_ELT_0; 2123 { 2124 float *dest = ((float *)RADEON_DB_STATE( mat[idx] ))+MAT_ELT_0; 2129 *dest++ = src[i]; 2130 *dest++ = src[i+4]; 2131 *dest++ = src[i+8]; 2132 *dest++ = src[i+12]; 2128 for (i = 0 ; i < 4 ; i++) { 2132 *dest++ = src[i+12]; 2128 for (i = 0 ; i < 4 ; i++) { 2129 *dest++ = src[i]; 2130 *dest++ = src[i+4]; 2131 *dest++ = src[i+8]; 2132 *dest++ = src[i+12]; 2128 for (i = 0 ; i < 4 ; i++) { 2132 *dest++ = src[i+12]; 2128 for (i = 0 ; i < 4 ; i++) { 2129 *dest++ = src[i]; 2130 *dest++ = src[i+4]; 2131 *dest++ = src[i+8]; 2132 *dest++ = src[i+12]; 2128 for (i = 0 ; i < 4 ; i++) { 2132 *dest++ = src[i+12]; 2128 for (i = 0 ; i < 4 ; i++) { 2129 *dest++ = src[i]; 2130 *dest++ = src[i+4]; 2131 *dest++ = src[i+8]; 2132 *dest++ = src[i+12]; 2128 for (i = 0 ; i < 4 ; i++) { 2132 *dest++ = src[i+12]; 2128 for (i = 0 ; i < 4 ; i++) { 2135 RADEON_DB_STATECHANGE( rmesa, &rmesa->hw.mat[idx] ); 2136 } 2135 RADEON_DB_STATECHANGE( rmesa, &rmesa->hw.mat[idx] ); 2136 } upload_matrix (rmesa=0x8807fe0, src=<value optimized out>, idx=-1073749876) at radeon_state.c:2135 2135 RADEON_DB_STATECHANGE( rmesa, &rmesa->hw.mat[idx] ); RADEON_DB_STATECHANGE (rmesa=0x8807fe0, atom=0x8808248) at radeon_ioctl.h:129 129 { 130 if (memcmp(atom->cmd, atom->lastcmd, atom->cmd_size*4)) { 129 { 130 if (memcmp(atom->cmd, atom->lastcmd, atom->cmd_size*4)) { 142 } radeonValidateState (ctx=0x880faf0) at radeon_state.c:2276 2276 upload_matrix_t( rmesa, ctx->ModelviewMatrixStack.Top->inv, MODEL_IT ); 2140 float *dest = ((float *)RADEON_DB_STATE( mat[idx] ))+MAT_ELT_0; 2276 upload_matrix_t( rmesa, ctx->ModelviewMatrixStack.Top->inv, MODEL_IT );
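Annotation: the upload_matrix() loop stepped through above (radeon_state.c:2128-2132) writes the OpenGL column-major 4x4 matrix out in transposed, row-interleaved order, and RADEON_DB_STATECHANGE() then memcmp()s the freshly built command buffer against the previous one, swapping the two only when the bytes actually differ. A sketch of just the reordering copy:

    #include <stdio.h>

    /* Copy a 4x4 matrix, stored in OpenGL column-major order, into the
     * row-interleaved layout the traced loop produces: for each i the
     * destination receives src[i], src[i+4], src[i+8], src[i+12]. */
    static void upload_matrix(float *dest, const float *src)
    {
        int i;
        for (i = 0; i < 4; i++) {
            *dest++ = src[i];
            *dest++ = src[i + 4];
            *dest++ = src[i + 8];
            *dest++ = src[i + 12];
        }
    }

    int main(void)
    {
        float src[16], dest[16];
        int i;
        for (i = 0; i < 16; i++)
            src[i] = (float) i;
        upload_matrix(dest, src);
        printf("dest[1] = %g (was src[4])\n", dest[1]);   /* transposed element */
        return 0;
    }

The double-buffered compare is why the log shows a memcmp() for every matrix but RADEON_NEWPRIM() and the cmd/lastcmd swap only for the atoms that changed.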
2140 float *dest = ((float *)RADEON_DB_STATE( mat[idx] ))+MAT_ELT_0; 2141 memcpy(dest, src, 16*sizeof(float)); 2142 RADEON_DB_STATECHANGE( rmesa, &rmesa->hw.mat[idx] ); RADEON_DB_STATECHANGE (rmesa=0x8807fe0, atom=0x880826c) at radeon_ioctl.h:130 130 if (memcmp(atom->cmd, atom->lastcmd, atom->cmd_size*4)) { 129 { 130 if (memcmp(atom->cmd, atom->lastcmd, atom->cmd_size*4)) { 142 } radeonValidateState (ctx=0x880faf0) at radeon_state.c:2282 2282 if (new_state & _NEW_TEXTURE_MATRIX) { 2148 radeonContextPtr rmesa = RADEON_CONTEXT( ctx ); 2154 rmesa->TexMatColSwap = 0; 2148 radeonContextPtr rmesa = RADEON_CONTEXT( ctx ); 2150 GLuint vs = rmesa->hw.tcl.cmd[TCL_OUTPUT_VTXSEL]; 2153 rmesa->NeedTexMatrix = 0; 2154 rmesa->TexMatColSwap = 0; 2157 if (ctx->Texture.Unit[unit]._ReallyEnabled) { 2159 if (ctx->TextureMatrixStack[unit].Top->type != MATRIX_IDENTITY) { 2177 else if (rmesa->TexGenEnabled & (RADEON_TEXMAT_0_ENABLE << unit)) { 2156 for (unit = 0 ; unit < ctx->Const.MaxTextureUnits; unit++) { 2157 if (ctx->Texture.Unit[unit]._ReallyEnabled) { 2156 for (unit = 0 ; unit < ctx->Const.MaxTextureUnits; unit++) { 2157 if (ctx->Texture.Unit[unit]._ReallyEnabled) { 2156 for (unit = 0 ; unit < ctx->Const.MaxTextureUnits; unit++) { 2189 tpc = (texMatEnabled | rmesa->TexGenEnabled); 2196 vs |= (((tpc & RADEON_TEXGEN_TEXMAT_0_ENABLE) << 2189 tpc = (texMatEnabled | rmesa->TexGenEnabled); 2196 vs |= (((tpc & RADEON_TEXGEN_TEXMAT_0_ENABLE) << 2203 if (tpc != rmesa->hw.tcl.cmd[TCL_TEXTURE_PROC_CTL] || 2196 vs |= (((tpc & RADEON_TEXGEN_TEXMAT_0_ENABLE) << 2203 if (tpc != rmesa->hw.tcl.cmd[TCL_TEXTURE_PROC_CTL] || 2286 if (new_state & (_NEW_LIGHT|_NEW_MODELVIEW|_MESA_NEW_NEED_EYE_COORDS)) { 981 radeonContextPtr rmesa = RADEON_CONTEXT(ctx); 990 if (ctx->_NeedEyeCoords) 988 GLuint tmp = rmesa->hw.tcl.cmd[TCL_LIGHT_MODEL_CTL]; 990 if (ctx->_NeedEyeCoords) 993 tmp |= RADEON_LIGHT_IN_MODELSPACE; 999 if (tmp != rmesa->hw.tcl.cmd[TCL_LIGHT_MODEL_CTL]) 1007 GLfloat *fcmd = (GLfloat *)RADEON_DB_STATE( eye ); 1008 fcmd[EYE_X] = ctx->_EyeZDir[0]; 1009 fcmd[EYE_Y] = ctx->_EyeZDir[1]; 1010 fcmd[EYE_Z] = - ctx->_EyeZDir[2]; 1011 fcmd[EYE_RESCALE_FACTOR] = ctx->_ModelViewInvScale; 1012 RADEON_DB_STATECHANGE( rmesa, &rmesa->hw.eye ); RADEON_DB_STATECHANGE (rmesa=0x8807fe0, atom=0x88084f4) at radeon_ioctl.h:130 130 if (memcmp(atom->cmd, atom->lastcmd, atom->cmd_size*4)) { 129 { 130 if (memcmp(atom->cmd, atom->lastcmd, atom->cmd_size*4)) { 142 } radeonValidateState (ctx=0x880faf0) at radeon_state.c:1017 1017 if (ctx->Light.Enabled) { 2292 if (new_state & (_NEW_PROJECTION)) { 2293 if (ctx->Transform.ClipPlanesEnabled) 2298 rmesa->NewGLState = 0; 2299 } radeonWrapRunPipeline (ctx=0x880faf0) at radeon_state.c:2344 2344 has_material = (ctx->Light.Enabled && check_material( ctx )); 2352 _tnl_run_pipeline( ctx ); _tnl_run_pipeline (ctx=0x880faf0) at tnl/t_pipeline.c:123 123 TNLcontext *tnl = TNL_CONTEXT(ctx); 122 { 123 TNLcontext *tnl = TNL_CONTEXT(ctx); 127 if (!tnl->vb.Count) 128 return; 81 if (tnl->vb.AttribPtr[i]->size != tnl->pipeline.last_attrib_size[i] || 83 tnl->pipeline.last_attrib_size[i] = tnl->vb.AttribPtr[i]->size; 84 tnl->pipeline.last_attrib_stride[i] = tnl->vb.AttribPtr[i]->stride; 85 tnl->pipeline.input_changes |= 1<<i;
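Annotation: the t_pipeline.c lines 80-85 above (and repeated below) cache each vertex attribute's size and stride and set one bit per attribute whose layout changed since the last draw. A self-contained sketch of that check, with a made-up attribute count and struct:

    #include <stdio.h>

    #define NR_ATTRIBS 4        /* stand-in for the _TNL_LAST_MAT range in the trace */

    struct attrib { int size, stride; };

    static int last_size[NR_ATTRIBS], last_stride[NR_ATTRIBS];

    /* Set one bit per attribute whose size or stride changed since the previous
     * draw, caching the new values -- the loop at t_pipeline.c:80-85 above. */
    static unsigned check_input_changes(const struct attrib *attribs)
    {
        unsigned changes = 0;
        int i;
        for (i = 0; i < NR_ATTRIBS; i++) {
            if (attribs[i].size != last_size[i] ||
                attribs[i].stride != last_stride[i]) {
                last_size[i] = attribs[i].size;
                last_stride[i] = attribs[i].stride;
                changes |= 1u << i;
            }
        }
        return changes;
    }

    int main(void)
    {
        struct attrib a[NR_ATTRIBS] = { {4, 16}, {4, 16}, {2, 8}, {4, 16} };
        printf("first pass:  0x%x\n", check_input_changes(a));   /* every slot differs from the zeroed cache */
        printf("second pass: 0x%x\n", check_input_changes(a));   /* nothing changed */
        return 0;
    }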
80 for (i = 0; i <= _TNL_LAST_MAT; i++) { 81 if (tnl->vb.AttribPtr[i]->size != tnl->pipeline.last_attrib_size[i] || 80 for (i = 0; i <= _TNL_LAST_MAT; i++) { 81 if (tnl->vb.AttribPtr[i]->size != tnl->pipeline.last_attrib_size[i] || 80 for (i = 0; i <= _TNL_LAST_MAT; i++) { 81 if (tnl->vb.AttribPtr[i]->size != tnl->pipeline.last_attrib_size[i] || 83 tnl->pipeline.last_attrib_size[i] = tnl->vb.AttribPtr[i]->size; 84 tnl->pipeline.last_attrib_stride[i] = tnl->vb.AttribPtr[i]->stride; 85 tnl->pipeline.input_changes |= 1<<i; 80 for (i = 0; i <= _TNL_LAST_MAT; i++) { 81 if (tnl->vb.AttribPtr[i]->size != tnl->pipeline.last_attrib_size[i] || 80 for (i = 0; i <= _TNL_LAST_MAT; i++) { 81 if (tnl->vb.AttribPtr[i]->size != tnl->pipeline.last_attrib_size[i] || 80 for (i = 0; i <= _TNL_LAST_MAT; i++) { 81 if (tnl->vb.AttribPtr[i]->size != tnl->pipeline.last_attrib_size[i] || 80 for (i = 0; i <= _TNL_LAST_MAT; i++) { 81 if (tnl->vb.AttribPtr[i]->size != tnl->pipeline.last_attrib_size[i] || 80 for (i = 0; i <= _TNL_LAST_MAT; i++) { 81 if (tnl->vb.AttribPtr[i]->size != tnl->pipeline.last_attrib_size[i] || 83 tnl->pipeline.last_attrib_size[i] = tnl->vb.AttribPtr[i]->size; 84 tnl->pipeline.last_attrib_stride[i] = tnl->vb.AttribPtr[i]->stride; 85 tnl->pipeline.input_changes |= 1<<i; 80 for (i = 0; i <= _TNL_LAST_MAT; i++) { 81 if (tnl->vb.AttribPtr[i]->size != tnl->pipeline.last_attrib_size[i] || [... lines 80 and 81 repeat for the remaining vertex attributes ...] 80 for (i = 0; i <= _TNL_LAST_MAT; i++) { 89 if (tnl->pipeline.input_changes && 134 if (ctx->VertexProgram._MaintainTnlProgram) 139 if (s->validate) 137 for (i = 0; i < tnl->pipeline.nr_stages ; i++) { 139 if (s->validate) 137 for (i = 0; i < tnl->pipeline.nr_stages ; i++) { 139 if (s->validate) 140 s->validate( ctx, s ); validate_normal_stage (ctx=0x880faf0, stage=0x8869a1c) at tnl/t_vb_normals.c:96 96 struct normal_stage_data *store = NORMAL_STAGE_DATA(stage); 95 { 98 if (ctx->VertexProgram._Current || 96 struct normal_stage_data *store =
NORMAL_STAGE_DATA(stage); 98 if (ctx->VertexProgram._Current || 141 store->NormalTransform = NULL; 144 } _tnl_run_pipeline (ctx=0x880faf0) at tnl/t_pipeline.c:137 137 for (i = 0; i < tnl->pipeline.nr_stages ; i++) { 139 if (s->validate) 140 s->validate( ctx, s ); validate_lighting (ctx=0x880faf0, stage=0x8869a34) at tnl/t_vb_light.c:264 264 if (!ctx->Light.Enabled || ctx->VertexProgram._Current) 290 } _tnl_run_pipeline (ctx=0x880faf0) at tnl/t_pipeline.c:137 137 for (i = 0; i < tnl->pipeline.nr_stages ; i++) { 139 if (s->validate) 137 for (i = 0; i < tnl->pipeline.nr_stages ; i++) { 139 if (s->validate) 140 s->validate( ctx, s ); validate_texgen_stage (ctx=0x880faf0, stage=0x8869a64) at tnl/t_vb_texgen.c:513 513 struct texgen_stage_data *store = TEXGEN_STAGE_DATA(stage); 512 { 513 struct texgen_stage_data *store = TEXGEN_STAGE_DATA(stage); 516 if (!ctx->Texture._TexGenEnabled || ctx->VertexProgram._Current) 512 { 516 if (!ctx->Texture._TexGenEnabled || ctx->VertexProgram._Current) 552 } _tnl_run_pipeline (ctx=0x880faf0) at tnl/t_pipeline.c:137 137 for (i = 0; i < tnl->pipeline.nr_stages ; i++) { 139 if (s->validate) 137 for (i = 0; i < tnl->pipeline.nr_stages ; i++) { 139 if (s->validate) 137 for (i = 0; i < tnl->pipeline.nr_stages ; i++) { 139 if (s->validate) 137 for (i = 0; i < tnl->pipeline.nr_stages ; i++) { 151 _tnl_notify_pipeline_output_change( ctx ); 143 tnl->pipeline.new_state = 0; 144 tnl->pipeline.input_changes = 0; 151 _tnl_notify_pipeline_output_change( ctx ); _tnl_notify_pipeline_output_change (ctx=0x880faf0) at tnl/t_vertex.c:374 374 { 282 vtx->emit = choose_emit_func; 374 { 282 vtx->emit = choose_emit_func; 283 vtx->interp = choose_interp_func; 284 vtx->copy_pv = choose_copy_pv_func; 285 vtx->new_inputs = ~0; 377 } _tnl_run_pipeline (ctx=0x880faf0) at tnl/t_pipeline.c:154 154 START_FAST_MATH(__tmp); 158 if (!s->run( ctx, s )) radeon_run_tcl_render (ctx=0x880faf0, stage=0x88699ec) at radeon_tcl.c:374 374 radeonContextPtr rmesa = RADEON_CONTEXT(ctx); 375 TNLcontext *tnl = TNL_CONTEXT(ctx); 373 { 374 radeonContextPtr rmesa = RADEON_CONTEXT(ctx); 382 if (rmesa->TclFallback) 375 TNLcontext *tnl = TNL_CONTEXT(ctx); 382 if (rmesa->TclFallback) 434 } _tnl_run_pipeline (ctx=0x880faf0) at tnl/t_pipeline.c:156 156 for (i = 0; i < tnl->pipeline.nr_stages ; i++) { 158 if (!s->run( ctx, s )) run_vertex_stage (ctx=0x880faf0, stage=0x8869a04) at tnl/t_vb_vertex.c:125 125 struct vertex_stage_data *store = (struct vertex_stage_data *)stage->privatePtr; 129 if (ctx->VertexProgram._Current) 124 { 125 struct vertex_stage_data *store = (struct vertex_stage_data *)stage->privatePtr; 126 TNLcontext *tnl = TNL_CONTEXT(ctx); 129 if (ctx->VertexProgram._Current) 127 struct vertex_buffer *VB = &tnl->vb; 132 if (ctx->_NeedEyeCoords) { 144 VB->ClipPtr = TransformRaw( &store->clip, transform_points2_general (to_vec=0x886a7ec, m=0x8841950, from_vec=0x886a3c8) at math/m_xform_tmp.h:243 243 { 244 const GLuint stride = from_vec->stride; 247 GLuint count = from_vec->count; 245 GLfloat *from = from_vec->start; 247 GLuint count = from_vec->count; 251 const GLfloat m3 = m[3], m7 = m[7], m15 = m[15]; 248 const GLfloat m0 = m[0], m4 = m[4], m12 = m[12]; 249 const GLfloat m1 = m[1], m5 = m[5], m13 = m[13]; 250 const GLfloat m2 = m[2], m6 = m[6], m14 = m[14]; 251 const GLfloat m3 = m[3], m7 = m[7], m15 = m[15]; 254 const GLfloat ox = from[0], oy = from[1]; 253 STRIDE_LOOP { 254 const GLfloat ox = from[0], oy = from[1]; 255 to[i][0] = m0 * ox + m4 * oy + m12; 256 to[i][1] = m1 * ox + m5 * oy + m13; 257 to[i][2] = m2 
* ox + m6 * oy + m14; 258 to[i][3] = m3 * ox + m7 * oy + m15; 253 STRIDE_LOOP { 254 const GLfloat ox = from[0], oy = from[1]; 253 STRIDE_LOOP { 254 const GLfloat ox = from[0], oy = from[1]; 255 to[i][0] = m0 * ox + m4 * oy + m12; 256 to[i][1] = m1 * ox + m5 * oy + m13; 257 to[i][2] = m2 * ox + m6 * oy + m14; 258 to[i][3] = m3 * ox + m7 * oy + m15; 253 STRIDE_LOOP { 254 const GLfloat ox = from[0], oy = from[1]; 253 STRIDE_LOOP { 254 const GLfloat ox = from[0], oy = from[1]; 255 to[i][0] = m0 * ox + m4 * oy + m12; 256 to[i][1] = m1 * ox + m5 * oy + m13; 257 to[i][2] = m2 * ox + m6 * oy + m14; 258 to[i][3] = m3 * ox + m7 * oy + m15; 253 STRIDE_LOOP { 254 const GLfloat ox = from[0], oy = from[1]; 253 STRIDE_LOOP { 254 const GLfloat ox = from[0], oy = from[1]; 255 to[i][0] = m0 * ox + m4 * oy + m12; 256 to[i][1] = m1 * ox + m5 * oy + m13; 257 to[i][2] = m2 * ox + m6 * oy + m14; 258 to[i][3] = m3 * ox + m7 * oy + m15; 253 STRIDE_LOOP { 262 to_vec->count = from_vec->count; 261 to_vec->flags |= VEC_SIZE_4; 260 to_vec->size = 4; 262 to_vec->count = from_vec->count; 263 } run_vertex_stage (ctx=0x880faf0, stage=0x8869a04) at tnl/t_vb_vertex.c:150 150 switch (VB->ClipPtr->size) { 144 VB->ClipPtr = TransformRaw( &store->clip, 150 switch (VB->ClipPtr->size) { 167 store->ormask = 0; 168 store->andmask = CLIP_FRUSTUM_BITS; 170 if (tnl->NeedNdcCoords) { 179 VB->NdcPtr = NULL; 180 _mesa_clip_np_tab[VB->ClipPtr->size]( VB->ClipPtr, 179 VB->NdcPtr = NULL; 180 _mesa_clip_np_tab[VB->ClipPtr->size]( VB->ClipPtr, cliptest_np_points4 (clip_vec=0x886a7ec, proj_vec=0x0, clipMask=0x8875820 "\004\004\b\b", orMask=0x886a828 "", andMask=0x886a829 "?") at math/m_clip_tmp.h:129 129 GLubyte tmpOrMask = *orMask; 123 { 124 const GLuint stride = clip_vec->stride; 125 const GLuint count = clip_vec->count; 126 const GLfloat *from = (GLfloat *)clip_vec->start; 124 const GLuint stride = clip_vec->stride; 128 GLubyte tmpAndMask = *andMask; 125 const GLuint count = clip_vec->count; 128 GLubyte tmpAndMask = *andMask; 129 GLubyte tmpOrMask = *orMask; 126 const GLfloat *from = (GLfloat *)clip_vec->start; 129 GLubyte tmpOrMask = *orMask; 133 const GLfloat cx = from[0]; 134 const GLfloat cy = from[1]; 135 const GLfloat cz = from[2]; 136 const GLfloat cw = from[3]; 149 if ( cx + cw < 0) mask |= CLIP_LEFT_BIT; 136 const GLfloat cw = from[3]; 149 if ( cx + cw < 0) mask |= CLIP_LEFT_BIT; 150 if (-cy + cw < 0) mask |= CLIP_TOP_BIT; 151 if ( cy + cw < 0) mask |= CLIP_BOTTOM_BIT; 152 if (-cz + cw < 0) mask |= CLIP_FAR_BIT; 153 if ( cz + cw < 0) mask |= CLIP_NEAR_BIT; 157 if (mask) { 156 clipMask[i] = mask; 157 if (mask) { 132 STRIDE_LOOP { 133 const GLfloat cx = from[0]; 134 const GLfloat cy = from[1]; 135 const GLfloat cz = from[2]; 136 const GLfloat cw = from[3]; 149 if ( cx + cw < 0) mask |= CLIP_LEFT_BIT; 136 const GLfloat cw = from[3]; 149 if ( cx + cw < 0) mask |= CLIP_LEFT_BIT; 150 if (-cy + cw < 0) mask |= CLIP_TOP_BIT; 151 if ( cy + cw < 0) mask |= CLIP_BOTTOM_BIT; 152 if (-cz + cw < 0) mask |= CLIP_FAR_BIT; 153 if ( cz + cw < 0) mask |= CLIP_NEAR_BIT; 157 if (mask) { 156 clipMask[i] = mask; 157 if (mask) { 132 STRIDE_LOOP { 133 const GLfloat cx = from[0]; 134 const GLfloat cy = from[1]; 135 const GLfloat cz = from[2]; 136 const GLfloat cw = from[3]; 149 if ( cx + cw < 0) mask |= CLIP_LEFT_BIT; 136 const GLfloat cw = from[3]; 149 if ( cx + cw < 0) mask |= CLIP_LEFT_BIT; 150 if (-cy + cw < 0) mask |= CLIP_TOP_BIT; 151 if ( cy + cw < 0) mask |= CLIP_BOTTOM_BIT; 152 if (-cz + cw < 0) mask |= CLIP_FAR_BIT; 153 if ( cz + cw < 0) mask |= 
CLIP_NEAR_BIT; 157 if (mask) { 156 clipMask[i] = mask; 157 if (mask) { 132 STRIDE_LOOP { 133 const GLfloat cx = from[0]; 134 const GLfloat cy = from[1]; 135 const GLfloat cz = from[2]; 136 const GLfloat cw = from[3]; 149 if ( cx + cw < 0) mask |= CLIP_LEFT_BIT; 136 const GLfloat cw = from[3]; 149 if ( cx + cw < 0) mask |= CLIP_LEFT_BIT; 150 if (-cy + cw < 0) mask |= CLIP_TOP_BIT; 151 if ( cy + cw < 0) mask |= CLIP_BOTTOM_BIT; 152 if (-cz + cw < 0) mask |= CLIP_FAR_BIT; 153 if ( cz + cw < 0) mask |= CLIP_NEAR_BIT; 157 if (mask) { 156 clipMask[i] = mask; 157 if (mask) { 132 STRIDE_LOOP { 164 *orMask = tmpOrMask; 165 *andMask = (GLubyte) (c < count ? 0 : tmpAndMask); 167 } run_vertex_stage (ctx=0x880faf0, stage=0x8869a04) at tnl/t_vb_vertex.c:187 187 if (store->andmask) 194 if (ctx->Transform.ClipPlanesEnabled) { 205 VB->ClipAndMask = store->andmask; 206 VB->ClipOrMask = store->ormask; 207 VB->ClipMask = store->clipmask; 210 } _tnl_run_pipeline (ctx=0x880faf0) at tnl/t_pipeline.c:156 156 for (i = 0; i < tnl->pipeline.nr_stages ; i++) { 158 if (!s->run( ctx, s )) run_normal_stage (ctx=0x880faf0, stage=0x8869a1c) at tnl/t_vb_normals.c:53 53 struct normal_stage_data *store = NORMAL_STAGE_DATA(stage); 54 struct vertex_buffer *VB = &TNL_CONTEXT(ctx)->vb; 52 { 53 struct normal_stage_data *store = NORMAL_STAGE_DATA(stage); 54 struct vertex_buffer *VB = &TNL_CONTEXT(ctx)->vb; 57 if (!store->NormalTransform) 86 } _tnl_run_pipeline (ctx=0x880faf0) at tnl/t_pipeline.c:156 156 for (i = 0; i < tnl->pipeline.nr_stages ; i++) { 158 if (!s->run( ctx, s )) run_lighting (ctx=0x880faf0, stage=0x8869a34) at tnl/t_vb_light.c:202 202 struct vertex_buffer *VB = &tnl->vb; 200 struct light_stage_data *store = LIGHT_STAGE_DATA(stage); 199 { 202 struct vertex_buffer *VB = &tnl->vb; 200 struct light_stage_data *store = LIGHT_STAGE_DATA(stage); 202 struct vertex_buffer *VB = &tnl->vb; 203 GLvector4f *input = ctx->_NeedEyeCoords ? 
VB->EyePtr : VB->ObjPtr; 206 if (!ctx->Light.Enabled || ctx->VertexProgram._Current) 254 } _tnl_run_pipeline (ctx=0x880faf0) at tnl/t_pipeline.c:156 156 for (i = 0; i < tnl->pipeline.nr_stages ; i++) { 158 if (!s->run( ctx, s )) run_fog_stage (ctx=0x880faf0, stage=0x8869a4c) at tnl/t_vb_fog.c:146 146 TNLcontext *tnl = TNL_CONTEXT(ctx); 152 if (!ctx->Fog.Enabled) 145 { 146 TNLcontext *tnl = TNL_CONTEXT(ctx); 148 struct fog_stage_data *store = FOG_STAGE_DATA(stage); 152 if (!ctx->Fog.Enabled) 232 } _tnl_run_pipeline (ctx=0x880faf0) at tnl/t_pipeline.c:156 156 for (i = 0; i < tnl->pipeline.nr_stages ; i++) { 158 if (!s->run( ctx, s )) run_texgen_stage (ctx=0x880faf0, stage=0x8869a64) at tnl/t_vb_texgen.c:487 487 struct vertex_buffer *VB = &TNL_CONTEXT(ctx)->vb; 488 struct texgen_stage_data *store = TEXGEN_STAGE_DATA(stage); 487 struct vertex_buffer *VB = &TNL_CONTEXT(ctx)->vb; 488 struct texgen_stage_data *store = TEXGEN_STAGE_DATA(stage); 491 if (!ctx->Texture._TexGenEnabled || ctx->VertexProgram._Current) 507 } _tnl_run_pipeline (ctx=0x880faf0) at tnl/t_pipeline.c:156 156 for (i = 0; i < tnl->pipeline.nr_stages ; i++) { 158 if (!s->run( ctx, s )) run_texmat_stage (ctx=0x880faf0, stage=0x8869a7c) at tnl/t_vb_texmat.c:61 61 struct vertex_buffer *VB = &TNL_CONTEXT(ctx)->vb; 60 struct texmat_stage_data *store = TEXMAT_STAGE_DATA(stage); 59 { 61 struct vertex_buffer *VB = &TNL_CONTEXT(ctx)->vb; 60 struct texmat_stage_data *store = TEXMAT_STAGE_DATA(stage); 61 struct vertex_buffer *VB = &TNL_CONTEXT(ctx)->vb; 64 if (!ctx->Texture._TexMatEnabled || ctx->VertexProgram._Current) 61 struct vertex_buffer *VB = &TNL_CONTEXT(ctx)->vb; 64 if (!ctx->Texture._TexMatEnabled || ctx->VertexProgram._Current) 82 } _tnl_run_pipeline (ctx=0x880faf0) at tnl/t_pipeline.c:156 156 for (i = 0; i < tnl->pipeline.nr_stages ; i++) { 158 if (!s->run( ctx, s )) radeon_run_render (ctx=0x880faf0, stage=0x8869a94) at radeon_swtcl.c:422 422 TNLcontext *tnl = TNL_CONTEXT(ctx); 430 if (rmesa->swtcl.RenderIndex != 0 || 420 { 422 TNLcontext *tnl = TNL_CONTEXT(ctx); 430 if (rmesa->swtcl.RenderIndex != 0 || 423 struct vertex_buffer *VB = &tnl->vb; 1172 if (VB->ClipOrMask & ~CLIP_CULL_BIT) 1175 if (VB->Elts && !HAVE_ELTS) 1178 for (i = 0 ; i < VB->PrimitiveCount ; i++) { 1179 GLuint prim = VB->Primitive[i].mode; 1183 if (!count) 1179 GLuint prim = VB->Primitive[i].mode; 1183 if (!count) 1186 switch (prim & PRIM_MODE_MASK) { 1178 for (i = 0 ; i < VB->PrimitiveCount ; i++) { 434 tnl->Driver.Render.Start( ctx ); radeonRenderStart (ctx=0x880faf0) at radeon_swtcl.c:227 227 radeonContextPtr rmesa = RADEON_CONTEXT( ctx ); 226 { 227 radeonContextPtr rmesa = RADEON_CONTEXT( ctx ); 91 TNLcontext *tnl = TNL_CONTEXT(ctx); 92 struct vertex_buffer *VB = &tnl->vb; 97 RENDERINPUTS_COPY( index_bitset, tnl->render_inputs_bitset ); 101 if ( VB->NdcPtr != NULL ) { 105 VB->AttribPtr[VERT_ATTRIB_POS] = VB->ClipPtr; 108 assert( VB->AttribPtr[VERT_ATTRIB_POS] != NULL ); 105 VB->AttribPtr[VERT_ATTRIB_POS] = VB->ClipPtr; 108 assert( VB->AttribPtr[VERT_ATTRIB_POS] != NULL ); 97 RENDERINPUTS_COPY( index_bitset, tnl->render_inputs_bitset ); 114 if ( !rmesa->swtcl.needproj || 109 rmesa->swtcl.vertex_attr_count = 0; 114 if ( !rmesa->swtcl.needproj || 116 EMIT_ATTR( _TNL_ATTRIB_POS, EMIT_4F, 128 EMIT_ATTR( _TNL_ATTRIB_COLOR0, EMIT_4UB_4F_RGBA, 126 rmesa->swtcl.coloroffset = offset; 128 EMIT_ATTR( _TNL_ATTRIB_COLOR0, EMIT_4UB_4F_RGBA, 136 rmesa->swtcl.specoffset = 0; 128 EMIT_ATTR( _TNL_ATTRIB_COLOR0, EMIT_4UB_4F_RGBA, 137 if (RENDERINPUTS_TEST( index_bitset, 
_TNL_ATTRIB_COLOR1 ) || 177 if (RENDERINPUTS_TEST_RANGE( index_bitset, _TNL_FIRST_TEX, _TNL_LAST_TEX )) { 180 for (i = 0; i < ctx->Const.MaxTextureUnits; i++) { 181 if (RENDERINPUTS_TEST( index_bitset, _TNL_ATTRIB_TEX(i) )) { 184 switch (sz) { 187 EMIT_ATTR( _TNL_ATTRIB_TEX0+i, EMIT_2F, 180 for (i = 0; i < ctx->Const.MaxTextureUnits; i++) { 181 if (RENDERINPUTS_TEST( index_bitset, _TNL_ATTRIB_TEX(i) )) { 180 for (i = 0; i < ctx->Const.MaxTextureUnits; i++) { 181 if (RENDERINPUTS_TEST( index_bitset, _TNL_ATTRIB_TEX(i) )) { 180 for (i = 0; i < ctx->Const.MaxTextureUnits; i++) { 207 if (!RENDERINPUTS_EQUAL( rmesa->tnl_index_bitset, index_bitset ) || 231 if (rmesa->dma.flush != 0 && 234 } radeon_run_render (ctx=0x880faf0, stage=0x8869a94) at radeon_swtcl.c:436 436 for (i = 0 ; i < VB->PrimitiveCount ; i++) 434 tnl->Driver.Render.Start( ctx ); 438 GLuint prim = VB->Primitive[i].mode; 439 GLuint start = VB->Primitive[i].start; 440 GLuint length = VB->Primitive[i].count; 442 if (!length) 440 GLuint length = VB->Primitive[i].count; 442 if (!length) 445 if (RADEON_DEBUG & DEBUG_PRIMS) 438 GLuint prim = VB->Primitive[i].mode; 445 if (RADEON_DEBUG & DEBUG_PRIMS) 451 tab[prim & PRIM_MODE_MASK]( ctx, start, start + length, prim ); radeon_dma_render_quads_verts (ctx=0x880faf0, start=0, count=4, flags=7) at ../../../../../src/mesa/tnl_dd/t_dd_dmatmp.h:623 623 LOCAL_VARS; 543 { 623 LOCAL_VARS; 626 INIT(GL_TRIANGLES); radeonDmaPrimitive (rmesa=0x8807fe0, prim=4) at radeon_swtcl.c:392 392 RADEON_NEWPRIM( rmesa ); 391 { 392 RADEON_NEWPRIM( rmesa ); 393 rmesa->swtcl.hw_primitive = hw_prim[prim]; 394 assert(rmesa->dma.current.ptr == rmesa->dma.current.start); 395 } radeon_dma_render_quads_verts (ctx=0x880faf0, start=0, count=4, flags=7) at ../../../../../src/mesa/tnl_dd/t_dd_dmatmp.h:628 628 for (j = start; j < count-3; j += 4) { 629 void *tmp = ALLOC_VERTS( 6 ); radeonAllocDmaLowVerts (rmesa=0x8807fe0, nverts=6, vsize=28) at radeon_swtcl.c:331 331 GLuint bytes = vsize * nverts; 330 { 333 if ( rmesa->dma.current.ptr + bytes > rmesa->dma.current.end ) 331 GLuint bytes = vsize * nverts; 333 if ( rmesa->dma.current.ptr + bytes > rmesa->dma.current.end ) 336 if (!rmesa->dma.flush) { 337 rmesa->glCtx->Driver.NeedFlush |= FLUSH_STORED_VERTICES; 338 rmesa->dma.flush = flush_last_swtcl_prim; 341 assert( vsize == rmesa->swtcl.vertex_size * 4 ); 342 assert( rmesa->dma.flush == flush_last_swtcl_prim ); 343 assert (rmesa->dma.current.start + 349 GLubyte *head = (GLubyte *)(rmesa->dma.current.address + rmesa->dma.current.ptr); 350 rmesa->dma.current.ptr += bytes; 349 GLubyte *head = (GLubyte *)(rmesa->dma.current.address + rmesa->dma.current.ptr); 350 rmesa->dma.current.ptr += bytes; 351 rmesa->swtcl.numverts += nverts; 355 } radeon_dma_render_quads_verts (ctx=0x880faf0, start=0, count=4, flags=7) at ../../../../../src/mesa/tnl_dd/t_dd_dmatmp.h:632 632 tmp = EMIT_VERTS(ctx, j, 2, tmp); 633 tmp = EMIT_VERTS(ctx, j + 3, 1, tmp); 632 tmp = EMIT_VERTS(ctx, j, 2, tmp); _tnl_emit_vertices_to_buffer (ctx=0x880faf0, start=0, end=2, dest=0xb5dc9cf4) at tnl/t_vertex.c:447 447 struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx); 449 update_input_ptrs(ctx, start); 446 { 447 struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx); 449 update_input_ptrs(ctx, start); update_input_ptrs (ctx=0x880faf0, start=0) at tnl/t_vertex.c:397 397 struct vertex_buffer *VB = &TNL_CONTEXT(ctx)->vb; 396 { 398 struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx); 399 struct tnl_clipspace_attr *a = vtx->attr; 397 struct vertex_buffer *VB = &TNL_CONTEXT(ctx)->vb; 398 
struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx); 399 struct tnl_clipspace_attr *a = vtx->attr; 400 const GLuint count = vtx->attr_count; 404 GLvector4f *vptr = VB->AttribPtr[a[j].attrib]; 406 if (vtx->emit != choose_emit_func) { 411 a[j].inputptr = ((GLubyte *)vptr->data) + start * vptr->stride; 403 for (j = 0; j < count; j++) { 404 GLvector4f *vptr = VB->AttribPtr[a[j].attrib]; 406 if (vtx->emit != choose_emit_func) { 411 a[j].inputptr = ((GLubyte *)vptr->data) + start * vptr->stride; 403 for (j = 0; j < count; j++) { 404 GLvector4f *vptr = VB->AttribPtr[a[j].attrib]; 406 if (vtx->emit != choose_emit_func) { 411 a[j].inputptr = ((GLubyte *)vptr->data) + start * vptr->stride; 403 for (j = 0; j < count; j++) { 414 if (a->vp) { 424 } _tnl_emit_vertices_to_buffer (ctx=0x880faf0, start=0, end=2, dest=0xb5dc9cf4) at tnl/t_vertex.c:452 452 vtx->emit( ctx, end - start, (GLubyte*) dest ); 447 struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx); 452 vtx->emit( ctx, end - start, (GLubyte*) dest ); choose_emit_func (ctx=0x880faf0, count=2, dest=0xb5dc9cf4 "") at tnl/t_vertex.c:111 111 struct vertex_buffer *VB = &TNL_CONTEXT(ctx)->vb; 110 { 111 struct vertex_buffer *VB = &TNL_CONTEXT(ctx)->vb; 112 struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx); 114 const GLuint attr_count = vtx->attr_count; 118 GLvector4f *vptr = VB->AttribPtr[a[j].attrib]; 119 a[j].inputstride = vptr->stride; 120 a[j].inputsize = vptr->size; 121 a[j].emit = a[j].insert[vptr->size - 1]; /* not always used */ 117 for (j = 0; j < attr_count; j++) { 118 GLvector4f *vptr = VB->AttribPtr[a[j].attrib]; 119 a[j].inputstride = vptr->stride; 120 a[j].inputsize = vptr->size; 121 a[j].emit = a[j].insert[vptr->size - 1]; /* not always used */ 117 for (j = 0; j < attr_count; j++) { 118 GLvector4f *vptr = VB->AttribPtr[a[j].attrib]; 119 a[j].inputstride = vptr->stride; 120 a[j].inputsize = vptr->size; 121 a[j].emit = a[j].insert[vptr->size - 1]; /* not always used */ 117 for (j = 0; j < attr_count; j++) { 68 struct tnl_clipspace_fastpath *fp = vtx->fastpath; 124 vtx->emit = NULL; 135 else if (vtx->codegen_emit) { 140 _tnl_generate_hardwired_emit(ctx); _tnl_generate_hardwired_emit (ctx=0x880faf0) at tnl/t_vertex_generic.c:934 934 struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx); 933 { 934 struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx); 940 switch (vtx->attr_count) { 954 if (vtx->attr[2].emit == insert_2f_2) { 955 if (vtx->attr[1].emit == insert_4ub_4f_rgba_4) { 961 else if (vtx->attr[1].emit == insert_4ub_4f_bgra_4 && 982 vtx->emit = func; 983 } choose_emit_func (ctx=0x880faf0, count=2, dest=0xb5dc9cf4 "") at tnl/t_vertex.c:145 145 if (!vtx->emit) 146 vtx->emit = _tnl_generic_emit; 148 vtx->emit( ctx, count, dest ); _tnl_generic_emit (ctx=0x880faf0, count=2, v=0xb5dc9cf4 "") at tnl/t_vertex_generic.c:994 994 struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx); 996 const GLuint attr_count = vtx->attr_count; 997 const GLuint stride = vtx->vertex_size; 1002 GLfloat *in = (GLfloat *)a[j].inputptr; 1001 for (j = 0; j < attr_count; j++) { 1002 GLfloat *in = (GLfloat *)a[j].inputptr; 1003 a[j].inputptr += a[j].inputstride; 1004 a[j].emit( &a[j], v + a[j].vertoffset, in ); insert_4f_4 (a=0x8869de0, v=0xb5dc9cf4 "", in=0x886e2e0) at tnl/t_vertex_generic.c:156 156 { 160 out[0] = in[0]; 161 out[1] = in[1]; 162 out[2] = in[2]; 163 out[3] = in[3]; 164 } _tnl_generic_emit (ctx=0x880faf0, count=2, v=0xb5dc9cf4 "") at tnl/t_vertex_generic.c:1001 1001 for (j = 0; j < attr_count; j++) { 1002 GLfloat *in = (GLfloat *)a[j].inputptr; 1001 for (j = 0; j < 
attr_count; j++) { 1002 GLfloat *in = (GLfloat *)a[j].inputptr; 1003 a[j].inputptr += a[j].inputstride; 1004 a[j].emit( &a[j], v + a[j].vertoffset, in ); insert_4ub_4f_rgba_3 (a=0x8869e0c, v=0xb5dc9d04 "", in=0x8859888) at tnl/t_vertex_generic.c:340 340 { 343 UNCLAMPED_FLOAT_TO_UBYTE(v[0], in[0]); 344 UNCLAMPED_FLOAT_TO_UBYTE(v[1], in[1]); 345 UNCLAMPED_FLOAT_TO_UBYTE(v[2], in[2]); 346 v[3] = 0xff; 347 } _tnl_generic_emit (ctx=0x880faf0, count=2, v=0xb5dc9cf4 "") at tnl/t_vertex_generic.c:1001 1001 for (j = 0; j < attr_count; j++) { 1002 GLfloat *in = (GLfloat *)a[j].inputptr; 1001 for (j = 0; j < attr_count; j++) { 1002 GLfloat *in = (GLfloat *)a[j].inputptr; 1003 a[j].inputptr += a[j].inputstride; 1004 a[j].emit( &a[j], v + a[j].vertoffset, in ); insert_2f_2 (a=0x8869e38, v=0xb5dc9d08 "", in=0x8859894) at tnl/t_vertex_generic.c:248 248 { 252 out[0] = in[0]; 253 out[1] = in[1]; 254 } _tnl_generic_emit (ctx=0x880faf0, count=2, v=0xb5dc9cf4 "") at tnl/t_vertex_generic.c:1001 1001 for (j = 0; j < attr_count; j++) { 1000 for (i = 0 ; i < count ; i++, v += stride) { 1002 GLfloat *in = (GLfloat *)a[j].inputptr; 1001 for (j = 0; j < attr_count; j++) { 1002 GLfloat *in = (GLfloat *)a[j].inputptr; 1003 a[j].inputptr += a[j].inputstride; 1004 a[j].emit( &a[j], v + a[j].vertoffset, in ); insert_4f_4 (a=0x8869de0, v=0xb5dc9d10 "", in=0x886e2f0) at tnl/t_vertex_generic.c:156 156 { 160 out[0] = in[0]; 161 out[1] = in[1]; 162 out[2] = in[2]; 163 out[3] = in[3]; 164 } _tnl_generic_emit (ctx=0x880faf0, count=2, v=0xb5dc9d10 "") at tnl/t_vertex_generic.c:1001 1001 for (j = 0; j < attr_count; j++) { 1002 GLfloat *in = (GLfloat *)a[j].inputptr; 1001 for (j = 0; j < attr_count; j++) { 1002 GLfloat *in = (GLfloat *)a[j].inputptr; 1003 a[j].inputptr += a[j].inputstride; 1004 a[j].emit( &a[j], v + a[j].vertoffset, in ); insert_4ub_4f_rgba_3 (a=0x8869e0c, v=0xb5dc9d20 "", in=0x88598a4) at tnl/t_vertex_generic.c:340 340 { 343 UNCLAMPED_FLOAT_TO_UBYTE(v[0], in[0]); 344 UNCLAMPED_FLOAT_TO_UBYTE(v[1], in[1]); 345 UNCLAMPED_FLOAT_TO_UBYTE(v[2], in[2]); 346 v[3] = 0xff; 347 } _tnl_generic_emit (ctx=0x880faf0, count=2, v=0xb5dc9d10 "") at tnl/t_vertex_generic.c:1001 1001 for (j = 0; j < attr_count; j++) { 1002 GLfloat *in = (GLfloat *)a[j].inputptr; 1001 for (j = 0; j < attr_count; j++) { 1002 GLfloat *in = (GLfloat *)a[j].inputptr; 1003 a[j].inputptr += a[j].inputstride; 1004 a[j].emit( &a[j], v + a[j].vertoffset, in ); insert_2f_2 (a=0x8869e38, v=0xb5dc9d24 "", in=0x88598b0) at tnl/t_vertex_generic.c:248 248 { 252 out[0] = in[0]; 253 out[1] = in[1]; 254 } _tnl_generic_emit (ctx=0x880faf0, count=2, v=0xb5dc9d10 "") at tnl/t_vertex_generic.c:1001 1001 for (j = 0; j < attr_count; j++) { 1000 for (i = 0 ; i < count ; i++, v += stride) { 1007 } choose_emit_func (ctx=0x880faf0, count=2, dest=0xb5dc9cf4 "") at tnl/t_vertex.c:149 149 } _tnl_emit_vertices_to_buffer (ctx=0x880faf0, start=0, end=2, dest=0xb5dc9cf4) at tnl/t_vertex.c:454 454 } 452 vtx->emit( ctx, end - start, (GLubyte*) dest ); 454 } radeon_dma_render_quads_verts (ctx=0x880faf0, start=0, count=4, flags=7) at ../../../../../src/mesa/tnl_dd/t_dd_dmatmp.h:633 633 tmp = EMIT_VERTS(ctx, j + 3, 1, tmp); _tnl_emit_vertices_to_buffer (ctx=0x880faf0, start=3, end=4, dest=0xb5dc9d2c) at tnl/t_vertex.c:447 447 struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx); 449 update_input_ptrs(ctx, start); 446 { 447 struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx); 449 update_input_ptrs(ctx, start); update_input_ptrs (ctx=0x880faf0, start=3) at tnl/t_vertex.c:397 397 struct 
vertex_buffer *VB = &TNL_CONTEXT(ctx)->vb; 396 { 398 struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx); 399 struct tnl_clipspace_attr *a = vtx->attr; 397 struct vertex_buffer *VB = &TNL_CONTEXT(ctx)->vb; 398 struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx); 399 struct tnl_clipspace_attr *a = vtx->attr; 400 const GLuint count = vtx->attr_count; 404 GLvector4f *vptr = VB->AttribPtr[a[j].attrib]; 406 if (vtx->emit != choose_emit_func) { 407 assert(a[j].inputstride == vptr->stride); 408 assert(a[j].inputsize == vptr->size); 411 a[j].inputptr = ((GLubyte *)vptr->data) + start * vptr->stride; 403 for (j = 0; j < count; j++) { 404 GLvector4f *vptr = VB->AttribPtr[a[j].attrib]; 406 if (vtx->emit != choose_emit_func) { 407 assert(a[j].inputstride == vptr->stride); 408 assert(a[j].inputsize == vptr->size); 411 a[j].inputptr = ((GLubyte *)vptr->data) + start * vptr->stride; 403 for (j = 0; j < count; j++) { 404 GLvector4f *vptr = VB->AttribPtr[a[j].attrib]; 406 if (vtx->emit != choose_emit_func) { 407 assert(a[j].inputstride == vptr->stride); 408 assert(a[j].inputsize == vptr->size); 411 a[j].inputptr = ((GLubyte *)vptr->data) + start * vptr->stride; 403 for (j = 0; j < count; j++) { 414 if (a->vp) { 424 } _tnl_emit_vertices_to_buffer (ctx=0x880faf0, start=3, end=4, dest=0xb5dc9d2c) at tnl/t_vertex.c:452 452 vtx->emit( ctx, end - start, (GLubyte*) dest ); 447 struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx); 452 vtx->emit( ctx, end - start, (GLubyte*) dest ); _tnl_generic_emit (ctx=0x880faf0, count=1, v=0xb5dc9d2c "") at tnl/t_vertex_generic.c:994 994 struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx); 996 const GLuint attr_count = vtx->attr_count; 997 const GLuint stride = vtx->vertex_size; 1002 GLfloat *in = (GLfloat *)a[j].inputptr; 1001 for (j = 0; j < attr_count; j++) { 1002 GLfloat *in = (GLfloat *)a[j].inputptr; 1003 a[j].inputptr += a[j].inputstride; 1004 a[j].emit( &a[j], v + a[j].vertoffset, in ); insert_4f_4 (a=0x8869de0, v=0xb5dc9d2c "", in=0x886e310) at tnl/t_vertex_generic.c:156 156 { 160 out[0] = in[0]; 161 out[1] = in[1]; 162 out[2] = in[2]; 163 out[3] = in[3]; 164 } _tnl_generic_emit (ctx=0x880faf0, count=1, v=0xb5dc9d2c "") at tnl/t_vertex_generic.c:1001 1001 for (j = 0; j < attr_count; j++) { 1002 GLfloat *in = (GLfloat *)a[j].inputptr; 1001 for (j = 0; j < attr_count; j++) { 1002 GLfloat *in = (GLfloat *)a[j].inputptr; 1003 a[j].inputptr += a[j].inputstride; 1004 a[j].emit( &a[j], v + a[j].vertoffset, in ); insert_4ub_4f_rgba_3 (a=0x8869e0c, v=0xb5dc9d3c "", in=0x88598dc) at tnl/t_vertex_generic.c:340 340 { 343 UNCLAMPED_FLOAT_TO_UBYTE(v[0], in[0]); 344 UNCLAMPED_FLOAT_TO_UBYTE(v[1], in[1]); 345 UNCLAMPED_FLOAT_TO_UBYTE(v[2], in[2]); 346 v[3] = 0xff; 347 } _tnl_generic_emit (ctx=0x880faf0, count=1, v=0xb5dc9d2c "") at tnl/t_vertex_generic.c:1001 1001 for (j = 0; j < attr_count; j++) { 1002 GLfloat *in = (GLfloat *)a[j].inputptr; 1001 for (j = 0; j < attr_count; j++) { 1002 GLfloat *in = (GLfloat *)a[j].inputptr; 1003 a[j].inputptr += a[j].inputstride; 1004 a[j].emit( &a[j], v + a[j].vertoffset, in ); insert_2f_2 (a=0x8869e38, v=0xb5dc9d40 "", in=0x88598e8) at tnl/t_vertex_generic.c:248 248 { 252 out[0] = in[0]; 253 out[1] = in[1]; 254 } _tnl_generic_emit (ctx=0x880faf0, count=1, v=0xb5dc9d2c "") at tnl/t_vertex_generic.c:1001 1001 for (j = 0; j < attr_count; j++) { 1000 for (i = 0 ; i < count ; i++, v += stride) { 1007 } _tnl_emit_vertices_to_buffer (ctx=0x880faf0, start=3, end=4, dest=0xb5dc9d2c) at tnl/t_vertex.c:454 454 } 452 vtx->emit( ctx, end - start, (GLubyte*) dest ); 454 
} radeon_dma_render_quads_verts (ctx=0x880faf0, start=0, count=4, flags=7) at ../../../../../src/mesa/tnl_dd/t_dd_dmatmp.h:636 636 tmp = EMIT_VERTS(ctx, j + 1, 3, tmp); _tnl_emit_vertices_to_buffer (ctx=0x880faf0, start=1, end=4, dest=0xb5dc9d48) at tnl/t_vertex.c:447 447 struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx); 449 update_input_ptrs(ctx, start); 446 { 447 struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx); 449 update_input_ptrs(ctx, start); update_input_ptrs (ctx=0x880faf0, start=1) at tnl/t_vertex.c:397 397 struct vertex_buffer *VB = &TNL_CONTEXT(ctx)->vb; 396 { 398 struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx); 399 struct tnl_clipspace_attr *a = vtx->attr; 397 struct vertex_buffer *VB = &TNL_CONTEXT(ctx)->vb; 398 struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx); 399 struct tnl_clipspace_attr *a = vtx->attr; 400 const GLuint count = vtx->attr_count; 404 GLvector4f *vptr = VB->AttribPtr[a[j].attrib]; 406 if (vtx->emit != choose_emit_func) { 407 assert(a[j].inputstride == vptr->stride); 408 assert(a[j].inputsize == vptr->size); 411 a[j].inputptr = ((GLubyte *)vptr->data) + start * vptr->stride; 403 for (j = 0; j < count; j++) { 404 GLvector4f *vptr = VB->AttribPtr[a[j].attrib]; 406 if (vtx->emit != choose_emit_func) { 407 assert(a[j].inputstride == vptr->stride); 408 assert(a[j].inputsize == vptr->size); 411 a[j].inputptr = ((GLubyte *)vptr->data) + start * vptr->stride; 403 for (j = 0; j < count; j++) { 404 GLvector4f *vptr = VB->AttribPtr[a[j].attrib]; 406 if (vtx->emit != choose_emit_func) { 407 assert(a[j].inputstride == vptr->stride); 408 assert(a[j].inputsize == vptr->size); 411 a[j].inputptr = ((GLubyte *)vptr->data) + start * vptr->stride; 403 for (j = 0; j < count; j++) { 414 if (a->vp) { 424 } _tnl_emit_vertices_to_buffer (ctx=0x880faf0, start=1, end=4, dest=0xb5dc9d48) at tnl/t_vertex.c:452 452 vtx->emit( ctx, end - start, (GLubyte*) dest ); 447 struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx); 452 vtx->emit( ctx, end - start, (GLubyte*) dest ); _tnl_generic_emit (ctx=0x880faf0, count=3, v=0xb5dc9d48 "") at tnl/t_vertex_generic.c:994 994 struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx); 996 const GLuint attr_count = vtx->attr_count; 997 const GLuint stride = vtx->vertex_size; 1002 GLfloat *in = (GLfloat *)a[j].inputptr; 1001 for (j = 0; j < attr_count; j++) { 1002 GLfloat *in = (GLfloat *)a[j].inputptr; 1003 a[j].inputptr += a[j].inputstride; 1004 a[j].emit( &a[j], v + a[j].vertoffset, in ); insert_4f_4 (a=0x8869de0, v=0xb5dc9d48 "", in=0x886e2f0) at tnl/t_vertex_generic.c:156 156 { 160 out[0] = in[0]; 161 out[1] = in[1]; 162 out[2] = in[2]; 163 out[3] = in[3]; 164 } _tnl_generic_emit (ctx=0x880faf0, count=3, v=0xb5dc9d48 "") at tnl/t_vertex_generic.c:1001 1001 for (j = 0; j < attr_count; j++) { 1002 GLfloat *in = (GLfloat *)a[j].inputptr; 1001 for (j = 0; j < attr_count; j++) { 1002 GLfloat *in = (GLfloat *)a[j].inputptr; 1003 a[j].inputptr += a[j].inputstride; 1004 a[j].emit( &a[j], v + a[j].vertoffset, in ); insert_4ub_4f_rgba_3 (a=0x8869e0c, v=0xb5dc9d58 "", in=0x88598a4) at tnl/t_vertex_generic.c:340 340 { 343 UNCLAMPED_FLOAT_TO_UBYTE(v[0], in[0]); 344 UNCLAMPED_FLOAT_TO_UBYTE(v[1], in[1]); 345 UNCLAMPED_FLOAT_TO_UBYTE(v[2], in[2]); 346 v[3] = 0xff; 347 } _tnl_generic_emit (ctx=0x880faf0, count=3, v=0xb5dc9d48 "") at tnl/t_vertex_generic.c:1001 1001 for (j = 0; j < attr_count; j++) { 1002 GLfloat *in = (GLfloat *)a[j].inputptr; 1001 for (j = 0; j < attr_count; j++) { 1002 GLfloat *in = (GLfloat *)a[j].inputptr; 1003 a[j].inputptr += a[j].inputstride; 1004 
a[j].emit( &a[j], v + a[j].vertoffset, in ); insert_2f_2 (a=0x8869e38, v=0xb5dc9d5c "", in=0x88598b0) at tnl/t_vertex_generic.c:248 248 { 252 out[0] = in[0]; 253 out[1] = in[1]; 254 } _tnl_generic_emit (ctx=0x880faf0, count=3, v=0xb5dc9d48 "") at tnl/t_vertex_generic.c:1001 1001 for (j = 0; j < attr_count; j++) { 1000 for (i = 0 ; i < count ; i++, v += stride) { 1002 GLfloat *in = (GLfloat *)a[j].inputptr; 1001 for (j = 0; j < attr_count; j++) { 1002 GLfloat *in = (GLfloat *)a[j].inputptr; 1003 a[j].inputptr += a[j].inputstride; 1004 a[j].emit( &a[j], v + a[j].vertoffset, in ); insert_4f_4 (a=0x8869de0, v=0xb5dc9d64 "", in=0x886e300) at tnl/t_vertex_generic.c:156 156 { 160 out[0] = in[0]; 161 out[1] = in[1]; 162 out[2] = in[2]; 163 out[3] = in[3]; 164 } _tnl_generic_emit (ctx=0x880faf0, count=3, v=0xb5dc9d64 "") at tnl/t_vertex_generic.c:1001 1001 for (j = 0; j < attr_count; j++) { 1002 GLfloat *in = (GLfloat *)a[j].inputptr; 1001 for (j = 0; j < attr_count; j++) { 1002 GLfloat *in = (GLfloat *)a[j].inputptr; 1003 a[j].inputptr += a[j].inputstride; 1004 a[j].emit( &a[j], v + a[j].vertoffset, in ); insert_4ub_4f_rgba_3 (a=0x8869e0c, v=0xb5dc9d74 "", in=0x88598c0) at tnl/t_vertex_generic.c:340 340 { 343 UNCLAMPED_FLOAT_TO_UBYTE(v[0], in[0]); 344 UNCLAMPED_FLOAT_TO_UBYTE(v[1], in[1]); 345 UNCLAMPED_FLOAT_TO_UBYTE(v[2], in[2]); 346 v[3] = 0xff; 347 } _tnl_generic_emit (ctx=0x880faf0, count=3, v=0xb5dc9d64 "") at tnl/t_vertex_generic.c:1001 1001 for (j = 0; j < attr_count; j++) { 1002 GLfloat *in = (GLfloat *)a[j].inputptr; 1001 for (j = 0; j < attr_count; j++) { 1002 GLfloat *in = (GLfloat *)a[j].inputptr; 1003 a[j].inputptr += a[j].inputstride; 1004 a[j].emit( &a[j], v + a[j].vertoffset, in ); insert_2f_2 (a=0x8869e38, v=0xb5dc9d78 "", in=0x88598cc) at tnl/t_vertex_generic.c:248 248 { 252 out[0] = in[0]; 253 out[1] = in[1]; 254 } _tnl_generic_emit (ctx=0x880faf0, count=3, v=0xb5dc9d64 "") at tnl/t_vertex_generic.c:1001 1001 for (j = 0; j < attr_count; j++) { 1000 for (i = 0 ; i < count ; i++, v += stride) { 1002 GLfloat *in = (GLfloat *)a[j].inputptr; 1001 for (j = 0; j < attr_count; j++) { 1002 GLfloat *in = (GLfloat *)a[j].inputptr; 1003 a[j].inputptr += a[j].inputstride; 1004 a[j].emit( &a[j], v + a[j].vertoffset, in ); insert_4f_4 (a=0x8869de0, v=0xb5dc9d80 "", in=0x886e310) at tnl/t_vertex_generic.c:156 156 { 160 out[0] = in[0]; 161 out[1] = in[1]; 162 out[2] = in[2]; 163 out[3] = in[3]; 164 } _tnl_generic_emit (ctx=0x880faf0, count=3, v=0xb5dc9d80 "") at tnl/t_vertex_generic.c:1001 1001 for (j = 0; j < attr_count; j++) { 1002 GLfloat *in = (GLfloat *)a[j].inputptr; 1001 for (j = 0; j < attr_count; j++) { 1002 GLfloat *in = (GLfloat *)a[j].inputptr; 1003 a[j].inputptr += a[j].inputstride; 1004 a[j].emit( &a[j], v + a[j].vertoffset, in ); insert_4ub_4f_rgba_3 (a=0x8869e0c, v=0xb5dc9d90 "", in=0x88598dc) at tnl/t_vertex_generic.c:340 340 { 343 UNCLAMPED_FLOAT_TO_UBYTE(v[0], in[0]); 344 UNCLAMPED_FLOAT_TO_UBYTE(v[1], in[1]); 345 UNCLAMPED_FLOAT_TO_UBYTE(v[2], in[2]); 346 v[3] = 0xff; 347 } _tnl_generic_emit (ctx=0x880faf0, count=3, v=0xb5dc9d80 "") at tnl/t_vertex_generic.c:1001 1001 for (j = 0; j < attr_count; j++) { 1002 GLfloat *in = (GLfloat *)a[j].inputptr; 1001 for (j = 0; j < attr_count; j++) { 1002 GLfloat *in = (GLfloat *)a[j].inputptr; 1003 a[j].inputptr += a[j].inputstride; 1004 a[j].emit( &a[j], v + a[j].vertoffset, in ); insert_2f_2 (a=0x8869e38, v=0xb5dc9d94 "", in=0x88598e8) at tnl/t_vertex_generic.c:248 248 { 252 out[0] = in[0]; 253 out[1] = in[1]; 254 } _tnl_generic_emit 
(ctx=0x880faf0, count=3, v=0xb5dc9d80 "") at tnl/t_vertex_generic.c:1001 1001 for (j = 0; j < attr_count; j++) { 1000 for (i = 0 ; i < count ; i++, v += stride) { 1007 } _tnl_emit_vertices_to_buffer (ctx=0x880faf0, start=1, end=4, dest=0xb5dc9d48) at tnl/t_vertex.c:454 454 } 452 vtx->emit( ctx, end - start, (GLubyte*) dest ); 454 } radeon_dma_render_quads_verts (ctx=0x880faf0, start=0, count=4, flags=7) at ../../../../../src/mesa/tnl_dd/t_dd_dmatmp.h:628 628 for (j = start; j < count-3; j += 4) { 645 } radeon_run_render (ctx=0x880faf0, stage=0x8869a94) at radeon_swtcl.c:436 436 for (i = 0 ; i < VB->PrimitiveCount ; i++) 454 tnl->Driver.Render.Finish( ctx ); radeonRenderFinish (ctx=0x880faf0) at radeon_swtcl.c:760 760 } radeon_run_render (ctx=0x880faf0, stage=0x8869a94) at radeon_swtcl.c:456 456 return GL_FALSE; /* finished the pipe */ 457 } _tnl_run_pipeline (ctx=0x880faf0) at tnl/t_pipeline.c:162 162 END_FAST_MATH(__tmp); 163 } radeonWrapRunPipeline (ctx=0x880faf0) at radeon_state.c:2357 2357 } _tnl_draw_prims (ctx=0x880faf0, arrays=0x8857c90, prim=0x88567ec, nr_prims=1, ib=0x0, min_index=0, max_index=3) at tnl/t_draw.c:326 326 for (i = 0; i < nr_bo; i++) { 56 TNLcontext *tnl = TNL_CONTEXT(ctx); 60 tnl->nr_blocks = 0; 407 } vbo_exec_vtx_flush (exec=0x88566c8) at vbo/vbo_exec_draw.c:260 260 if (exec->vtx.bufferobj->Name) { 268 exec->vtx.prim_count = 0; 269 exec->vtx.vert_count = 0; 270 exec->vtx.vbptr = (GLfloat *)exec->vtx.buffer_map; 271 } vbo_exec_FlushVertices (ctx=0x880faf0, flags=1) at vbo/vbo_exec_api.c:754 754 if (exec->vtx.vertex_size) { 755 vbo_exec_copy_to_current( exec ); vbo_exec_copy_to_current (exec=0x88566c8) at vbo/vbo_exec_api.c:139 139 { 140 GLcontext *ctx = exec->ctx; 139 { 140 GLcontext *ctx = exec->ctx; 139 { 140 GLcontext *ctx = exec->ctx; 145 if (exec->vtx.attrsz[i]) { 144 for (i = VBO_ATTRIB_POS+1 ; i < VBO_ATTRIB_MAX ; i++) { 145 if (exec->vtx.attrsz[i]) { 144 for (i = VBO_ATTRIB_POS+1 ; i < VBO_ATTRIB_MAX ; i++) { 145 if (exec->vtx.attrsz[i]) { 146 GLfloat *current = (GLfloat *)vbo->currval[i].Ptr; 151 COPY_CLEAN_4V(current, 161 vbo->currval[i].Size = exec->vtx.attrsz[i]; 166 if (i >= VBO_ATTRIB_MAT_FRONT_AMBIENT && 161 vbo->currval[i].Size = exec->vtx.attrsz[i]; 166 if (i >= VBO_ATTRIB_MAT_FRONT_AMBIENT && 144 for (i = VBO_ATTRIB_POS+1 ; i < VBO_ATTRIB_MAX ; i++) { 145 if (exec->vtx.attrsz[i]) { 144 for (i = VBO_ATTRIB_POS+1 ; i < VBO_ATTRIB_MAX ; i++) { 145 if (exec->vtx.attrsz[i]) { 144 for (i = VBO_ATTRIB_POS+1 ; i < VBO_ATTRIB_MAX ; i++) { 145 if (exec->vtx.attrsz[i]) { 144 for (i = VBO_ATTRIB_POS+1 ; i < VBO_ATTRIB_MAX ; i++) { 145 if (exec->vtx.attrsz[i]) { 144 for (i = VBO_ATTRIB_POS+1 ; i < VBO_ATTRIB_MAX ; i++) { 145 if (exec->vtx.attrsz[i]) { 146 GLfloat *current = (GLfloat *)vbo->currval[i].Ptr; 151 COPY_CLEAN_4V(current, 161 vbo->currval[i].Size = exec->vtx.attrsz[i]; 166 if (i >= VBO_ATTRIB_MAT_FRONT_AMBIENT && 161 vbo->currval[i].Size = exec->vtx.attrsz[i]; 166 if (i >= VBO_ATTRIB_MAT_FRONT_AMBIENT && 144 for (i = VBO_ATTRIB_POS+1 ; i < VBO_ATTRIB_MAX ; i++) { 145 if (exec->vtx.attrsz[i]) { 144 for (i = VBO_ATTRIB_POS+1 ; i < VBO_ATTRIB_MAX ; i++) { 145 if (exec->vtx.attrsz[i]) { 144 for (i = VBO_ATTRIB_POS+1 ; i < VBO_ATTRIB_MAX ; i++) { 145 if (exec->vtx.attrsz[i]) { 144 for (i = VBO_ATTRIB_POS+1 ; i < VBO_ATTRIB_MAX ; i++) { 145 if (exec->vtx.attrsz[i]) { 144 for (i = VBO_ATTRIB_POS+1 ; i < VBO_ATTRIB_MAX ; i++) { 145 if (exec->vtx.attrsz[i]) { 144 for (i = VBO_ATTRIB_POS+1 ; i < VBO_ATTRIB_MAX ; i++) { 145 if (exec->vtx.attrsz[i]) { 144 for (i 
= VBO_ATTRIB_POS+1 ; i < VBO_ATTRIB_MAX ; i++) { 145 if (exec->vtx.attrsz[i]) { 144 for (i = VBO_ATTRIB_POS+1 ; i < VBO_ATTRIB_MAX ; i++) { 145 if (exec->vtx.attrsz[i]) { 144 for (i = VBO_ATTRIB_POS+1 ; i < VBO_ATTRIB_MAX ; i++) { 145 if (exec->vtx.attrsz[i]) { 144 for (i = VBO_ATTRIB_POS+1 ; i < VBO_ATTRIB_MAX ; i++) { 145 if (exec->vtx.attrsz[i]) { 144 for (i = VBO_ATTRIB_POS+1 ; i < VBO_ATTRIB_MAX ; i++) { 145 if (exec->vtx.attrsz[i]) { 144 for (i = VBO_ATTRIB_POS+1 ; i < VBO_ATTRIB_MAX ; i++) { 145 if (exec->vtx.attrsz[i]) { 144 for (i = VBO_ATTRIB_POS+1 ; i < VBO_ATTRIB_MAX ; i++) { 145 if (exec->vtx.attrsz[i]) { 144 for (i = VBO_ATTRIB_POS+1 ; i < VBO_ATTRIB_MAX ; i++) { 145 if (exec->vtx.attrsz[i]) { 144 for (i = VBO_ATTRIB_POS+1 ; i < VBO_ATTRIB_MAX ; i++) { 145 if (exec->vtx.attrsz[i]) { 144 for (i = VBO_ATTRIB_POS+1 ; i < VBO_ATTRIB_MAX ; i++) { 145 if (exec->vtx.attrsz[i]) { 144 for (i = VBO_ATTRIB_POS+1 ; i < VBO_ATTRIB_MAX ; i++) { 145 if (exec->vtx.attrsz[i]) { 144 for (i = VBO_ATTRIB_POS+1 ; i < VBO_ATTRIB_MAX ; i++) { 145 if (exec->vtx.attrsz[i]) { 144 for (i = VBO_ATTRIB_POS+1 ; i < VBO_ATTRIB_MAX ; i++) { 145 if (exec->vtx.attrsz[i]) { 144 for (i = VBO_ATTRIB_POS+1 ; i < VBO_ATTRIB_MAX ; i++) { 145 if (exec->vtx.attrsz[i]) { 144 for (i = VBO_ATTRIB_POS+1 ; i < VBO_ATTRIB_MAX ; i++) { 145 if (exec->vtx.attrsz[i]) { 144 for (i = VBO_ATTRIB_POS+1 ; i < VBO_ATTRIB_MAX ; i++) { 145 if (exec->vtx.attrsz[i]) { 144 for (i = VBO_ATTRIB_POS+1 ; i < VBO_ATTRIB_MAX ; i++) { 145 if (exec->vtx.attrsz[i]) { 144 for (i = VBO_ATTRIB_POS+1 ; i < VBO_ATTRIB_MAX ; i++) { 145 if (exec->vtx.attrsz[i]) { 144 for (i = VBO_ATTRIB_POS+1 ; i < VBO_ATTRIB_MAX ; i++) { 145 if (exec->vtx.attrsz[i]) { 144 for (i = VBO_ATTRIB_POS+1 ; i < VBO_ATTRIB_MAX ; i++) { 145 if (exec->vtx.attrsz[i]) { 144 for (i = VBO_ATTRIB_POS+1 ; i < VBO_ATTRIB_MAX ; i++) { 145 if (exec->vtx.attrsz[i]) { 144 for (i = VBO_ATTRIB_POS+1 ; i < VBO_ATTRIB_MAX ; i++) { 145 if (exec->vtx.attrsz[i]) { 144 for (i = VBO_ATTRIB_POS+1 ; i < VBO_ATTRIB_MAX ; i++) { 145 if (exec->vtx.attrsz[i]) { 144 for (i = VBO_ATTRIB_POS+1 ; i < VBO_ATTRIB_MAX ; i++) { 145 if (exec->vtx.attrsz[i]) { 144 for (i = VBO_ATTRIB_POS+1 ; i < VBO_ATTRIB_MAX ; i++) { 145 if (exec->vtx.attrsz[i]) { 144 for (i = VBO_ATTRIB_POS+1 ; i < VBO_ATTRIB_MAX ; i++) { 145 if (exec->vtx.attrsz[i]) { 144 for (i = VBO_ATTRIB_POS+1 ; i < VBO_ATTRIB_MAX ; i++) { 145 if (exec->vtx.attrsz[i]) { 144 for (i = VBO_ATTRIB_POS+1 ; i < VBO_ATTRIB_MAX ; i++) { 145 if (exec->vtx.attrsz[i]) { 144 for (i = VBO_ATTRIB_POS+1 ; i < VBO_ATTRIB_MAX ; i++) { 145 if (exec->vtx.attrsz[i]) { 144 for (i = VBO_ATTRIB_POS+1 ; i < VBO_ATTRIB_MAX ; i++) { 174 if (ctx->Light.ColorMaterialEnabled && 180 ctx->Driver.NeedFlush &= ~FLUSH_UPDATE_CURRENT; 181 } vbo_exec_FlushVertices (ctx=0x880faf0, flags=1) at vbo/vbo_exec_api.c:756 756 reset_attrfv( exec ); reset_attrfv (exec=0x88566c8) at vbo/vbo_exec_api.c:768 768 exec->vtx.attrsz[i] = 0; 769 exec->vtx.active_sz[i] = 0; 767 for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) { 768 exec->vtx.attrsz[i] = 0; 769 exec->vtx.active_sz[i] = 0; 767 for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) { 768 exec->vtx.attrsz[i] = 0; 769 exec->vtx.active_sz[i] = 0; 767 for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) { 768 exec->vtx.attrsz[i] = 0; 769 exec->vtx.active_sz[i] = 0; 767 for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) { 768 exec->vtx.attrsz[i] = 0; 769 exec->vtx.active_sz[i] = 0; 767 for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) { 768 exec->vtx.attrsz[i] = 0; 769 exec->vtx.active_sz[i] = 0; 767 for (i = 0 ; i 
< VBO_ATTRIB_MAX ; i++) { 768 exec->vtx.attrsz[i] = 0; 769 exec->vtx.active_sz[i] = 0; 767 for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) { 768 exec->vtx.attrsz[i] = 0; 769 exec->vtx.active_sz[i] = 0; 767 for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) { 768 exec->vtx.attrsz[i] = 0; 769 exec->vtx.active_sz[i] = 0; 767 for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) { 768 exec->vtx.attrsz[i] = 0; 769 exec->vtx.active_sz[i] = 0; 767 for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) { 768 exec->vtx.attrsz[i] = 0; 769 exec->vtx.active_sz[i] = 0; 767 for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) { 768 exec->vtx.attrsz[i] = 0; 769 exec->vtx.active_sz[i] = 0; 767 for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) { 768 exec->vtx.attrsz[i] = 0; 769 exec->vtx.active_sz[i] = 0; 767 for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) { 768 exec->vtx.attrsz[i] = 0; 769 exec->vtx.active_sz[i] = 0; 767 for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) { 768 exec->vtx.attrsz[i] = 0; 769 exec->vtx.active_sz[i] = 0; 767 for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) { 768 exec->vtx.attrsz[i] = 0; 769 exec->vtx.active_sz[i] = 0; 767 for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) { 768 exec->vtx.attrsz[i] = 0; 769 exec->vtx.active_sz[i] = 0; 767 for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) { 768 exec->vtx.attrsz[i] = 0; 769 exec->vtx.active_sz[i] = 0; 767 for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) { 768 exec->vtx.attrsz[i] = 0; 769 exec->vtx.active_sz[i] = 0; 767 for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) { 768 exec->vtx.attrsz[i] = 0; 769 exec->vtx.active_sz[i] = 0; 767 for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) { 768 exec->vtx.attrsz[i] = 0; 769 exec->vtx.active_sz[i] = 0; 767 for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) { 768 exec->vtx.attrsz[i] = 0; 769 exec->vtx.active_sz[i] = 0; 767 for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) { 768 exec->vtx.attrsz[i] = 0; 769 exec->vtx.active_sz[i] = 0; 767 for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) { 768 exec->vtx.attrsz[i] = 0; 769 exec->vtx.active_sz[i] = 0; 767 for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) { 768 exec->vtx.attrsz[i] = 0; 769 exec->vtx.active_sz[i] = 0; 767 for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) { 768 exec->vtx.attrsz[i] = 0; 769 exec->vtx.active_sz[i] = 0; 767 for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) { 768 exec->vtx.attrsz[i] = 0; 769 exec->vtx.active_sz[i] = 0; 767 for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) { 768 exec->vtx.attrsz[i] = 0; 769 exec->vtx.active_sz[i] = 0; 767 for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) { 768 exec->vtx.attrsz[i] = 0; 769 exec->vtx.active_sz[i] = 0; 767 for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) { 768 exec->vtx.attrsz[i] = 0; 769 exec->vtx.active_sz[i] = 0; 767 for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) { 768 exec->vtx.attrsz[i] = 0; 769 exec->vtx.active_sz[i] = 0; 767 for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) { 768 exec->vtx.attrsz[i] = 0; 769 exec->vtx.active_sz[i] = 0; 767 for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) { 768 exec->vtx.attrsz[i] = 0; 769 exec->vtx.active_sz[i] = 0; 767 for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) { 768 exec->vtx.attrsz[i] = 0; 769 exec->vtx.active_sz[i] = 0; 767 for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) { 768 exec->vtx.attrsz[i] = 0; 769 exec->vtx.active_sz[i] = 0; 767 for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) { 768 exec->vtx.attrsz[i] = 0; 769 exec->vtx.active_sz[i] = 0; 767 for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) { 768 exec->vtx.attrsz[i] = 0; 769 exec->vtx.active_sz[i] = 0; 767 for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) { 768 exec->vtx.attrsz[i] = 0; 769 exec->vtx.active_sz[i] = 0; 767 for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) { 768 exec->vtx.attrsz[i] = 0; 769 exec->vtx.active_sz[i] = 0; 767 for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) { 768 exec->vtx.attrsz[i] = 0; 
769 exec->vtx.active_sz[i] = 0; 767 for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) { 768 exec->vtx.attrsz[i] = 0; 769 exec->vtx.active_sz[i] = 0; 767 for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) { 768 exec->vtx.attrsz[i] = 0; 769 exec->vtx.active_sz[i] = 0; 767 for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) { 768 exec->vtx.attrsz[i] = 0; 769 exec->vtx.active_sz[i] = 0; 767 for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) { 768 exec->vtx.attrsz[i] = 0; 769 exec->vtx.active_sz[i] = 0; 767 for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) { 773 } reset_attrfv (exec=0x88566c8) at vbo/vbo_exec_api.c:772 772 exec->vtx.vertex_size = 0; 773 } vbo_exec_FlushVertices (ctx=0x880faf0, flags=1) at vbo/vbo_exec_api.c:759 759 exec->ctx->Driver.NeedFlush = 0; 760 } enable_texture (ctx=0x880faf0, state=44 ',', bit=142960328) at main/enable.c:239 239 texUnit->Enabled = newenabled; 238 FLUSH_VERTICES(ctx, _NEW_TEXTURE); 239 texUnit->Enabled = newenabled; 241 } _mesa_set_enable (ctx=0x880faf0, cap=3553, state=0 '\0') at main/enable.c:996 996 if (ctx->Driver.Enable) { 997 ctx->Driver.Enable( ctx, cap, state ); radeonEnable (ctx=0x880faf0, cap=3553, state=0 '\0') at radeon_state.c:1728 1728 { 1729 radeonContextPtr rmesa = RADEON_CONTEXT(ctx); 1732 if ( RADEON_DEBUG & DEBUG_STATE ) 1737 switch ( cap ) { 2008 } _mesa_set_enable (ctx=0x880faf0, cap=3553, state=0 '\0') at main/enable.c:999 999 } _mesa_Disable (cap=3553) at main/enable.c:1027 1027 } glDisable (cap=3553) at ../../../src/mesa/glapi/glapitemp.h:1156 1156 ../../../src/mesa/glapi/glapitemp.h: No such file or directory. in ../../../src/mesa/glapi/glapitemp.h 1155 in ../../../src/mesa/glapi/glapitemp.h 1156 in ../../../src/mesa/glapi/glapitemp.h ogl_urect (left=63, top=151, right=121, bot=193) at arch/ogl/gr.c:437 437 if (Gr_scanline_darkening_level >= GR_FADE_LEVELS)
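Annotation: radeon_dma_render_quads_verts() earlier in the trace emits each GL_QUADS primitive as six vertices via the three EMIT_VERTS slices seen above (j..j+1, then j+3, then j+1..j+3), i.e. two triangles per quad. A sketch that prints the resulting vertex order for the single four-vertex quad drawn in this log:

    #include <stdio.h>

    /* Print the vertex order produced by the quad path in the trace: each quad
     * (j, j+1, j+2, j+3) is copied out as six vertices in the three slices
     * EMIT_VERTS(j, 2), EMIT_VERTS(j+3, 1), EMIT_VERTS(j+1, 3), i.e. the
     * triangles (j, j+1, j+3) and (j+1, j+2, j+3). */
    static void emit_quads_as_triangles(int start, int count)
    {
        int j;
        for (j = start; j < count - 3; j += 4) {
            printf("triangle: %d %d %d\n", j, j + 1, j + 3);
            printf("triangle: %d %d %d\n", j + 1, j + 2, j + 3);
        }
    }

    int main(void)
    {
        emit_quads_as_triangles(0, 4);   /* the single four-vertex quad in this log */
        return 0;
    }

This matches the count=4 quad that the per-vertex insert_4f_4 / insert_4ub_4f_rgba_3 / insert_2f_2 calls above fill into the DMA buffer before control returns to ogl_urect().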