/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <sys/errno.h>

#include "main/glheader.h"
#include "main/context.h"
#include "main/condrender.h"
#include "main/samplerobj.h"
#include "main/state.h"
#include "main/enums.h"
#include "main/macros.h"
#include "main/transformfeedback.h"

#include "vbo/vbo_context.h"
#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"
#include "drivers/common/meta.h"

#include "brw_draw.h"
#include "brw_defines.h"
#include "brw_context.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"

#define FILE_DEBUG_FLAG DEBUG_PRIMS
static const GLuint prim_to_hw_prim[GL_POLYGON+1] = {
   _3DPRIM_POINTLIST,
   _3DPRIM_LINELIST,
   _3DPRIM_LINELOOP,
   _3DPRIM_LINESTRIP,
   _3DPRIM_TRILIST,
   _3DPRIM_TRISTRIP,
   _3DPRIM_TRIFAN,
   _3DPRIM_QUADLIST,
   _3DPRIM_QUADSTRIP,
   _3DPRIM_POLYGON
};
static const GLenum reduced_prim[GL_POLYGON+1] = {
   GL_POINTS,
   GL_LINES,
   GL_LINES,
   GL_LINES,
   GL_TRIANGLES,
   GL_TRIANGLES,
   GL_TRIANGLES,
   GL_TRIANGLES,
   GL_TRIANGLES,
   GL_TRIANGLES
};
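
/* Both tables are indexed by the GL primitive enum, which runs from
 * GL_POINTS (0) through GL_POLYGON (9).  For example, reduced_prim[
 * GL_LINE_LOOP] is GL_LINES and reduced_prim[GL_QUAD_STRIP] is
 * GL_TRIANGLES: state that only cares whether we're rasterizing points,
 * lines or triangles keys off the reduced value rather than the exact
 * topology.
 */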
/* When the primitive changes, set a state bit and re-validate.  This isn't
 * the nicest approach; ideally every program would be immune to the active
 * primitive (i.e. cope with all possibilities), but that may not be
 * realistic.
 */
static void brw_set_prim(struct brw_context *brw,
                         const struct _mesa_prim *prim)
{
   struct gl_context *ctx = &brw->intel.ctx;
   uint32_t hw_prim = prim_to_hw_prim[prim->mode];

   DBG("PRIM: %s\n", _mesa_lookup_enum_by_nr(prim->mode));

   /* Slight optimization to avoid the GS program when not needed:
    */
   if (prim->mode == GL_QUAD_STRIP &&
       ctx->Light.ShadeModel != GL_FLAT &&
       ctx->Polygon.FrontMode == GL_FILL &&
       ctx->Polygon.BackMode == GL_FILL)
      hw_prim = _3DPRIM_TRISTRIP;

   if (prim->mode == GL_QUADS && prim->count == 4 &&
       ctx->Light.ShadeModel != GL_FLAT &&
       ctx->Polygon.FrontMode == GL_FILL &&
       ctx->Polygon.BackMode == GL_FILL) {
      hw_prim = _3DPRIM_TRIFAN;
   }
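
   /* A single quad (v0,v1,v2,v3) drawn as a trifan yields the triangles
    * (v0,v1,v2) and (v0,v2,v3), which cover the same area with the same
    * winding, so no GS is needed for this one-quad case.  Flat shading is
    * excluded above because quads and triangles use different provoking
    * vertices.
    */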

   if (hw_prim != brw->primitive) {
      brw->primitive = hw_prim;
      brw->state.dirty.brw |= BRW_NEW_PRIMITIVE;

      if (reduced_prim[prim->mode] != brw->intel.reduced_primitive) {
         brw->intel.reduced_primitive = reduced_prim[prim->mode];
         brw->state.dirty.brw |= BRW_NEW_REDUCED_PRIMITIVE;
      }
   }
}
static void gen6_set_prim(struct brw_context *brw,
                          const struct _mesa_prim *prim)
{
   uint32_t hw_prim;

   DBG("PRIM: %s\n", _mesa_lookup_enum_by_nr(prim->mode));

   hw_prim = prim_to_hw_prim[prim->mode];

   if (hw_prim != brw->primitive) {
      brw->primitive = hw_prim;
      brw->state.dirty.brw |= BRW_NEW_PRIMITIVE;
   }
}
/**
 * The hardware is capable of removing dangling vertices on its own; however,
 * prior to Gen6, we sometimes convert quads into trifans (and quad strips
 * into tristrips), since pre-Gen6 hardware requires a GS to render quads.
 * This function manually trims dangling vertices from a draw call involving
 * quads so that those dangling vertices won't get drawn when we convert to
 * trifans/tristrips.
 */
static GLuint trim(GLenum prim, GLuint length)
{
   if (prim == GL_QUAD_STRIP)
      return length > 3 ? (length - length % 2) : 0;
   else if (prim == GL_QUADS)
      return length - length % 4;
   else
      return length;
}
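
/* Worked example of the trimming above (illustrative values):
 *
 *    trim(GL_QUADS, 10)     == 8   -- two complete quads; the 2 dangling
 *                                     vertices are dropped
 *    trim(GL_QUAD_STRIP, 7) == 6   -- quad strips need an even vertex count
 *    trim(GL_QUAD_STRIP, 3) == 0   -- fewer than 4 vertices draws nothing
 *    trim(GL_TRIANGLES, 10) == 10  -- non-quad topologies pass through
 */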
static void brw_emit_prim(struct brw_context *brw,
                          const struct _mesa_prim *prim,
                          uint32_t hw_prim)
{
   struct intel_context *intel = &brw->intel;
   int verts_per_instance;
   int vertex_access_type;
   int start_vertex_location;
   int base_vertex_location;

   DBG("PRIM: %s %d %d\n", _mesa_lookup_enum_by_nr(prim->mode),
       prim->start, prim->count);

   start_vertex_location = prim->start;
   base_vertex_location = prim->basevertex;
   if (prim->indexed) {
      vertex_access_type = GEN4_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM;
      start_vertex_location += brw->ib.start_vertex_offset;
      base_vertex_location += brw->vb.start_vertex_bias;
   } else {
      vertex_access_type = GEN4_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL;
      start_vertex_location += brw->vb.start_vertex_bias;
   }

   /* We only need to trim the primitive count on pre-Gen6. */
   if (intel->gen < 6)
      verts_per_instance = trim(prim->mode, prim->count);
   else
      verts_per_instance = prim->count;

   /* If nothing to emit, just return. */
   if (verts_per_instance == 0)
      return;

   /* If we're set to always flush, do it before and after the primitive emit.
    * We want to catch both missed flushes that hurt instruction/state cache
    * and missed flushes of the render cache as it heads to other parts of
    * the GPU besides the draw code.
    */
   if (intel->always_flush_cache) {
      intel_batchbuffer_emit_mi_flush(intel);
   }

   BEGIN_BATCH(6);
   OUT_BATCH(CMD_3D_PRIM << 16 | (6 - 2) |
             hw_prim << GEN4_3DPRIM_TOPOLOGY_TYPE_SHIFT |
             vertex_access_type);
   OUT_BATCH(verts_per_instance);
   OUT_BATCH(start_vertex_location);
   OUT_BATCH(prim->num_instances);
   OUT_BATCH(prim->base_instance);
   OUT_BATCH(base_vertex_location);
   ADVANCE_BATCH();

   intel->batch.need_workaround_flush = true;

   if (intel->always_flush_cache) {
      intel_batchbuffer_emit_mi_flush(intel);
   }
}
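
/* For illustration, a glDrawArrays(GL_TRIANGLES, 0, 36) call with no
 * instancing would reach the packet above roughly as:
 *
 *    DW0: 3DPRIMITIVE header | TRILIST topology | SEQUENTIAL access
 *    DW1: 36  (verts_per_instance)
 *    DW2: 0   (start_vertex_location, plus any start_vertex_bias)
 *    DW3: 1   (num_instances)
 *    DW4: 0   (base_instance)
 *    DW5: 0   (base_vertex_location)
 *
 * (Values are illustrative; the actual start/base locations depend on how
 * the vertex data was uploaded.)
 */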
static void gen7_emit_prim(struct brw_context *brw,
                           const struct _mesa_prim *prim,
                           uint32_t hw_prim)
{
   struct intel_context *intel = &brw->intel;
   int verts_per_instance;
   int vertex_access_type;
   int start_vertex_location;
   int base_vertex_location;

   DBG("PRIM: %s %d %d\n", _mesa_lookup_enum_by_nr(prim->mode),
       prim->start, prim->count);

   start_vertex_location = prim->start;
   base_vertex_location = prim->basevertex;
   if (prim->indexed) {
      vertex_access_type = GEN7_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM;
      start_vertex_location += brw->ib.start_vertex_offset;
      base_vertex_location += brw->vb.start_vertex_bias;
   } else {
      vertex_access_type = GEN7_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL;
      start_vertex_location += brw->vb.start_vertex_bias;
   }

   verts_per_instance = prim->count;

   /* If nothing to emit, just return. */
   if (verts_per_instance == 0)
      return;

   /* If we're set to always flush, do it before and after the primitive emit.
    * We want to catch both missed flushes that hurt instruction/state cache
    * and missed flushes of the render cache as it heads to other parts of
    * the GPU besides the draw code.
    */
   if (intel->always_flush_cache) {
      intel_batchbuffer_emit_mi_flush(intel);
   }

   BEGIN_BATCH(7);
   OUT_BATCH(CMD_3D_PRIM << 16 | (7 - 2));
   OUT_BATCH(hw_prim | vertex_access_type);
   OUT_BATCH(verts_per_instance);
   OUT_BATCH(start_vertex_location);
   OUT_BATCH(prim->num_instances);
   OUT_BATCH(prim->base_instance);
   OUT_BATCH(base_vertex_location);
   ADVANCE_BATCH();

   if (intel->always_flush_cache) {
      intel_batchbuffer_emit_mi_flush(intel);
   }
}
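
/* Note the contrast with the pre-Gen7 packet above: on Gen7 the
 * 3DPRIMITIVE command grew to 7 dwords, and the topology type and vertex
 * access mode moved out of the header dword into their own dword.
 */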
static void brw_merge_inputs( struct brw_context *brw,
                              const struct gl_client_array *arrays[])
{
   GLuint i;

   for (i = 0; i < brw->vb.nr_buffers; i++) {
      drm_intel_bo_unreference(brw->vb.buffers[i].bo);
      brw->vb.buffers[i].bo = NULL;
   }
   brw->vb.nr_buffers = 0;

   for (i = 0; i < VERT_ATTRIB_MAX; i++) {
      brw->vb.inputs[i].buffer = -1;
      brw->vb.inputs[i].glarray = arrays[i];
      brw->vb.inputs[i].attrib = (gl_vert_attrib) i;
   }
}
/**
 * \brief Resolve buffers before drawing.
 *
 * Resolve the depth buffer's HiZ buffer and resolve the depth buffer of each
 * enabled depth texture.
 *
 * (In the future, this will also perform MSAA resolves).
 */
static void
brw_predraw_resolve_buffers(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   struct intel_renderbuffer *depth_irb;
   struct intel_texture_object *tex_obj;

   /* Resolve the depth buffer's HiZ buffer. */
   depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
   if (depth_irb)
      intel_renderbuffer_resolve_hiz(intel, depth_irb);

   /* Resolve depth buffer of each enabled depth texture. */
   for (int i = 0; i < BRW_MAX_TEX_UNIT; i++) {
      if (!ctx->Texture.Unit[i]._ReallyEnabled)
         continue;
      tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
      if (!tex_obj || !tex_obj->mt)
         continue;
      intel_miptree_all_slices_resolve_depth(intel, tex_obj->mt);
   }
}
/**
 * \brief Call this after drawing to mark which buffers need resolving.
 *
 * If the depth buffer was written to and if it has an accompanying HiZ
 * buffer, then mark that it needs a depth resolve.
 *
 * If the color buffer is a multisample window system buffer, then
 * mark that it needs a downsample.
 */
static void brw_postdraw_set_buffers_need_resolve(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &brw->intel.ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;

   struct intel_renderbuffer *front_irb = NULL;
   struct intel_renderbuffer *back_irb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
   struct intel_renderbuffer *depth_irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);

   if (intel->is_front_buffer_rendering)
      front_irb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);

   if (front_irb)
      intel_renderbuffer_set_needs_downsample(front_irb);
   if (back_irb)
      intel_renderbuffer_set_needs_downsample(back_irb);
   if (depth_irb && ctx->Depth.Mask)
      intel_renderbuffer_set_needs_depth_resolve(depth_irb);
}
/* May fail if out of video memory for texture or vbo upload, or on
 * fallback conditions.
 */
static bool brw_try_draw_prims( struct gl_context *ctx,
                                const struct gl_client_array *arrays[],
                                const struct _mesa_prim *prim,
                                GLuint nr_prims,
                                const struct _mesa_index_buffer *ib,
                                GLuint min_index,
                                GLuint max_index )
{
   struct intel_context *intel = intel_context(ctx);
   struct brw_context *brw = brw_context(ctx);
   bool retval = true;
   GLuint i;
   bool fail_next = false;

   if (ctx->NewState)
      _mesa_update_state( ctx );

   /* We have to validate the textures *before* checking for fallbacks;
    * otherwise, the software fallback won't be able to rely on the
    * texture state, the firstLevel and lastLevel fields won't be
    * set in the intel texture object (they'll both be 0), and the
    * software fallback will segfault if it attempts to access any
    * texture level other than level 0.
    */
   brw_validate_textures( brw );

   intel_prepare_render(intel);

   /* This workaround has to happen outside of brw_upload_state() because it
    * may flush the batchbuffer for a blit, affecting the state flags.
    */
   brw_workaround_depthstencil_alignment(brw, 0);

   /* Resolves must occur after updating renderbuffers, updating context state,
    * and finalizing textures but before setting up any hardware state for
    * this draw call.
    */
   brw_predraw_resolve_buffers(brw);

   /* Bind all inputs, derive varying and size information:
    */
   brw_merge_inputs( brw, arrays );

   brw->ib.ib = ib;
   brw->state.dirty.brw |= BRW_NEW_INDICES;

   brw->vb.min_index = min_index;
   brw->vb.max_index = max_index;
   brw->state.dirty.brw |= BRW_NEW_VERTICES;

   for (i = 0; i < nr_prims; i++) {
      int estimated_max_prim_size;

      estimated_max_prim_size = 512; /* batchbuffer commands */
      estimated_max_prim_size += (BRW_MAX_TEX_UNIT *
                                  (sizeof(struct brw_sampler_state) +
                                   sizeof(struct gen5_sampler_default_color)));
      estimated_max_prim_size += 1024; /* gen6 VS push constants */
      estimated_max_prim_size += 1024; /* gen6 WM push constants */
      estimated_max_prim_size += 512; /* misc. pad */

      /* Flush the batch if it's approaching full, so that we don't wrap while
       * we've got validated state that needs to be in the same batch as the
       * primitives.
       */
      intel_batchbuffer_require_space(intel, estimated_max_prim_size, false);
      intel_batchbuffer_save_state(intel);

      if (brw->num_instances != prim->num_instances) {
         brw->num_instances = prim->num_instances;
         brw->state.dirty.brw |= BRW_NEW_VERTICES;
      }
      if (brw->basevertex != prim->basevertex) {
         brw->basevertex = prim->basevertex;
         brw->state.dirty.brw |= BRW_NEW_VERTICES;
      }
      if (intel->gen < 6)
         brw_set_prim(brw, &prim[i]);
      else
         gen6_set_prim(brw, &prim[i]);

retry:
      /* Note that before the loop, brw->state.dirty.brw was set to != 0, and
       * the only state updated within the loop outside of this block is that
       * in *_set_prim or intel_batchbuffer_flush(), which only affects
       * brw->state.dirty.brw.
       */
      if (brw->state.dirty.brw) {
         intel->no_batch_wrap = true;
         brw_upload_state(brw);
      }

      if (intel->gen >= 7)
         gen7_emit_prim(brw, &prim[i], brw->primitive);
      else
         brw_emit_prim(brw, &prim[i], brw->primitive);

      intel->no_batch_wrap = false;

      if (dri_bufmgr_check_aperture_space(&intel->batch.bo, 1)) {
         if (!fail_next) {
            intel_batchbuffer_reset_to_saved(intel);
            intel_batchbuffer_flush(intel);
            fail_next = true;
            goto retry;
         } else {
            if (intel_batchbuffer_flush(intel) == -ENOSPC) {
               static bool warned = false;

               if (!warned) {
                  fprintf(stderr, "i965: Single primitive emit exceeded "
                          "available aperture space\n");
                  warned = true;
               }

               retval = false;
            }
         }
      }
   }

   if (intel->always_flush_batch)
      intel_batchbuffer_flush(intel);

   brw_state_cache_check_size(brw);
   brw_postdraw_set_buffers_need_resolve(brw);

   return retval;
}
void brw_draw_prims( struct gl_context *ctx,
                     const struct _mesa_prim *prim,
                     GLuint nr_prims,
                     const struct _mesa_index_buffer *ib,
                     GLboolean index_bounds_valid,
                     GLuint min_index,
                     GLuint max_index,
                     struct gl_transform_feedback_object *tfb_vertcount )
{
   struct intel_context *intel = intel_context(ctx);
   const struct gl_client_array **arrays = ctx->Array._DrawArrays;

   if (!_mesa_check_conditional_render(ctx))
      return;

   /* Handle primitive restart if needed */
   if (brw_handle_primitive_restart(ctx, prim, nr_prims, ib)) {
      /* The draw was handled, so we can exit now */
      return;
   }

   /* If we're going to have to upload any of the user's vertex arrays, then
    * get the minimum and maximum of their index buffer so we know what range
    * to upload.
    */
   if (!vbo_all_varyings_in_vbos(arrays) && !index_bounds_valid)
      vbo_get_minmax_indices(ctx, prim, ib, &min_index, &max_index, nr_prims);

   /* Do GL_SELECT and GL_FEEDBACK rendering using swrast, even though it
    * won't support all the extensions we support.
    */
   if (ctx->RenderMode != GL_RENDER) {
      perf_debug("%s render mode not supported in hardware\n",
                 _mesa_lookup_enum_by_nr(ctx->RenderMode));
      _swsetup_Wakeup(ctx);
      _tnl_wakeup(ctx);
      _tnl_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);
      return;
   }

   /* Try drawing with the hardware, but don't do anything else if we can't
    * manage it.  swrast doesn't support our featureset, so we can't fall back
    * to it.
    */
   brw_try_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);
}
void brw_draw_init( struct brw_context *brw )
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct vbo_context *vbo = vbo_context(ctx);
   int i;

   /* Register our drawing function:
    */
   vbo->draw_prims = brw_draw_prims;

   for (i = 0; i < VERT_ATTRIB_MAX; i++)
      brw->vb.inputs[i].buffer = -1;
   brw->vb.nr_buffers = 0;
   brw->vb.nr_enabled = 0;
}
void brw_draw_destroy( struct brw_context *brw )
{
   int i;

   for (i = 0; i < brw->vb.nr_buffers; i++) {
      drm_intel_bo_unreference(brw->vb.buffers[i].bo);
      brw->vb.buffers[i].bo = NULL;
   }
   brw->vb.nr_buffers = 0;

   for (i = 0; i < brw->vb.nr_enabled; i++) {
      brw->vb.enabled[i]->buffer = -1;
   }
   brw->vb.nr_enabled = 0;

   drm_intel_bo_unreference(brw->ib.bo);
   brw->ib.bo = NULL;
}