/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <sys/errno.h>

#include "main/glheader.h"
#include "main/context.h"
#include "main/condrender.h"
#include "main/samplerobj.h"
#include "main/state.h"
#include "main/enums.h"
#include "main/macros.h"

#include "vbo/vbo_context.h"

#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"
#include "tnl/tnl.h"
#include "drivers/common/meta.h"

#include "brw_draw.h"
#include "brw_defines.h"
#include "brw_context.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#define FILE_DEBUG_FLAG DEBUG_PRIMS

static GLuint prim_to_hw_prim[GL_POLYGON+1] = {
   _3DPRIM_POINTLIST,
   _3DPRIM_LINELIST,
   _3DPRIM_LINELOOP,
   _3DPRIM_LINESTRIP,
   _3DPRIM_TRILIST,
   _3DPRIM_TRISTRIP,
   _3DPRIM_TRIFAN,
   _3DPRIM_QUADLIST,
   _3DPRIM_QUADSTRIP,
   _3DPRIM_POLYGON
};

static const GLenum reduced_prim[GL_POLYGON+1] = {
   GL_POINTS,
   GL_LINES,
   GL_LINES,
   GL_LINES,
   GL_TRIANGLES,
   GL_TRIANGLES,
   GL_TRIANGLES,
   GL_TRIANGLES,
   GL_TRIANGLES,
   GL_TRIANGLES
};
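
/* Both tables are indexed directly by the GL primitive mode: the GLenum
 * values for GL_POINTS (0) through GL_POLYGON (9) are contiguous, which is
 * why GL_POLYGON+1 entries suffice.  For example,
 * prim_to_hw_prim[GL_TRIANGLE_STRIP] is _3DPRIM_TRISTRIP and
 * reduced_prim[GL_LINE_LOOP] is GL_LINES.
 */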
/* When the primitive changes, set a state bit and re-validate.  This is
 * not the nicest approach; we would rather deal with it by having all the
 * programs be immune to the active primitive (i.e. cope with all
 * possibilities).  That may not be realistic however.
 */
static void brw_set_prim(struct brw_context *brw,
                         const struct _mesa_prim *prim)
{
   struct gl_context *ctx = &brw->intel.ctx;
   uint32_t hw_prim = prim_to_hw_prim[prim->mode];

   DBG("PRIM: %s\n", _mesa_lookup_enum_by_nr(prim->mode));

   /* Slight optimization to avoid the GS program when not needed:
    */
   if (prim->mode == GL_QUAD_STRIP &&
       ctx->Light.ShadeModel != GL_FLAT &&
       ctx->Polygon.FrontMode == GL_FILL &&
       ctx->Polygon.BackMode == GL_FILL)
      hw_prim = _3DPRIM_TRISTRIP;

   if (prim->mode == GL_QUADS && prim->count == 4 &&
       ctx->Light.ShadeModel != GL_FLAT &&
       ctx->Polygon.FrontMode == GL_FILL &&
       ctx->Polygon.BackMode == GL_FILL) {
      hw_prim = _3DPRIM_TRIFAN;
   }

   if (hw_prim != brw->primitive) {
      brw->primitive = hw_prim;
      brw->state.dirty.brw |= BRW_NEW_PRIMITIVE;

      if (reduced_prim[prim->mode] != brw->intel.reduced_primitive) {
         brw->intel.reduced_primitive = reduced_prim[prim->mode];
         brw->state.dirty.brw |= BRW_NEW_REDUCED_PRIMITIVE;
      }
   }
}
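
/* Gen6+ note: unlike brw_set_prim() above, no quad-to-triangle rewriting is
 * attempted here and there is no reduced-primitive tracking; this assumes
 * the Gen6+ pipeline can consume the prim_to_hw_prim[] topology directly,
 * without the GS program the pre-Gen6 path tries to avoid.
 */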
static void gen6_set_prim(struct brw_context *brw,
                          const struct _mesa_prim *prim)
{
   uint32_t hw_prim;

   DBG("PRIM: %s\n", _mesa_lookup_enum_by_nr(prim->mode));

   hw_prim = prim_to_hw_prim[prim->mode];

   if (hw_prim != brw->primitive) {
      brw->primitive = hw_prim;
      brw->state.dirty.brw |= BRW_NEW_PRIMITIVE;
   }
}
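
/* Trim a draw call's vertex count down to a complete number of primitives,
 * so that dangling vertices of an incomplete final quad or quad strip
 * segment are not drawn.  For example, trim(GL_QUADS, 7) == 4 (one complete
 * quad, three dangling vertices dropped) and trim(GL_QUAD_STRIP, 5) == 4.
 */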
static GLuint trim(GLenum prim, GLuint length)
{
   if (prim == GL_QUAD_STRIP)
      return length > 3 ? (length - length % 2) : 0;
   else if (prim == GL_QUADS)
      return length - length % 4;
   else
      return length;
}
static void brw_emit_prim(struct brw_context *brw,
                          const struct _mesa_prim *prim,
                          uint32_t hw_prim)
{
   struct intel_context *intel = &brw->intel;
   int verts_per_instance;
   int vertex_access_type;
   int start_vertex_location;
   int base_vertex_location;

   DBG("PRIM: %s %d %d\n", _mesa_lookup_enum_by_nr(prim->mode),
       prim->start, prim->count);

   start_vertex_location = prim->start;
   base_vertex_location = prim->basevertex;
   if (prim->indexed) {
      vertex_access_type = GEN4_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM;
      start_vertex_location += brw->ib.start_vertex_offset;
      base_vertex_location += brw->vb.start_vertex_bias;
   } else {
      vertex_access_type = GEN4_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL;
      start_vertex_location += brw->vb.start_vertex_bias;
   }

   verts_per_instance = trim(prim->mode, prim->count);

   /* If nothing to emit, just return. */
   if (verts_per_instance == 0)
      return;

   /* If we're set to always flush, do it before and after the primitive emit.
    * We want to catch both missed flushes that hurt instruction/state cache
    * and missed flushes of the render cache as it heads to other parts of
    * the GPU besides the draw code.
    */
   if (intel->always_flush_cache) {
      intel_batchbuffer_emit_mi_flush(intel);
   }

   BEGIN_BATCH(6);
   OUT_BATCH(CMD_3D_PRIM << 16 | (6 - 2) |
             hw_prim << GEN4_3DPRIM_TOPOLOGY_TYPE_SHIFT |
             vertex_access_type);
   OUT_BATCH(verts_per_instance);
   OUT_BATCH(start_vertex_location);
   OUT_BATCH(prim->num_instances);
   OUT_BATCH(0); // start instance location
   OUT_BATCH(base_vertex_location);
   ADVANCE_BATCH();

   intel->batch.need_workaround_flush = true;

   if (intel->always_flush_cache) {
      intel_batchbuffer_emit_mi_flush(intel);
   }
}
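
/* Gen7 variant of brw_emit_prim().  The 3DPRIMITIVE packet grew to 7 dwords
 * on Gen7, and the topology and vertex access type moved out of the command
 * header into the second dword, as the BEGIN_BATCH(7)/OUT_BATCH sequence
 * below reflects.
 */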
static void gen7_emit_prim(struct brw_context *brw,
                           const struct _mesa_prim *prim,
                           uint32_t hw_prim)
{
   struct intel_context *intel = &brw->intel;
   int verts_per_instance;
   int vertex_access_type;
   int start_vertex_location;
   int base_vertex_location;

   DBG("PRIM: %s %d %d\n", _mesa_lookup_enum_by_nr(prim->mode),
       prim->start, prim->count);

   start_vertex_location = prim->start;
   base_vertex_location = prim->basevertex;
   if (prim->indexed) {
      vertex_access_type = GEN7_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM;
      start_vertex_location += brw->ib.start_vertex_offset;
      base_vertex_location += brw->vb.start_vertex_bias;
   } else {
      vertex_access_type = GEN7_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL;
      start_vertex_location += brw->vb.start_vertex_bias;
   }

   verts_per_instance = trim(prim->mode, prim->count);

   /* If nothing to emit, just return. */
   if (verts_per_instance == 0)
      return;

   /* If we're set to always flush, do it before and after the primitive emit.
    * We want to catch both missed flushes that hurt instruction/state cache
    * and missed flushes of the render cache as it heads to other parts of
    * the GPU besides the draw code.
    */
   if (intel->always_flush_cache) {
      intel_batchbuffer_emit_mi_flush(intel);
   }

   BEGIN_BATCH(7);
   OUT_BATCH(CMD_3D_PRIM << 16 | (7 - 2));
   OUT_BATCH(hw_prim | vertex_access_type);
   OUT_BATCH(verts_per_instance);
   OUT_BATCH(start_vertex_location);
   OUT_BATCH(prim->num_instances);
   OUT_BATCH(0); // start instance location
   OUT_BATCH(base_vertex_location);
   ADVANCE_BATCH();

   if (intel->always_flush_cache) {
      intel_batchbuffer_emit_mi_flush(intel);
   }
}
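
/* Merge the freshly-bound client arrays into brw->vb and record each
 * attribute's size.  brw->vb.info.sizes packs (Size - 1) into 2 bits per
 * attribute, 16 attributes per uint32_t, which is what the i/16 and
 * (i%16)*2 arithmetic below implements.
 */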
static void brw_merge_inputs( struct brw_context *brw,
                              const struct gl_client_array *arrays[])
{
   struct brw_vertex_info old = brw->vb.info;
   GLuint i;

   for (i = 0; i < brw->vb.nr_buffers; i++) {
      drm_intel_bo_unreference(brw->vb.buffers[i].bo);
      brw->vb.buffers[i].bo = NULL;
   }
   brw->vb.nr_buffers = 0;

   memset(&brw->vb.info, 0, sizeof(brw->vb.info));

   for (i = 0; i < VERT_ATTRIB_MAX; i++) {
      brw->vb.inputs[i].buffer = -1;
      brw->vb.inputs[i].glarray = arrays[i];
      brw->vb.inputs[i].attrib = (gl_vert_attrib) i;

      if (arrays[i]->StrideB != 0)
         brw->vb.info.sizes[i/16] |= (brw->vb.inputs[i].glarray->Size - 1) <<
            ((i%16) * 2);
   }

   /* Raise statechanges if input sizes have changed. */
   if (memcmp(brw->vb.info.sizes, old.sizes, sizeof(old.sizes)) != 0)
      brw->state.dirty.brw |= BRW_NEW_INPUT_DIMENSIONS;
}
/**
 * \brief Resolve buffers before drawing.
 *
 * Resolve the depth buffer's HiZ buffer and resolve the depth buffer of each
 * enabled depth texture.
 *
 * (In the future, this will also perform MSAA resolves).
 */
static void
brw_predraw_resolve_buffers(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   struct intel_renderbuffer *depth_irb;
   struct intel_texture_object *tex_obj;

   /* Resolve the depth buffer's HiZ buffer. */
   depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
   if (depth_irb)
      intel_renderbuffer_resolve_hiz(intel, depth_irb);

   /* Resolve depth buffer of each enabled depth texture. */
   for (int i = 0; i < BRW_MAX_TEX_UNIT; i++) {
      if (!ctx->Texture.Unit[i]._ReallyEnabled)
         continue;
      tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
      if (!tex_obj || !tex_obj->mt)
         continue;
      intel_miptree_all_slices_resolve_depth(intel, tex_obj->mt);
   }
}
/**
 * \brief Call this after drawing to mark which buffers need resolving.
 *
 * If the depth buffer was written to and if it has an accompanying HiZ
 * buffer, then mark that it needs a depth resolve.
 *
 * If the color buffer is a multisample window system buffer, then
 * mark that it needs a downsample.
 */
static void brw_postdraw_set_buffers_need_resolve(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &brw->intel.ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;

   struct intel_renderbuffer *front_irb = NULL;
   struct intel_renderbuffer *back_irb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
   struct intel_renderbuffer *depth_irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);

   if (intel->is_front_buffer_rendering)
      front_irb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);

   if (front_irb)
      intel_renderbuffer_set_needs_downsample(front_irb);
   if (back_irb)
      intel_renderbuffer_set_needs_downsample(back_irb);
   if (depth_irb && ctx->Depth.Mask)
      intel_renderbuffer_set_needs_depth_resolve(depth_irb);
}
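
/* Number of vertices the hardware consumes per complete primitive of the
 * given mode; used below for transform feedback primitive bookkeeping.
 */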
static int
verts_per_prim(GLenum mode)
{
   switch (mode) {
   case GL_POINTS:
      return 1;
   case GL_LINE_STRIP:
   case GL_LINE_LOOP:
   case GL_LINES:
      return 2;
   case GL_TRIANGLE_STRIP:
   case GL_TRIANGLE_FAN:
   case GL_POLYGON:
   case GL_TRIANGLES:
   case GL_QUADS:
   case GL_QUAD_STRIP:
      return 3;
   default:
      _mesa_problem(NULL,
                    "unknown prim type in transform feedback primitive count");
      return 0;
   }
}
/**
 * Update internal counters based on the drawing operation described in
 * \c prim.
 */
static void
brw_update_primitive_count(struct brw_context *brw,
                           const struct _mesa_prim *prim)
{
   uint32_t count = count_tessellated_primitives(prim);
   brw->sol.primitives_generated += count;
   if (brw->intel.ctx.TransformFeedback.CurrentObject->Active &&
       !brw->intel.ctx.TransformFeedback.CurrentObject->Paused) {
      /* Update brw->sol.svbi_0_max_index to reflect the amount by which the
       * hardware is going to increment SVBI 0 when this drawing operation
       * occurs.  This is necessary because the kernel does not (yet) save and
       * restore GPU registers when context switching, so we'll need to be
       * able to reload SVBI 0 with the correct value in case we have to start
       * a new batch buffer.
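       *
       * For example (hypothetical numbers, just to illustrate the math
       * below): drawing 50 GL_TRIANGLES primitives (verts == 3) with only
       * 100 index slots of space left gives space_avail == 33, so only 33
       * primitives count as written and svbi_0_starting_index advances by 99.
       */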
      unsigned verts = verts_per_prim(prim->mode);
      uint32_t space_avail =
         (brw->sol.svbi_0_max_index - brw->sol.svbi_0_starting_index) / verts;
      uint32_t primitives_written = MIN2(space_avail, count);
      brw->sol.svbi_0_starting_index += verts * primitives_written;

      /* And update the TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN query. */
      brw->sol.primitives_written += primitives_written;
   }
}
/* May fail if out of video memory for texture or vbo upload, or on
 * fallback conditions.
 */
static bool brw_try_draw_prims( struct gl_context *ctx,
                                const struct gl_client_array *arrays[],
                                const struct _mesa_prim *prim,
                                GLuint nr_prims,
                                const struct _mesa_index_buffer *ib,
                                GLuint min_index,
                                GLuint max_index )
{
   struct intel_context *intel = intel_context(ctx);
   struct brw_context *brw = brw_context(ctx);
   bool retval = true;
   GLuint i;
   bool fail_next = false;

   if (ctx->NewState)
      _mesa_update_state( ctx );

   /* We have to validate the textures *before* checking for fallbacks;
    * otherwise, the software fallback won't be able to rely on the
    * texture state, the firstLevel and lastLevel fields won't be
    * set in the intel texture object (they'll both be 0), and the
    * software fallback will segfault if it attempts to access any
    * texture level other than level 0.
    */
   brw_validate_textures( brw );

   intel_prepare_render(intel);

   /* Resolves must occur after updating renderbuffers, updating context state,
    * and finalizing textures but before setting up any hardware state for
    * this draw call.
    */
   brw_predraw_resolve_buffers(brw);

   /* Bind all inputs, derive varying and size information:
    */
   brw_merge_inputs( brw, arrays );

   brw->ib.ib = ib;
   brw->state.dirty.brw |= BRW_NEW_INDICES;

   brw->vb.min_index = min_index;
   brw->vb.max_index = max_index;
   brw->state.dirty.brw |= BRW_NEW_VERTICES;

   /* Have to validate state quite late.  Will rebuild tnl_program,
    * which depends on varying information.
    *
    * Note this is where brw->vs->prog_data.inputs_read is calculated,
    * so we can't access it earlier.
    */
   for (i = 0; i < nr_prims; i++) {
      int estimated_max_prim_size;

      estimated_max_prim_size = 512; /* batchbuffer commands */
      estimated_max_prim_size += (BRW_MAX_TEX_UNIT *
                                  (sizeof(struct brw_sampler_state) +
                                   sizeof(struct gen5_sampler_default_color)));
      estimated_max_prim_size += 1024; /* gen6 VS push constants */
      estimated_max_prim_size += 1024; /* gen6 WM push constants */
      estimated_max_prim_size += 512; /* misc. pad */

      /* Flush the batch if it's approaching full, so that we don't wrap while
       * we've got validated state that needs to be in the same batch as the
       * primitives.
       */
      intel_batchbuffer_require_space(intel, estimated_max_prim_size, false);
      intel_batchbuffer_save_state(intel);

      brw->num_instances = prim->num_instances;
      if (intel->gen < 6)
         brw_set_prim(brw, &prim[i]);
      else
         gen6_set_prim(brw, &prim[i]);

retry:
      /* Note that before the loop, brw->state.dirty.brw was set to != 0, and
       * that the state updated in the loop outside of this block is that in
       * *_set_prim or intel_batchbuffer_flush(), which only impacts
       * brw->state.dirty.brw.
       */
      if (brw->state.dirty.brw) {
         intel->no_batch_wrap = true;
         brw_upload_state(brw);

         if (unlikely(brw->intel.Fallback)) {
            intel->no_batch_wrap = false;
            retval = false;
            goto out;
         }
      }

      if (intel->gen >= 7)
         gen7_emit_prim(brw, &prim[i], brw->primitive);
      else
         brw_emit_prim(brw, &prim[i], brw->primitive);

      intel->no_batch_wrap = false;

      if (dri_bufmgr_check_aperture_space(&intel->batch.bo, 1)) {
         if (!fail_next) {
            intel_batchbuffer_reset_to_saved(intel);
            intel_batchbuffer_flush(intel);
            fail_next = true;
            goto retry;
         } else {
            if (intel_batchbuffer_flush(intel) == -ENOSPC) {
               static bool warned = false;

               if (!warned) {
                  fprintf(stderr, "i965: Single primitive emit exceeded "
                          "available aperture space\n");
                  warned = true;
               }

               retval = false;
            }
         }
      }

      if (!_mesa_meta_in_progress(ctx))
         brw_update_primitive_count(brw, &prim[i]);
   }

   if (intel->always_flush_batch)
      intel_batchbuffer_flush(intel);
 out:

   brw_state_cache_check_size(brw);
   brw_postdraw_set_buffers_need_resolve(brw);

   return retval;
}
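
/* This is the driver's main drawing entry point, registered as the vbo
 * module's draw_prims hook in brw_draw_init() below.
 */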
void brw_draw_prims( struct gl_context *ctx,
                     const struct _mesa_prim *prim,
                     GLuint nr_prims,
                     const struct _mesa_index_buffer *ib,
                     GLboolean index_bounds_valid,
                     GLuint min_index,
                     GLuint max_index,
                     struct gl_transform_feedback_object *tfb_vertcount )
{
   const struct gl_client_array **arrays = ctx->Array._DrawArrays;
   bool retval;

   if (!_mesa_check_conditional_render(ctx))
      return;

   /* Handle primitive restart if needed */
   if (brw_handle_primitive_restart(ctx, prim, nr_prims, ib)) {
      /* The draw was handled, so we can exit now */
      return;
   }

   if (!vbo_all_varyings_in_vbos(arrays)) {
      if (!index_bounds_valid)
         vbo_get_minmax_indices(ctx, prim, ib, &min_index, &max_index, nr_prims);

      /* Decide if we want to rebase.  If so we end up recursing once
       * only into this function.
       */
      if (min_index != 0 && !vbo_any_varyings_in_vbos(arrays)) {
         vbo_rebase_prims(ctx, arrays,
                          prim, nr_prims,
                          ib, min_index, max_index,
                          brw_draw_prims );
         return;
      }
   }

   /* Make a first attempt at drawing:
    */
   retval = brw_try_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);

   /* Otherwise, we really are out of memory.  Pass the drawing
    * command to the software tnl module, which will in turn call
    * swrast to do the drawing.
    */
   if (!retval) {
      _swsetup_Wakeup(ctx);
      _tnl_wakeup(ctx);
      _tnl_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);
   }
}
void brw_draw_init( struct brw_context *brw )
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct vbo_context *vbo = vbo_context(ctx);
   GLuint i;

   /* Register our drawing function:
    */
   vbo->draw_prims = brw_draw_prims;

   for (i = 0; i < VERT_ATTRIB_MAX; i++)
      brw->vb.inputs[i].buffer = -1;
   brw->vb.nr_buffers = 0;
   brw->vb.nr_enabled = 0;
}
void brw_draw_destroy( struct brw_context *brw )
{
   GLuint i;

   for (i = 0; i < brw->vb.nr_buffers; i++) {
      drm_intel_bo_unreference(brw->vb.buffers[i].bo);
      brw->vb.buffers[i].bo = NULL;
   }
   brw->vb.nr_buffers = 0;

   for (i = 0; i < brw->vb.nr_enabled; i++) {
      brw->vb.enabled[i]->buffer = -1;
   }
   brw->vb.nr_enabled = 0;

   drm_intel_bo_unreference(brw->ib.bo);
   brw->ib.bo = NULL;
}