1 /**************************************************************************
2 * Copyright (c) 2007, Intel Corporation.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
19 * develop this driver.
21 **************************************************************************/
23 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
28 #include "psb_scene.h"
/*
 * psb_clear_scene_atomic() - zero every page of the scene's clear region
 * using atomic kernel mappings, so it is callable from atomic context.
 *
 * NOTE(review): this listing has gaps (embedded line numbers jump), so the
 * body shown is incomplete: the opening brace, local declarations (i, page,
 * v) and the branch that selects between the KM_IRQ0 and KM_USER0 mapping
 * slots (presumably an in_irq() check) were dropped — recover the full body
 * from the original psb_scene.c before modifying.
 */
30 void psb_clear_scene_atomic(struct psb_scene *scene)
36 for (i = 0; i < scene->clear_num_pages; ++i) {
/* Look up the i'th backing page of the scene hw buffer through its TTM. */
37 page = drm_ttm_get_page(scene->hw_data->ttm,
38 scene->clear_p_start + i);
/* Two kmap_atomic slots appear back to back; selection logic (likely
 * in_irq()) is missing from this listing — TODO confirm. */
40 v = kmap_atomic(page, KM_IRQ0);
42 v = kmap_atomic(page, KM_USER0);
/* Zero the whole mapped page. */
44 memset(v, 0, PAGE_SIZE);
47 kunmap_atomic(v, KM_IRQ0);
49 kunmap_atomic(v, KM_USER0);
/*
 * psb_clear_scene() - zero the scene's clear region in one go by kmapping
 * the relevant page range of the scene's hw buffer object.
 *
 * Returns 0 on success or a negative errno from drm_bo_kmap()
 * (NOTE(review): error check, drm_bo_kunmap() and the return statement are
 * missing from this gapped listing — verify against the original file).
 */
53 int psb_clear_scene(struct psb_scene *scene)
55 struct drm_bo_kmap_obj bmo;
/* Map clear_num_pages pages starting at clear_p_start of the hw buffer. */
59 int ret = drm_bo_kmap(scene->hw_data, scene->clear_p_start,
60 scene->clear_num_pages, &bmo);
62 PSB_DEBUG_RENDER("Scene clear\n");
66 addr = drm_bmo_virtual(&bmo, &is_iomem);
/* Zero the entire mapped range (pages << PAGE_SHIFT bytes). */
68 memset(addr, 0, scene->clear_num_pages << PAGE_SHIFT);
/*
 * psb_destroy_scene_devlocked() - final teardown of a scene object: drop
 * the hw buffer object reference and free the scene struct.
 *
 * "_devlocked" naming suggests the caller holds dev->struct_mutex
 * (consistent with drm_bo_usage_deref_locked) — confirm at call sites.
 */
74 static void psb_destroy_scene_devlocked(struct psb_scene *scene)
79 PSB_DEBUG_RENDER("Scene destroy\n");
80 drm_bo_usage_deref_locked(&scene->hw_data);
81 drm_free(scene, sizeof(*scene), DRM_MEM_DRIVER);
/*
 * psb_scene_unref_devlocked() - drop one reference on *scene; when the
 * refcount hits zero, detach it from the scheduler and destroy it.
 *
 * NOTE(review): the listing is gapped — the line that NULLs *scene and the
 * closing braces are not visible here.
 */
84 void psb_scene_unref_devlocked(struct psb_scene **scene)
86 struct psb_scene *tmp_scene = *scene;
88 PSB_DEBUG_RENDER("Scene unref\n");
/* Last reference gone: remove scheduler back-references, then free. */
90 if (atomic_dec_and_test(&tmp_scene->ref_count)) {
91 psb_scheduler_remove_scene_refs(tmp_scene);
92 psb_destroy_scene_devlocked(tmp_scene);
/*
 * psb_scene_ref() - take an additional reference on a scene.
 * Presumably returns src; the return statement is missing from this
 * gapped listing — confirm against the original file.
 */
96 struct psb_scene *psb_scene_ref(struct psb_scene *src)
98 PSB_DEBUG_RENDER("Scene ref\n");
99 atomic_inc(&src->ref_count);
/*
 * psb_alloc_scene() - allocate a scene object for a w x h render target:
 * query the Xpsb firmware for the required buffer size / clear region, then
 * create the backing MMU buffer object.
 *
 * Returns the new scene, or (presumably) NULL on failure — the listing is
 * gapped and the success return, error labels and several assignments
 * (scene->dev/w/h, ret checks) are not visible here.
 */
103 static struct psb_scene *psb_alloc_scene(struct drm_device *dev,
104 uint32_t w, uint32_t h)
106 struct drm_psb_private *dev_priv =
107 (struct drm_psb_private *)dev->dev_private;
109 struct psb_scene *scene;
111 struct psb_xhw_buf buf;
113 PSB_DEBUG_RENDER("Alloc scene w %u h %u\n", w, h);
/* Zeroed allocation; refcount starts at 1 below. */
115 scene = drm_calloc(1, sizeof(*scene), DRM_MEM_DRIVER);
118 DRM_ERROR("Out of memory allocating scene object.\n");
125 scene->hw_scene = NULL;
126 atomic_set(&scene->ref_count, 1);
/* Ask the Xpsb firmware for buffer size, hw cookie and clear region. */
128 INIT_LIST_HEAD(&buf.head);
129 ret = psb_xhw_scene_info(dev_priv, &buf, scene->w, scene->h,
130 scene->hw_cookie, &bo_size,
131 &scene->clear_p_start,
132 &scene->clear_num_pages);
/* Create the MMU-mapped, unfenced kernel buffer object backing the scene.
 * (Flag list is truncated by listing gaps.) */
136 ret = drm_buffer_object_create(dev, bo_size, drm_bo_type_kernel,
137 DRM_PSB_FLAG_MEM_MMU |
142 DRM_BO_HINT_DONT_FENCE,
143 0, 0, &scene->hw_data);
/* Error path: free the partially constructed scene. */
149 drm_free(scene, sizeof(*scene), DRM_MEM_DRIVER);
/*
 * psb_validate_scene_pool() - prepare the pool's current scene for a TA
 * (tile accelerator) submission:
 *   1. lazily allocate the global TA parameter memory,
 *   2. replace the current scene if its size no longer matches the pool,
 *   3. clear the scene memory if it is not already cleared,
 *   4. validate the scene and TA buffers for the TA engine,
 *   5. reload TA memory into hardware if its GPU offsets moved,
 *   6. mark the scene dirty and hand back a new reference via *scene_p.
 *
 * NOTE(review): the listing is heavily gapped — error checks/returns after
 * most calls, several closing braces and some signature parameters (mask,
 * hint appear used but not in the visible prototype lines) are missing.
 * Treat the structure below as an outline, not the complete body.
 */
153 int psb_validate_scene_pool(struct psb_scene_pool *pool, uint64_t flags,
158 int final_pass, struct psb_scene **scene_p)
160 struct drm_device *dev = pool->dev;
161 struct drm_psb_private *dev_priv =
162 (struct drm_psb_private *)dev->dev_private;
163 struct psb_scene *scene = pool->scenes[pool->cur_scene];
165 unsigned long irq_flags;
166 struct psb_scheduler *scheduler = &dev_priv->scheduler;
167 uint32_t bin_pt_offset;
168 uint32_t bin_param_offset;
170 PSB_DEBUG_RENDER("Validate scene pool. Scene %u\n", pool->cur_scene);
/* Lazily allocate the shared TA parameter memory on first use. */
172 if (unlikely(!dev_priv->ta_mem)) {
174 psb_alloc_ta_mem(dev, dev_priv->ta_mem_pages);
175 if (!dev_priv->ta_mem)
/* ~0 sentinel forces the offset-change reload check below to trigger. */
179 bin_param_offset = ~0;
/* Record current GPU offsets so a later move can be detected. */
181 bin_pt_offset = dev_priv->ta_mem->hw_data->offset;
182 bin_param_offset = dev_priv->ta_mem->ta_memory->offset;
/* Scene exists but no longer matches the pool dimensions: drop it (unless
 * it is dirty, which is an error) so a fresh one is allocated below. */
187 if (scene && (scene->w != pool->w || scene->h != pool->h)) {
188 spin_lock_irqsave(&scheduler->lock, irq_flags);
189 if (scene->flags & PSB_SCENE_FLAG_DIRTY) {
190 spin_unlock_irqrestore(&scheduler->lock, irq_flags);
191 DRM_ERROR("Trying to resize a dirty scene.\n");
194 spin_unlock_irqrestore(&scheduler->lock, irq_flags);
195 mutex_lock(&dev->struct_mutex);
196 psb_scene_unref_devlocked(&pool->scenes[pool->cur_scene]);
197 mutex_unlock(&dev->struct_mutex);
/* Allocate a replacement scene; it starts out already cleared
 * (drm_calloc-backed pages are zeroed). */
202 pool->scenes[pool->cur_scene] = scene =
203 psb_alloc_scene(pool->dev, pool->w, pool->h);
208 scene->flags = PSB_SCENE_FLAG_CLEARED;
212 * FIXME: We need atomic bit manipulation here for the
213 * scheduler. For now use the spinlock.
/* If the scene memory is not cleared, wait for the hardware to be done
 * with the buffer, then clear it and set the flag. */
216 spin_lock_irqsave(&scheduler->lock, irq_flags);
217 if (!(scene->flags & PSB_SCENE_FLAG_CLEARED)) {
218 spin_unlock_irqrestore(&scheduler->lock, irq_flags);
219 PSB_DEBUG_RENDER("Waiting to clear scene memory.\n");
220 mutex_lock(&scene->hw_data->mutex);
221 ret = drm_bo_wait(scene->hw_data, 0, 0, 0);
222 mutex_unlock(&scene->hw_data->mutex);
226 ret = psb_clear_scene(scene);
230 spin_lock_irqsave(&scheduler->lock, irq_flags);
231 scene->flags |= PSB_SCENE_FLAG_CLEARED;
233 spin_unlock_irqrestore(&scheduler->lock, irq_flags);
/* Validate scene + TA buffers on the TA engine. NOTE(review): 'mask' and
 * 'hint' are used here but their parameter lines are missing above. */
235 ret = drm_bo_do_validate(scene->hw_data, flags, mask, hint,
236 PSB_ENGINE_TA, 0, NULL);
239 ret = drm_bo_do_validate(dev_priv->ta_mem->hw_data, 0, 0, 0,
240 PSB_ENGINE_TA, 0, NULL);
243 ret = drm_bo_do_validate(dev_priv->ta_mem->ta_memory, 0, 0, 0,
244 PSB_ENGINE_TA, 0, NULL);
/* If validation moved either TA buffer (or a reload is forced), tell the
 * firmware to reload TA memory at the new offsets. */
248 if (unlikely(bin_param_offset !=
249 dev_priv->ta_mem->ta_memory->offset ||
251 dev_priv->ta_mem->hw_data->offset ||
252 dev_priv->force_ta_mem_load)) {
254 struct psb_xhw_buf buf;
256 INIT_LIST_HEAD(&buf.head);
257 ret = psb_xhw_ta_mem_load(dev_priv, &buf,
259 PSB_TA_MEM_FLAG_RASTER |
260 PSB_TA_MEM_FLAG_HOSTA |
261 PSB_TA_MEM_FLAG_HOSTD |
262 PSB_TA_MEM_FLAG_INIT,
263 dev_priv->ta_mem->ta_memory->offset,
264 dev_priv->ta_mem->hw_data->offset,
265 dev_priv->ta_mem->hw_cookie);
269 dev_priv->force_ta_mem_load = 0;
275 * Clear the scene on next use. Advance the scene counter.
278 spin_lock_irqsave(&scheduler->lock, irq_flags);
279 scene->flags &= ~PSB_SCENE_FLAG_CLEARED;
280 spin_unlock_irqrestore(&scheduler->lock, irq_flags);
281 pool->cur_scene = (pool->cur_scene + 1) % pool->num_scenes;
/* Hand a new reference to the caller. */
284 *scene_p = psb_scene_ref(scene);
/*
 * psb_scene_pool_destroy_devlocked() - drop every scene in the pool, then
 * free the pool itself. "_devlocked" suggests dev->struct_mutex is held by
 * the caller (matches the devlocked unref it calls) — confirm at call sites.
 * NOTE(review): NULL-check guarding the per-scene unref is not visible in
 * this gapped listing.
 */
288 static void psb_scene_pool_destroy_devlocked(struct psb_scene_pool *pool)
295 PSB_DEBUG_RENDER("Scene pool destroy.\n");
296 for (i = 0; i < pool->num_scenes; ++i) {
297 PSB_DEBUG_RENDER("scenes %d is 0x%08lx\n", i,
298 (unsigned long)pool->scenes[i]);
300 psb_scene_unref_devlocked(&pool->scenes[i]);
302 drm_free(pool, sizeof(*pool), DRM_MEM_DRIVER);
/*
 * psb_scene_pool_unref_devlocked() - drop one (non-atomic) reference on the
 * pool; destroy it on the last one. The plain --ref_count is safe because
 * dev->struct_mutex must be held (asserted below).
 * NOTE(review): the line that NULLs *pool is not visible in this listing.
 */
305 void psb_scene_pool_unref_devlocked(struct psb_scene_pool **pool)
307 struct psb_scene_pool *tmp_pool = *pool;
308 struct drm_device *dev = tmp_pool->dev;
310 PSB_DEBUG_RENDER("Scene pool unref\n");
312 DRM_ASSERT_LOCKED(&dev->struct_mutex);
314 if (--tmp_pool->ref_count == 0)
315 psb_scene_pool_destroy_devlocked(tmp_pool);
/*
 * psb_scene_pool_ref_devlocked() - take a reference on the pool and return
 * it. NOTE(review): the entire body is missing from this gapped listing;
 * presumably it increments src->ref_count under dev->struct_mutex and
 * returns src — recover from the original file.
 */
318 struct psb_scene_pool *psb_scene_pool_ref_devlocked(struct psb_scene_pool *src)
325 * Callback for user object manager.
/*
 * psb_scene_pool_destroy() - drm_user_object removal callback: recover the
 * pool from its embedded user object and drop the user reference.
 */
328 static void psb_scene_pool_destroy(struct drm_file *priv,
329 struct drm_user_object *base)
331 struct psb_scene_pool *pool =
332 drm_user_object_entry(base, struct psb_scene_pool, user);
334 psb_scene_pool_unref_devlocked(&pool);
/*
 * psb_scene_pool_lookup_devlocked() - resolve a user handle to a scene
 * pool, optionally verifying the caller either owns the object or holds a
 * usage reference to it, and return the pool with a fresh reference.
 *
 * NOTE(review): the gapped listing hides the remaining signature lines
 * (handle / check_owner parameters appear used but not declared here) and
 * the NULL-return error paths — verify against the original file.
 */
337 struct psb_scene_pool *psb_scene_pool_lookup_devlocked(struct drm_file *priv,
341 struct drm_user_object *uo;
342 struct psb_scene_pool *pool;
344 uo = drm_lookup_user_object(priv, handle);
345 if (!uo || (uo->type != PSB_USER_OBJECT_SCENE_POOL)) {
346 DRM_ERROR("Could not find scene pool object 0x%08x\n", handle);
/* Non-owner callers must hold a _DRM_REF_USE reference on the object. */
350 if (check_owner && priv != uo->owner) {
351 if (!drm_lookup_ref_object(priv, uo, _DRM_REF_USE))
355 pool = drm_user_object_entry(uo, struct psb_scene_pool, user);
356 return psb_scene_pool_ref_devlocked(pool);
/*
 * psb_scene_pool_alloc() - create a scene pool for w x h scenes, register
 * it as a user object on the calling file, and return it.
 *
 * NOTE(review): gapped listing — the 'shareable' and 'num_scenes' parameter
 * lines, pool->dev/w/h/ref_count assignments, the success return and the
 * error-label structure around the final drm_free are not visible here.
 */
359 struct psb_scene_pool *psb_scene_pool_alloc(struct drm_file *priv,
362 uint32_t w, uint32_t h)
364 struct drm_device *dev = priv->head->dev;
365 struct psb_scene_pool *pool;
368 PSB_DEBUG_RENDER("Scene pool alloc\n");
369 pool = drm_calloc(1, sizeof(*pool), DRM_MEM_DRIVER);
371 DRM_ERROR("Out of memory allocating scene pool object.\n");
377 pool->num_scenes = num_scenes;
/* Register the pool as a user object so it can be looked up by handle. */
379 mutex_lock(&dev->struct_mutex);
380 ret = drm_add_user_object(priv, &pool->user, shareable);
384 pool->user.type = PSB_USER_OBJECT_SCENE_POOL;
385 pool->user.remove = &psb_scene_pool_destroy;
387 mutex_unlock(&dev->struct_mutex);
/* Error path: free the pool allocation. */
390 drm_free(pool, sizeof(*pool), DRM_MEM_DRIVER);
395 * Code to support multiple ta memory buffers.
/*
 * psb_destroy_ta_mem_devlocked() - final teardown of TA parameter memory:
 * drop both buffer object references and free the struct. Caller holds
 * dev->struct_mutex (implied by the _locked derefs) — confirm at call sites.
 */
398 static void psb_destroy_ta_mem_devlocked(struct psb_ta_mem *ta_mem)
403 drm_bo_usage_deref_locked(&ta_mem->hw_data);
404 drm_bo_usage_deref_locked(&ta_mem->ta_memory);
405 drm_free(ta_mem, sizeof(*ta_mem), DRM_MEM_DRIVER);
/*
 * psb_ta_mem_unref_devlocked() - drop one (non-atomic) reference on the TA
 * memory object; destroy on the last one. The plain --ref_count is safe
 * because dev->struct_mutex is asserted held.
 * NOTE(review): the line that NULLs *ta_mem is not visible in this listing.
 */
408 void psb_ta_mem_unref_devlocked(struct psb_ta_mem **ta_mem)
410 struct psb_ta_mem *tmp_ta_mem = *ta_mem;
411 struct drm_device *dev = tmp_ta_mem->dev;
414 DRM_ASSERT_LOCKED(&dev->struct_mutex);
416 if (--tmp_ta_mem->ref_count == 0)
417 psb_destroy_ta_mem_devlocked(tmp_ta_mem);
/*
 * psb_ta_mem_ref_devlocked() - take a reference on src and store it in
 * *dst, with dev->struct_mutex held. NOTE(review): the ref_count increment
 * and *dst assignment are missing from this gapped listing — recover from
 * the original file.
 */
420 void psb_ta_mem_ref_devlocked(struct psb_ta_mem **dst, struct psb_ta_mem *src)
422 struct drm_device *dev = src->dev;
425 DRM_ASSERT_LOCKED(&dev->struct_mutex);
/*
 * psb_alloc_ta_mem() - allocate the TA (tile accelerator) parameter memory:
 * query the Xpsb firmware for sizing, then create the hw_data buffer object
 * and the raster-geometry ta_memory buffer object.
 *
 * Returns the new object, or (presumably) NULL on failure. NOTE(review):
 * the gapped listing hides several error checks/labels, the ta_mem->dev
 * assignment, parts of both drm_buffer_object_create flag lists and the
 * success return — verify against the original file before editing.
 */
430 struct psb_ta_mem *psb_alloc_ta_mem(struct drm_device *dev, uint32_t pages)
432 struct drm_psb_private *dev_priv =
433 (struct drm_psb_private *)dev->dev_private;
435 struct psb_ta_mem *ta_mem;
437 struct psb_xhw_buf buf;
439 INIT_LIST_HEAD(&buf.head);
441 ta_mem = drm_calloc(1, sizeof(*ta_mem), DRM_MEM_DRIVER);
444 DRM_ERROR("Out of memory allocating parameter memory.\n");
/* Ask the firmware for the required buffer size and hw cookie. */
448 ret = psb_xhw_ta_mem_info(dev_priv, &buf, pages,
449 ta_mem->hw_cookie, &bo_size);
/* -ENOMEM from the firmware means the requested parameter memory is too
 * small; report the firmware's suggested size to the user. */
450 if (ret == -ENOMEM) {
451 DRM_ERROR("Parameter memory size is too small.\n");
452 DRM_INFO("Attempted to use %u kiB of parameter memory.\n",
453 (unsigned int)(pages * (PAGE_SIZE / 1024)));
454 DRM_INFO("The Xpsb driver thinks this is too small and\n");
455 DRM_INFO("suggests %u kiB. Check the psb DRM\n",
456 (unsigned int)(bo_size / 1024));
457 DRM_INFO("\"ta_mem_size\" parameter!\n");
462 bo_size = pages * PAGE_SIZE;
/* Create the MMU-mapped hw_data buffer object. */
464 ret = drm_buffer_object_create(dev, bo_size, drm_bo_type_kernel,
465 DRM_PSB_FLAG_MEM_MMU | DRM_BO_FLAG_READ |
468 DRM_BO_HINT_DONT_FENCE, 0, 0,
/* Create the raster-geometry ta_memory buffer object; the final argument
 * looks like a 1 MiB alignment expressed in pages. */
474 drm_buffer_object_create(dev, pages << PAGE_SHIFT,
476 DRM_PSB_FLAG_MEM_RASTGEOM |
480 DRM_BO_HINT_DONT_FENCE, 0,
481 1024 * 1024 >> PAGE_SHIFT,
486 ta_mem->ref_count = 1;
/* Error unwind: release hw_data, then the struct itself. */
489 drm_bo_usage_deref_unlocked(&ta_mem->hw_data);
491 drm_free(ta_mem, sizeof(*ta_mem), DRM_MEM_DRIVER);
/*
 * drm_psb_scene_unref_ioctl() - ioctl that releases the caller's handle on
 * a scene pool: validate the handle, check type and ownership, invalidate
 * the handle and remove the usage reference (which may trigger the pool's
 * destroy callback).
 *
 * NOTE(review): the gapped listing hides the 'ret' declaration, the goto
 * labels / early-exit paths between the checks, and the return statement —
 * the unlock at the bottom presumably sits under a common 'out_unlock'
 * style label; verify against the original file.
 */
495 int drm_psb_scene_unref_ioctl(struct drm_device *dev,
496 void *data, struct drm_file *file_priv)
498 struct drm_psb_scene *scene = (struct drm_psb_scene *)data;
499 struct drm_user_object *uo;
500 struct drm_ref_object *ro;
503 mutex_lock(&dev->struct_mutex);
/* Nothing to do if userspace passed an already-invalid handle. */
504 if (!scene->handle_valid)
507 uo = drm_lookup_user_object(file_priv, scene->handle);
512 if (uo->type != PSB_USER_OBJECT_SCENE_POOL) {
513 DRM_ERROR("Not a scene pool object.\n");
517 if (uo->owner != file_priv) {
518 DRM_ERROR("Not owner of scene pool object.\n");
/* Invalidate the handle, then drop the caller's usage reference. */
523 scene->handle_valid = 0;
524 ro = drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE);
526 drm_remove_ref_object(file_priv, ro);
529 mutex_unlock(&dev->struct_mutex);