psb-kernel-source-4.41.1 / psb_scene.c
/**************************************************************************
 * Copyright (c) 2007, Intel Corporation.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 * develop this driver.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
 */

#include "drmP.h"
#include "psb_drv.h"
#include "psb_scene.h"

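/*
 * Zero the clear region of a scene's backing TTM, page by page, using
 * atomic kmaps so this is safe from both IRQ and process context.
 */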
void psb_clear_scene_atomic(struct psb_scene *scene)
{
        int i;
        struct page *page;
        void *v;

        for (i = 0; i < scene->clear_num_pages; ++i) {
                page = drm_ttm_get_page(scene->hw_data->ttm,
                                        scene->clear_p_start + i);
                if (in_irq())
                        v = kmap_atomic(page, KM_IRQ0);
                else
                        v = kmap_atomic(page, KM_USER0);

                memset(v, 0, PAGE_SIZE);

                if (in_irq())
                        kunmap_atomic(v, KM_IRQ0);
                else
                        kunmap_atomic(v, KM_USER0);
        }
}

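/*
 * Zero the clear region of a scene through a kernel mapping of the
 * buffer object. Only valid for non-iomem backing store, which the
 * BUG_ON below enforces.
 */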
int psb_clear_scene(struct psb_scene *scene)
{
        struct drm_bo_kmap_obj bmo;
        int is_iomem;
        void *addr;

        int ret = drm_bo_kmap(scene->hw_data, scene->clear_p_start,
                              scene->clear_num_pages, &bmo);

        PSB_DEBUG_RENDER("Scene clear\n");
        if (ret)
                return ret;

        addr = drm_bmo_virtual(&bmo, &is_iomem);
        BUG_ON(is_iomem);
        memset(addr, 0, scene->clear_num_pages << PAGE_SHIFT);
        drm_bo_kunmap(&bmo);

        return 0;
}

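/*
 * Final teardown of a scene: drop the hardware data buffer and free
 * the scene structure. Caller must hold dev->struct_mutex.
 */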
static void psb_destroy_scene_devlocked(struct psb_scene *scene)
{
        if (!scene)
                return;

        PSB_DEBUG_RENDER("Scene destroy\n");
        drm_bo_usage_deref_locked(&scene->hw_data);
        drm_free(scene, sizeof(*scene), DRM_MEM_DRIVER);
}

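/*
 * Drop a scene reference, destroying the scene when the count hits
 * zero. Clears the caller's pointer. Caller must hold
 * dev->struct_mutex.
 */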
void psb_scene_unref_devlocked(struct psb_scene **scene)
{
        struct psb_scene *tmp_scene = *scene;

        PSB_DEBUG_RENDER("Scene unref\n");
        *scene = NULL;
        if (atomic_dec_and_test(&tmp_scene->ref_count)) {
                psb_scheduler_remove_scene_refs(tmp_scene);
                psb_destroy_scene_devlocked(tmp_scene);
        }
}

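/*
 * Take an extra reference on a scene.
 */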
struct psb_scene *psb_scene_ref(struct psb_scene *src)
{
        PSB_DEBUG_RENDER("Scene ref\n");
        atomic_inc(&src->ref_count);
        return src;
}

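/*
 * Allocate a scene object for a w x h render target. The Xpsb
 * interface is asked for the required buffer size and the clear
 * region, and a cached, MMU-mapped buffer object is created to back
 * the scene.
 */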
static struct psb_scene *psb_alloc_scene(struct drm_device *dev,
                                         uint32_t w, uint32_t h)
{
        struct drm_psb_private *dev_priv =
            (struct drm_psb_private *)dev->dev_private;
        int ret = -EINVAL;
        struct psb_scene *scene;
        uint32_t bo_size;
        struct psb_xhw_buf buf;

        PSB_DEBUG_RENDER("Alloc scene w %u h %u\n", w, h);

        scene = drm_calloc(1, sizeof(*scene), DRM_MEM_DRIVER);

        if (!scene) {
                DRM_ERROR("Out of memory allocating scene object.\n");
                return NULL;
        }

        scene->dev = dev;
        scene->w = w;
        scene->h = h;
        scene->hw_scene = NULL;
        atomic_set(&scene->ref_count, 1);

        INIT_LIST_HEAD(&buf.head);
        ret = psb_xhw_scene_info(dev_priv, &buf, scene->w, scene->h,
                                 scene->hw_cookie, &bo_size,
                                 &scene->clear_p_start,
                                 &scene->clear_num_pages);
        if (ret)
                goto out_err;

        ret = drm_buffer_object_create(dev, bo_size, drm_bo_type_kernel,
                                       DRM_PSB_FLAG_MEM_MMU |
                                       DRM_BO_FLAG_READ |
                                       DRM_BO_FLAG_CACHED |
                                       PSB_BO_FLAG_SCENE |
                                       DRM_BO_FLAG_WRITE,
                                       DRM_BO_HINT_DONT_FENCE,
                                       0, 0, &scene->hw_data);
        if (ret)
                goto out_err;

        return scene;
out_err:
        drm_free(scene, sizeof(*scene), DRM_MEM_DRIVER);
        return NULL;
}

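/*
 * Prepare the current scene of a pool for rendering: allocate TA
 * parameter memory on first use, reallocate the scene if the pool was
 * resized, clear scene memory when needed, validate the buffers for
 * the TA engine and, if the TA memory offsets changed, ask the
 * firmware interface to reload its TA memory state. On the final pass
 * the scene is marked for clearing on next use and the pool advances
 * to its next scene. Returns a new scene reference in *scene_p.
 */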
int psb_validate_scene_pool(struct psb_scene_pool *pool, uint64_t flags,
                            uint64_t mask, uint32_t hint, uint32_t w,
                            uint32_t h, int final_pass,
                            struct psb_scene **scene_p)
{
        struct drm_device *dev = pool->dev;
        struct drm_psb_private *dev_priv =
            (struct drm_psb_private *)dev->dev_private;
        struct psb_scene *scene = pool->scenes[pool->cur_scene];
        int ret;
        unsigned long irq_flags;
        struct psb_scheduler *scheduler = &dev_priv->scheduler;
        uint32_t bin_pt_offset;
        uint32_t bin_param_offset;

        PSB_DEBUG_RENDER("Validate scene pool. Scene %u\n", pool->cur_scene);

        if (unlikely(!dev_priv->ta_mem)) {
                dev_priv->ta_mem =
                    psb_alloc_ta_mem(dev, dev_priv->ta_mem_pages);
                if (!dev_priv->ta_mem)
                        return -ENOMEM;

                bin_pt_offset = ~0;
                bin_param_offset = ~0;
        } else {
                bin_pt_offset = dev_priv->ta_mem->hw_data->offset;
                bin_param_offset = dev_priv->ta_mem->ta_memory->offset;
        }

        pool->w = w;
        pool->h = h;
        if (scene && (scene->w != pool->w || scene->h != pool->h)) {
                spin_lock_irqsave(&scheduler->lock, irq_flags);
                if (scene->flags & PSB_SCENE_FLAG_DIRTY) {
                        spin_unlock_irqrestore(&scheduler->lock, irq_flags);
                        DRM_ERROR("Trying to resize a dirty scene.\n");
                        return -EINVAL;
                }
                spin_unlock_irqrestore(&scheduler->lock, irq_flags);
                mutex_lock(&dev->struct_mutex);
                psb_scene_unref_devlocked(&pool->scenes[pool->cur_scene]);
                mutex_unlock(&dev->struct_mutex);
                scene = NULL;
        }

        if (!scene) {
                pool->scenes[pool->cur_scene] = scene =
                    psb_alloc_scene(pool->dev, pool->w, pool->h);

                if (!scene)
                        return -ENOMEM;

                scene->flags = PSB_SCENE_FLAG_CLEARED;
        }

        /*
         * FIXME: We need atomic bit manipulation here for the
         * scheduler. For now use the spinlock.
         */

        spin_lock_irqsave(&scheduler->lock, irq_flags);
        if (!(scene->flags & PSB_SCENE_FLAG_CLEARED)) {
                spin_unlock_irqrestore(&scheduler->lock, irq_flags);
                PSB_DEBUG_RENDER("Waiting to clear scene memory.\n");
                mutex_lock(&scene->hw_data->mutex);
                ret = drm_bo_wait(scene->hw_data, 0, 0, 0);
                mutex_unlock(&scene->hw_data->mutex);
                if (ret)
                        return ret;

                ret = psb_clear_scene(scene);

                if (ret)
                        return ret;
                spin_lock_irqsave(&scheduler->lock, irq_flags);
                scene->flags |= PSB_SCENE_FLAG_CLEARED;
        }
        spin_unlock_irqrestore(&scheduler->lock, irq_flags);

        ret = drm_bo_do_validate(scene->hw_data, flags, mask, hint,
                                 PSB_ENGINE_TA, 0, NULL);
        if (ret)
                return ret;
        ret = drm_bo_do_validate(dev_priv->ta_mem->hw_data, 0, 0, 0,
                                 PSB_ENGINE_TA, 0, NULL);
        if (ret)
                return ret;
        ret = drm_bo_do_validate(dev_priv->ta_mem->ta_memory, 0, 0, 0,
                                 PSB_ENGINE_TA, 0, NULL);
        if (ret)
                return ret;

        if (unlikely(bin_param_offset !=
                     dev_priv->ta_mem->ta_memory->offset ||
                     bin_pt_offset !=
                     dev_priv->ta_mem->hw_data->offset ||
                     dev_priv->force_ta_mem_load)) {

                struct psb_xhw_buf buf;

                INIT_LIST_HEAD(&buf.head);
                ret = psb_xhw_ta_mem_load(dev_priv, &buf,
                                          PSB_TA_MEM_FLAG_TA |
                                          PSB_TA_MEM_FLAG_RASTER |
                                          PSB_TA_MEM_FLAG_HOSTA |
                                          PSB_TA_MEM_FLAG_HOSTD |
                                          PSB_TA_MEM_FLAG_INIT,
                                          dev_priv->ta_mem->ta_memory->offset,
                                          dev_priv->ta_mem->hw_data->offset,
                                          dev_priv->ta_mem->hw_cookie);
                if (ret)
                        return ret;

                dev_priv->force_ta_mem_load = 0;
        }

        if (final_pass) {

                /*
                 * Clear the scene on next use. Advance the scene counter.
                 */

                spin_lock_irqsave(&scheduler->lock, irq_flags);
                scene->flags &= ~PSB_SCENE_FLAG_CLEARED;
                spin_unlock_irqrestore(&scheduler->lock, irq_flags);
                pool->cur_scene = (pool->cur_scene + 1) % pool->num_scenes;
        }

        *scene_p = psb_scene_ref(scene);
        return 0;
}

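/*
 * Drop the pool's scene references and free the pool. Caller must
 * hold dev->struct_mutex.
 */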
static void psb_scene_pool_destroy_devlocked(struct psb_scene_pool *pool)
{
        int i;

        if (!pool)
                return;

        PSB_DEBUG_RENDER("Scene pool destroy.\n");
        for (i = 0; i < pool->num_scenes; ++i) {
                PSB_DEBUG_RENDER("scene %d is 0x%08lx\n", i,
                                 (unsigned long)pool->scenes[i]);
                if (pool->scenes[i])
                        psb_scene_unref_devlocked(&pool->scenes[i]);
        }
        drm_free(pool, sizeof(*pool), DRM_MEM_DRIVER);
}

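/*
 * Drop a pool reference, destroying the pool when the count hits
 * zero. Clears the caller's pointer. Caller must hold
 * dev->struct_mutex.
 */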
void psb_scene_pool_unref_devlocked(struct psb_scene_pool **pool)
{
        struct psb_scene_pool *tmp_pool = *pool;
        struct drm_device *dev = tmp_pool->dev;

        PSB_DEBUG_RENDER("Scene pool unref\n");
        (void)dev;
        DRM_ASSERT_LOCKED(&dev->struct_mutex);
        *pool = NULL;
        if (--tmp_pool->ref_count == 0)
                psb_scene_pool_destroy_devlocked(tmp_pool);
}

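/*
 * Take an extra pool reference. Caller must hold dev->struct_mutex.
 */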
struct psb_scene_pool *psb_scene_pool_ref_devlocked(struct psb_scene_pool *src)
{
        ++src->ref_count;
        return src;
}

/*
 * Callback for user object manager.
 */

static void psb_scene_pool_destroy(struct drm_file *priv,
                                   struct drm_user_object *base)
{
        struct psb_scene_pool *pool =
            drm_user_object_entry(base, struct psb_scene_pool, user);

        psb_scene_pool_unref_devlocked(&pool);
}

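/*
 * Look up a scene pool by user object handle and return a new
 * reference to it. With check_owner set, handles owned by other
 * clients are only accepted if the caller holds a usage reference on
 * the object.
 */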
struct psb_scene_pool *psb_scene_pool_lookup_devlocked(struct drm_file *priv,
                                                       uint32_t handle,
                                                       int check_owner)
{
        struct drm_user_object *uo;
        struct psb_scene_pool *pool;

        uo = drm_lookup_user_object(priv, handle);
        if (!uo || (uo->type != PSB_USER_OBJECT_SCENE_POOL)) {
                DRM_ERROR("Could not find scene pool object 0x%08x\n", handle);
                return NULL;
        }

        if (check_owner && priv != uo->owner) {
                if (!drm_lookup_ref_object(priv, uo, _DRM_REF_USE))
                        return NULL;
        }

        pool = drm_user_object_entry(uo, struct psb_scene_pool, user);
        return psb_scene_pool_ref_devlocked(pool);
}

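/*
 * Create a scene pool and register it as a user object. Scenes
 * themselves are allocated lazily on first validation. The initial
 * reference count of 2 covers the user object (dropped by the destroy
 * callback) and the reference returned to the caller.
 */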
struct psb_scene_pool *psb_scene_pool_alloc(struct drm_file *priv,
                                            int shareable,
                                            uint32_t num_scenes,
                                            uint32_t w, uint32_t h)
{
        struct drm_device *dev = priv->head->dev;
        struct psb_scene_pool *pool;
        int ret;

        PSB_DEBUG_RENDER("Scene pool alloc\n");
        pool = drm_calloc(1, sizeof(*pool), DRM_MEM_DRIVER);
        if (!pool) {
                DRM_ERROR("Out of memory allocating scene pool object.\n");
                return NULL;
        }
        pool->w = w;
        pool->h = h;
        pool->dev = dev;
        pool->num_scenes = num_scenes;

        mutex_lock(&dev->struct_mutex);
        ret = drm_add_user_object(priv, &pool->user, shareable);
        if (ret)
                goto out_err;

        pool->user.type = PSB_USER_OBJECT_SCENE_POOL;
        pool->user.remove = &psb_scene_pool_destroy;
        pool->ref_count = 2;
        mutex_unlock(&dev->struct_mutex);
        return pool;
out_err:
        /* Don't leak dev->struct_mutex on the error path. */
        mutex_unlock(&dev->struct_mutex);
        drm_free(pool, sizeof(*pool), DRM_MEM_DRIVER);
        return NULL;
}

/*
 * Code to support multiple TA memory buffers.
 */

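/*
 * Final teardown of a TA memory buffer pair. Caller must hold
 * dev->struct_mutex.
 */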
static void psb_destroy_ta_mem_devlocked(struct psb_ta_mem *ta_mem)
{
        if (!ta_mem)
                return;

        drm_bo_usage_deref_locked(&ta_mem->hw_data);
        drm_bo_usage_deref_locked(&ta_mem->ta_memory);
        drm_free(ta_mem, sizeof(*ta_mem), DRM_MEM_DRIVER);
}

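/*
 * Drop a TA memory reference, destroying it when the count hits zero.
 * Clears the caller's pointer. Caller must hold dev->struct_mutex.
 */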
void psb_ta_mem_unref_devlocked(struct psb_ta_mem **ta_mem)
{
        struct psb_ta_mem *tmp_ta_mem = *ta_mem;
        struct drm_device *dev = tmp_ta_mem->dev;

        (void)dev;
        DRM_ASSERT_LOCKED(&dev->struct_mutex);
        *ta_mem = NULL;
        if (--tmp_ta_mem->ref_count == 0)
                psb_destroy_ta_mem_devlocked(tmp_ta_mem);
}

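/*
 * Point *dst at src and take an extra TA memory reference. Caller
 * must hold dev->struct_mutex.
 */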
void psb_ta_mem_ref_devlocked(struct psb_ta_mem **dst, struct psb_ta_mem *src)
{
        struct drm_device *dev = src->dev;

        (void)dev;
        DRM_ASSERT_LOCKED(&dev->struct_mutex);
        *dst = src;
        ++src->ref_count;
}

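/*
 * Allocate TA parameter memory: query the Xpsb interface for the
 * required size, then create one MMU-mapped buffer for hardware data
 * and one raster-geometry buffer for the parameter memory itself.
 */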
struct psb_ta_mem *psb_alloc_ta_mem(struct drm_device *dev, uint32_t pages)
{
        struct drm_psb_private *dev_priv =
            (struct drm_psb_private *)dev->dev_private;
        int ret = -EINVAL;
        struct psb_ta_mem *ta_mem;
        uint32_t bo_size;
        struct psb_xhw_buf buf;

        INIT_LIST_HEAD(&buf.head);

        ta_mem = drm_calloc(1, sizeof(*ta_mem), DRM_MEM_DRIVER);

        if (!ta_mem) {
                DRM_ERROR("Out of memory allocating parameter memory.\n");
                return NULL;
        }

        ret = psb_xhw_ta_mem_info(dev_priv, &buf, pages,
                                  ta_mem->hw_cookie, &bo_size);
        if (ret == -ENOMEM) {
                DRM_ERROR("Parameter memory size is too small.\n");
                DRM_INFO("Attempted to use %u kiB of parameter memory.\n",
                         (unsigned int)(pages * (PAGE_SIZE / 1024)));
                DRM_INFO("The Xpsb driver thinks this is too small and\n");
                DRM_INFO("suggests %u kiB. Check the psb DRM\n",
                         (unsigned int)(bo_size / 1024));
                DRM_INFO("\"ta_mem_size\" parameter!\n");
        }
        if (ret)
                goto out_err0;

        bo_size = pages * PAGE_SIZE;
        ta_mem->dev = dev;
        ret = drm_buffer_object_create(dev, bo_size, drm_bo_type_kernel,
                                       DRM_PSB_FLAG_MEM_MMU | DRM_BO_FLAG_READ |
                                       DRM_BO_FLAG_WRITE |
                                       PSB_BO_FLAG_SCENE,
                                       DRM_BO_HINT_DONT_FENCE, 0, 0,
                                       &ta_mem->hw_data);
        if (ret)
                goto out_err0;

        ret = drm_buffer_object_create(dev, pages << PAGE_SHIFT,
                                       drm_bo_type_kernel,
                                       DRM_PSB_FLAG_MEM_RASTGEOM |
                                       DRM_BO_FLAG_READ |
                                       DRM_BO_FLAG_WRITE |
                                       PSB_BO_FLAG_SCENE,
                                       DRM_BO_HINT_DONT_FENCE, 0,
                                       1024 * 1024 >> PAGE_SHIFT,
                                       &ta_mem->ta_memory);
        if (ret)
                goto out_err1;

        ta_mem->ref_count = 1;
        return ta_mem;
out_err1:
        drm_bo_usage_deref_unlocked(&ta_mem->hw_data);
out_err0:
        drm_free(ta_mem, sizeof(*ta_mem), DRM_MEM_DRIVER);
        return NULL;
}

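/*
 * Ioctl: release a userspace handle to a scene pool. Verifies that
 * the handle refers to a scene pool owned by the caller before
 * dropping the usage reference.
 */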
int drm_psb_scene_unref_ioctl(struct drm_device *dev,
                              void *data, struct drm_file *file_priv)
{
        struct drm_psb_scene *scene = (struct drm_psb_scene *)data;
        struct drm_user_object *uo;
        struct drm_ref_object *ro;
        int ret = 0;

        mutex_lock(&dev->struct_mutex);
        if (!scene->handle_valid)
                goto out_unlock;

        uo = drm_lookup_user_object(file_priv, scene->handle);
        if (!uo) {
                ret = -EINVAL;
                goto out_unlock;
        }
        if (uo->type != PSB_USER_OBJECT_SCENE_POOL) {
                DRM_ERROR("Not a scene pool object.\n");
                ret = -EINVAL;
                goto out_unlock;
        }
        if (uo->owner != file_priv) {
                DRM_ERROR("Not owner of scene pool object.\n");
                ret = -EPERM;
                goto out_unlock;
        }

        scene->handle_valid = 0;
        ro = drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE);
        BUG_ON(!ro);
        drm_remove_ref_object(file_priv, ro);

out_unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}