1 /**************************************************************************
2 * Copyright (c) 2007, Intel Corporation.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
19 * develop this driver.
21 **************************************************************************/
23 * Make calls into closed source X server code.
/*
 * Detach @buf from the xhw submit machinery and mark it completed.
 * If @buf is the buffer the X server is currently processing
 * (xhw_cur_buf), clear that pointer so no stale copy-back occurs.
 * Called on timeout paths by the synchronous request helpers below.
 * NOTE(review): the return-type line and braces of this function fall
 * on lines not visible in this listing.
 */
30 psb_xhw_clean_buf(struct drm_psb_private *dev_priv, struct psb_xhw_buf *buf)
32 unsigned long irq_flags;
34 spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags); /* guards xhw_in list and xhw_cur_buf */
35 list_del_init(&buf->head);
36 if (dev_priv->xhw_cur_buf == buf)
37 dev_priv->xhw_cur_buf = NULL;
38 atomic_set(&buf->done, 1); /* release anyone polling buf->done */
39 spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
/*
 * Queue @buf on the xhw input list (dev_priv->xhw_in) for the X server
 * to fetch via psb_xhw_ioctl(), and wake a sleeping ioctl caller.
 * Refuses submission when xhw_submit_ok is clear (no Xpsb extension
 * attached or teardown in progress).
 * NOTE(review): the error-path "return" statements and the final
 * return fall on lines not visible in this listing.
 */
42 static inline int psb_xhw_add(struct drm_psb_private *dev_priv,
43 struct psb_xhw_buf *buf)
45 unsigned long irq_flags;
47 spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
48 atomic_set(&buf->done, 0); /* result not produced yet */
49 if (unlikely(!dev_priv->xhw_submit_ok)) {
50 spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
51 DRM_ERROR("No Xpsb 3D extension available.\n");
54 if (!list_empty(&buf->head)) { /* buf already sits on a list — refuse */
55 DRM_ERROR("Recursive list adding.\n");
58 list_add_tail(&buf->head, &dev_priv->xhw_in);
59 wake_up_interruptible(&dev_priv->xhw_queue); /* wake psb_xhw_ioctl() waiter */
61 spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
/*
 * Send a PSB_XHW_HOTPLUG notification to the X server.
 * NOTE(review): the declaration of "ret", other xa field setup, and the
 * return statement fall on lines not visible in this listing.
 */
65 int psb_xhw_hotplug(struct drm_psb_private *dev_priv, struct psb_xhw_buf *buf)
67 struct drm_psb_xhw_arg *xa = &buf->arg;
71 xa->op = PSB_XHW_HOTPLUG;
75 ret = psb_xhw_add(dev_priv, buf);
/*
 * Synchronously query the X server for scene-buffer parameters.
 * Queues a PSB_XHW_SCENE_INFO request, then sleeps up to DRM_HZ for the
 * reply; on timeout the buffer is cleaned (psb_xhw_clean_buf) and the
 * out-parameters are left untouched.  On success copies back the hw
 * cookie, buffer-object size and the clear-page range.
 * NOTE(review): the timeout-path return and the local declarations fall
 * on lines not visible in this listing.
 */
79 int psb_xhw_scene_info(struct drm_psb_private *dev_priv,
80 struct psb_xhw_buf *buf,
85 uint32_t * clear_p_start, uint32_t * clear_num_pages)
87 struct drm_psb_xhw_arg *xa = &buf->arg;
91 xa->op = PSB_XHW_SCENE_INFO;
97 ret = psb_xhw_add(dev_priv, buf);
101 (void)wait_event_timeout(dev_priv->xhw_caller_queue,
102 atomic_read(&buf->done), DRM_HZ);
104 if (!atomic_read(&buf->done)) { /* X server never answered */
105 psb_xhw_clean_buf(dev_priv, buf);
110 memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie));
111 *bo_size = xa->arg.si.size;
112 *clear_p_start = xa->arg.si.clear_p_start;
113 *clear_num_pages = xa->arg.si.clear_num_pages;
/*
 * Queue an asynchronous PSB_XHW_FIRE_RASTER command (no reply awaited).
 * NOTE(review): the "fire_flags" parameter is NOT forwarded —
 * xa->arg.sb.fire_flags is forced to 0 below.  Confirm with the xhw
 * protocol owner whether this is intentional or a latent bug.
 */
118 int psb_xhw_fire_raster(struct drm_psb_private *dev_priv,
119 struct psb_xhw_buf *buf, uint32_t fire_flags)
121 struct drm_psb_xhw_arg *xa = &buf->arg;
124 xa->op = PSB_XHW_FIRE_RASTER;
126 xa->arg.sb.fire_flags = 0; /* parameter deliberately(?) ignored — see note above */
128 return psb_xhw_add(dev_priv, buf);
/*
 * Queue an asynchronous PSB_XHW_VISTEST (visibility test) request.
 * Completion is reported back via the PSB_UIRQ_VISTEST user irq op
 * rather than by a sleeping caller.
 */
131 int psb_xhw_vistest(struct drm_psb_private *dev_priv, struct psb_xhw_buf *buf)
133 struct drm_psb_xhw_arg *xa = &buf->arg;
136 xa->op = PSB_XHW_VISTEST;
138 * Could perhaps decrease latency somewhat by
139 * issuing an irq in this case.
142 xa->irq_op = PSB_UIRQ_VISTEST;
143 return psb_xhw_add(dev_priv, buf);
/*
 * Queue a PSB_XHW_SCENE_BIND_FIRE request: bind a scene and fire it on
 * @engine.  A reply (copy_back + user irq) is requested only for OOM
 * fires (PSB_FIRE_FLAG_XHW_OOM); the irq op then depends on whether the
 * TA or the raster engine is being fired.  Any pending OOM commands are
 * copied into the argument block.
 * NOTE(review): parts of the parameter list and some xa field setup
 * fall on lines not visible in this listing.
 */
146 int psb_xhw_scene_bind_fire(struct drm_psb_private *dev_priv,
147 struct psb_xhw_buf *buf,
152 uint32_t num_oom_cmds,
153 uint32_t offset, uint32_t engine, uint32_t flags)
155 struct drm_psb_xhw_arg *xa = &buf->arg;
157 buf->copy_back = (fire_flags & PSB_FIRE_FLAG_XHW_OOM); /* reply only needed for OOM fire */
158 xa->op = PSB_XHW_SCENE_BIND_FIRE;
159 xa->issue_irq = (buf->copy_back) ? 1 : 0;
160 if (unlikely(buf->copy_back))
161 xa->irq_op = (engine == PSB_SCENE_ENGINE_TA) ?
162 PSB_UIRQ_FIRE_TA_REPLY : PSB_UIRQ_FIRE_RASTER_REPLY;
165 xa->arg.sb.fire_flags = fire_flags;
166 xa->arg.sb.hw_context = hw_context;
167 xa->arg.sb.offset = offset;
168 xa->arg.sb.engine = engine;
169 xa->arg.sb.flags = flags;
170 xa->arg.sb.num_oom_cmds = num_oom_cmds;
171 memcpy(xa->cookie, cookie, sizeof(xa->cookie));
173 memcpy(xa->arg.sb.oom_cmds, oom_cmds,
174 sizeof(uint32_t) * num_oom_cmds);
175 return psb_xhw_add(dev_priv, buf);
/*
 * Synchronously ask the X server to reset the DPM (parameter memory).
 * Waits up to 3*DRM_HZ for completion; on timeout the buffer is
 * cleaned up.
 * NOTE(review): local declarations and the return statements fall on
 * lines not visible in this listing.
 */
178 int psb_xhw_reset_dpm(struct drm_psb_private *dev_priv, struct psb_xhw_buf *buf)
180 struct drm_psb_xhw_arg *xa = &buf->arg;
184 xa->op = PSB_XHW_RESET_DPM;
188 ret = psb_xhw_add(dev_priv, buf);
192 (void)wait_event_timeout(dev_priv->xhw_caller_queue,
193 atomic_read(&buf->done), 3 * DRM_HZ);
195 if (!atomic_read(&buf->done)) { /* timed out waiting for reply */
196 psb_xhw_clean_buf(dev_priv, buf);
/*
 * Synchronously ask the X server whether the hardware is locked up.
 * Waits up to 3*DRM_HZ; on timeout the buffer is cleaned and *value is
 * left untouched.  On success the lockup status is copied to *value.
 * NOTE(review): local declarations and return statements fall on lines
 * not visible in this listing.
 */
203 int psb_xhw_check_lockup(struct drm_psb_private *dev_priv,
204 struct psb_xhw_buf *buf, uint32_t * value)
206 struct drm_psb_xhw_arg *xa = &buf->arg;
212 xa->op = PSB_XHW_CHECK_LOCKUP;
216 ret = psb_xhw_add(dev_priv, buf);
220 (void)wait_event_timeout(dev_priv->xhw_caller_queue,
221 atomic_read(&buf->done), DRM_HZ * 3);
223 if (!atomic_read(&buf->done)) { /* no reply in time */
224 psb_xhw_clean_buf(dev_priv, buf);
229 *value = xa->arg.cl.value;
/*
 * Send PSB_XHW_TERMINATE to the X server and shut down submission.
 * Open-codes psb_xhw_add() because it must also clear xhw_submit_ok
 * under the same lock hold, guaranteeing this is the last request
 * queued.  Waits only DRM_HZ/10 for the acknowledgment; on timeout the
 * buffer is cleaned and an error is logged (teardown proceeds anyway).
 * NOTE(review): the brace/return lines of the "Recursive list adding"
 * branch are not visible in this listing.
 */
234 static int psb_xhw_terminate(struct drm_psb_private *dev_priv,
235 struct psb_xhw_buf *buf)
237 struct drm_psb_xhw_arg *xa = &buf->arg;
238 unsigned long irq_flags;
241 xa->op = PSB_XHW_TERMINATE;
244 spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
245 dev_priv->xhw_submit_ok = 0; /* block any further psb_xhw_add() */
246 atomic_set(&buf->done, 0);
247 if (!list_empty(&buf->head)) {
248 DRM_ERROR("Recursive list adding.\n");
251 list_add_tail(&buf->head, &dev_priv->xhw_in);
253 spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
254 wake_up_interruptible(&dev_priv->xhw_queue);
256 (void)wait_event_timeout(dev_priv->xhw_caller_queue,
257 atomic_read(&buf->done), DRM_HZ / 10);
259 if (!atomic_read(&buf->done)) {
260 DRM_ERROR("Xpsb terminate timeout.\n");
261 psb_xhw_clean_buf(dev_priv, buf);
/*
 * Synchronously query TA (tile accelerator) memory info for @pages
 * pages.  Waits up to DRM_HZ; on timeout the buffer is cleaned.  On
 * success copies back the hw cookie and the required size to *size.
 * NOTE(review): local declarations and return statements fall on lines
 * not visible in this listing.
 */
268 int psb_xhw_ta_mem_info(struct drm_psb_private *dev_priv,
269 struct psb_xhw_buf *buf,
270 uint32_t pages, uint32_t * hw_cookie, uint32_t * size)
272 struct drm_psb_xhw_arg *xa = &buf->arg;
276 xa->op = PSB_XHW_TA_MEM_INFO;
279 xa->arg.bi.pages = pages;
281 ret = psb_xhw_add(dev_priv, buf);
285 (void)wait_event_timeout(dev_priv->xhw_caller_queue,
286 atomic_read(&buf->done), DRM_HZ);
288 if (!atomic_read(&buf->done)) { /* timed out */
289 psb_xhw_clean_buf(dev_priv, buf);
294 memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie));
296 *size = xa->arg.bi.size;
/*
 * Synchronously ask the X server to load/bind TA memory: passes flags,
 * a parameter-buffer offset and a page-table offset, plus the caller's
 * hw cookie.  Waits up to 3*DRM_HZ; on timeout the buffer is cleaned.
 * On success the (possibly updated) cookie is copied back to the
 * caller.
 * NOTE(review): local declarations and return statements fall on lines
 * not visible in this listing.
 */
300 int psb_xhw_ta_mem_load(struct drm_psb_private *dev_priv,
301 struct psb_xhw_buf *buf,
303 uint32_t param_offset,
304 uint32_t pt_offset, uint32_t * hw_cookie)
306 struct drm_psb_xhw_arg *xa = &buf->arg;
310 xa->op = PSB_XHW_TA_MEM_LOAD;
313 xa->arg.bl.flags = flags;
314 xa->arg.bl.param_offset = param_offset;
315 xa->arg.bl.pt_offset = pt_offset;
316 memcpy(xa->cookie, hw_cookie, sizeof(xa->cookie)); /* pass cookie in... */
318 ret = psb_xhw_add(dev_priv, buf);
322 (void)wait_event_timeout(dev_priv->xhw_caller_queue,
323 atomic_read(&buf->done), 3 * DRM_HZ);
325 if (!atomic_read(&buf->done)) { /* timed out */
326 psb_xhw_clean_buf(dev_priv, buf);
331 memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie)); /* ...and copy it back out */
/*
 * Queue an asynchronous PSB_XHW_OOM request.  The reply arrives via the
 * PSB_UIRQ_OOM_REPLY user irq and is decoded by psb_xhw_ta_oom_reply().
 */
336 int psb_xhw_ta_oom(struct drm_psb_private *dev_priv,
337 struct psb_xhw_buf *buf, uint32_t * cookie)
339 struct drm_psb_xhw_arg *xa = &buf->arg;
342 * This calls the extensive closed source
343 * OOM handler, which resolves the condition and
344 * sends a reply telling the scheduler what to do
349 xa->op = PSB_XHW_OOM;
351 xa->irq_op = PSB_UIRQ_OOM_REPLY;
352 memcpy(xa->cookie, cookie, sizeof(xa->cookie));
354 return psb_xhw_add(dev_priv, buf);
/*
 * Decode the reply to a previous psb_xhw_ta_oom() request: copy the hw
 * cookie and the OOM scheduling parameters (bca/rca/flags) out of the
 * argument block.  Pure copy-out; no submission or waiting here.
 * NOTE(review): part of the parameter list falls on lines not visible
 * in this listing.
 */
357 void psb_xhw_ta_oom_reply(struct drm_psb_private *dev_priv,
358 struct psb_xhw_buf *buf,
360 uint32_t * bca, uint32_t * rca, uint32_t * flags)
362 struct drm_psb_xhw_arg *xa = &buf->arg;
365 * Get info about how to schedule an OOM task.
368 memcpy(cookie, xa->cookie, sizeof(xa->cookie));
369 *bca = xa->arg.oom.bca;
370 *rca = xa->arg.oom.rca;
371 *flags = xa->arg.oom.flags;
/*
 * Decode the reply to a scene bind-and-fire request: copy the hw cookie
 * out of the argument block.  Pure copy-out.
 */
374 void psb_xhw_fire_reply(struct drm_psb_private *dev_priv,
375 struct psb_xhw_buf *buf, uint32_t * cookie)
377 struct drm_psb_xhw_arg *xa = &buf->arg;
379 memcpy(cookie, xa->cookie, sizeof(xa->cookie));
/*
 * Queue an asynchronous PSB_XHW_RESUME request (e.g. after power
 * management suspend); no reply is awaited.
 */
382 int psb_xhw_resume(struct drm_psb_private *dev_priv, struct psb_xhw_buf *buf)
384 struct drm_psb_xhw_arg *xa = &buf->arg;
387 xa->op = PSB_XHW_RESUME;
390 return psb_xhw_add(dev_priv, buf);
/*
 * Driver-unload counterpart of psb_xhw_init().
 * NOTE(review): the function body falls entirely on lines not visible
 * in this listing.
 */
393 void psb_xhw_takedown(struct drm_psb_private *dev_priv)
/*
 * One-time xhw state initialization at driver load: input list, lock,
 * client counter, both wait queues, submit mutex, and xhw_on cleared.
 * NOTE(review): the SPIN_LOCK_UNLOCKED initializer was deprecated and
 * later removed from the kernel — modern code should call
 * spin_lock_init(&dev_priv->xhw_lock) instead.
 */
397 int psb_xhw_init(struct drm_device *dev)
399 struct drm_psb_private *dev_priv =
400 (struct drm_psb_private *)dev->dev_private;
401 unsigned long irq_flags;
403 INIT_LIST_HEAD(&dev_priv->xhw_in);
404 dev_priv->xhw_lock = SPIN_LOCK_UNLOCKED; /* deprecated — see note above */
405 atomic_set(&dev_priv->xhw_client, 0); /* no X server attached yet */
406 init_waitqueue_head(&dev_priv->xhw_queue);
407 init_waitqueue_head(&dev_priv->xhw_caller_queue);
408 mutex_init(&dev_priv->xhw_mutex);
409 spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
410 dev_priv->xhw_on = 0;
411 spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
/*
 * PSB_XHW_INIT ioctl handler: attach the (single) X server client.
 * Claims the one client slot via atomic_add_unless, looks up and kmaps
 * the user-supplied communications buffer object, verifies it is not in
 * device (io) memory, records the owning file, and enables submission
 * (xhw_on / xhw_submit_ok) under the lock.  The trailing statements are
 * the error-unwind path (unmap, unref, release client slot).
 * NOTE(review): several if-bodies, gotos/returns and braces fall on
 * lines not visible in this listing.  Also: the two-part error string
 * "...buffer""is in device memory" concatenates without a space —
 * the logged message reads "bufferis"; worth fixing.
 */
416 static int psb_xhw_init_init(struct drm_device *dev,
417 struct drm_file *file_priv,
418 struct drm_psb_xhw_init_arg *arg)
420 struct drm_psb_private *dev_priv =
421 (struct drm_psb_private *)dev->dev_private;
425 if (atomic_add_unless(&dev_priv->xhw_client, 1, 1)) { /* claim the single client slot */
426 unsigned long irq_flags;
428 mutex_lock(&dev->struct_mutex); /* required by drm_lookup_buffer_object */
430 drm_lookup_buffer_object(file_priv, arg->buffer_handle, 1);
431 mutex_unlock(&dev->struct_mutex);
432 if (!dev_priv->xhw_bo) {
436 ret = drm_bo_kmap(dev_priv->xhw_bo, 0,
437 dev_priv->xhw_bo->num_pages,
438 &dev_priv->xhw_kmap);
440 DRM_ERROR("Failed mapping X server "
441 "communications buffer.\n");
444 dev_priv->xhw = drm_bmo_virtual(&dev_priv->xhw_kmap, &is_iomem);
446 DRM_ERROR("X server communications buffer" /* NOTE(review): missing space before "is" */
447 "is in device memory.\n");
451 dev_priv->xhw_file = file_priv;
453 spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
454 dev_priv->xhw_on = 1;
455 dev_priv->xhw_submit_ok = 1;
456 spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
460 DRM_ERROR("Xhw is already initialized.\n");
464 dev_priv->xhw = NULL; /* error unwind starts here */
465 drm_bo_kunmap(&dev_priv->xhw_kmap);
467 drm_bo_usage_deref_unlocked(&dev_priv->xhw_bo);
469 atomic_dec(&dev_priv->xhw_client); /* give the client slot back */
/*
 * Flush the xhw input queue during teardown: disable further
 * submission, fail every pending buffer (copy_back requests get
 * arg.ret = -EINVAL), mark them all done, and wake every synchronous
 * caller sleeping on xhw_caller_queue.
 */
473 static void psb_xhw_queue_empty(struct drm_psb_private *dev_priv)
475 struct psb_xhw_buf *cur_buf, *next;
476 unsigned long irq_flags;
478 spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
479 dev_priv->xhw_submit_ok = 0; /* no new requests from here on */
481 list_for_each_entry_safe(cur_buf, next, &dev_priv->xhw_in, head) {
482 list_del_init(&cur_buf->head);
483 if (cur_buf->copy_back) {
484 cur_buf->arg.ret = -EINVAL; /* tell the waiter the request failed */
486 atomic_set(&cur_buf->done, 1);
488 spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
489 wake_up(&dev_priv->xhw_caller_queue);
/*
 * Detach the X server client (PSB_XHW_TAKEDOWN ioctl, or file close).
 * Only acts when @file_priv is the registered xhw client and the client
 * count can be dropped from 1 to 0.  Flushes the pending queue, sends a
 * terminate handshake through a stack-allocated buffer, flushes again,
 * then unmaps and unrefs the communications buffer object.
 * NOTE(review): the conditional structure around the terminate path
 * (presumably keyed on @closing) falls on lines not visible in this
 * listing.
 */
492 void psb_xhw_init_takedown(struct drm_psb_private *dev_priv,
493 struct drm_file *file_priv, int closing)
496 if (dev_priv->xhw_file == file_priv &&
497 atomic_add_unless(&dev_priv->xhw_client, -1, 0)) {
500 psb_xhw_queue_empty(dev_priv);
502 struct psb_xhw_buf buf;
503 INIT_LIST_HEAD(&buf.head); /* must be initialized for psb_xhw_add list checks */
505 psb_xhw_terminate(dev_priv, &buf);
506 psb_xhw_queue_empty(dev_priv);
509 dev_priv->xhw = NULL;
510 drm_bo_kunmap(&dev_priv->xhw_kmap);
511 drm_bo_usage_deref_unlocked(&dev_priv->xhw_bo);
512 dev_priv->xhw_file = NULL;
/*
 * PSB_XHW_INIT ioctl entry point: dispatch on arg->operation to either
 * attach (psb_xhw_init_init) or detach (psb_xhw_init_takedown) the X
 * server client.
 * NOTE(review): the PSB_XHW_INIT case label, default case and returns
 * fall on lines not visible in this listing.
 */
516 int psb_xhw_init_ioctl(struct drm_device *dev, void *data,
517 struct drm_file *file_priv)
519 struct drm_psb_xhw_init_arg *arg = (struct drm_psb_xhw_init_arg *)data;
520 struct drm_psb_private *dev_priv =
521 (struct drm_psb_private *)dev->dev_private;
523 switch (arg->operation) {
525 return psb_xhw_init_init(dev, file_priv, arg);
526 case PSB_XHW_TAKEDOWN:
527 psb_xhw_init_takedown(dev_priv, file_priv, 0);
/*
 * Locked test of whether the xhw input queue is empty; used as a wait
 * condition.  The result is advisory only — the queue can change the
 * moment the lock is dropped.
 * NOTE(review): the declaration of "empty" and the return fall on lines
 * not visible in this listing.
 */
532 static int psb_xhw_in_empty(struct drm_psb_private *dev_priv)
535 unsigned long irq_flags;
537 spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
538 empty = list_empty(&dev_priv->xhw_in);
539 spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
/*
 * Reply-side handler (runs when the X server signals completion): if a
 * buffer is awaiting copy-back, copy the reply out of the shared
 * communications page into its arg block, latch the user-irq op into
 * dev_priv->comm, mark it done and wake the sleeping caller; otherwise
 * clear the user-irq slot.  Always clears xhw_cur_buf.
 * NOTE(review): "dev_priv->xhw_cur_buf = 0;" assigns 0 to a pointer —
 * should be NULL for consistency with the rest of the file.  The
 * assignment of "xa" and the returns fall on lines not visible in this
 * listing.
 */
543 int psb_xhw_handler(struct drm_psb_private *dev_priv)
545 unsigned long irq_flags;
546 struct drm_psb_xhw_arg *xa;
547 struct psb_xhw_buf *buf;
549 spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
551 if (!dev_priv->xhw_on) { /* client already torn down */
552 spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
556 buf = dev_priv->xhw_cur_buf;
557 if (buf && buf->copy_back) {
559 memcpy(xa, dev_priv->xhw, sizeof(*xa)); /* pull reply from shared page */
560 dev_priv->comm[PSB_COMM_USER_IRQ] = xa->irq_op;
561 atomic_set(&buf->done, 1);
562 wake_up(&dev_priv->xhw_caller_queue);
564 dev_priv->comm[PSB_COMM_USER_IRQ] = 0;
566 dev_priv->xhw_cur_buf = 0; /* NOTE(review): prefer NULL */
567 spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
/*
 * X server side of the request pipe (PSB_XHW ioctl): under xhw_mutex,
 * sleep until a request appears on xhw_in (bailing out on signal,
 * timeout, or a forced user interrupt), dequeue the head buffer, copy
 * its argument block into the shared communications page for the X
 * server to read, and either register it for copy-back (xhw_cur_buf)
 * or complete it immediately.  A TERMINATE request additionally turns
 * xhw_on off and wakes all synchronous callers.
 * NOTE(review): the declarations/assignments of "ret" and "xa", the
 * list_del of the dequeued entry, the else-branch braces and the return
 * statements fall on lines not visible in this listing.
 */
571 int psb_xhw_ioctl(struct drm_device *dev, void *data,
572 struct drm_file *file_priv)
574 struct drm_psb_private *dev_priv =
575 (struct drm_psb_private *)dev->dev_private;
576 unsigned long irq_flags;
577 struct drm_psb_xhw_arg *xa;
579 struct list_head *list;
580 struct psb_xhw_buf *buf;
585 if (mutex_lock_interruptible(&dev_priv->xhw_mutex))
588 if (psb_forced_user_interrupt(dev_priv)) { /* asked to return to user space */
589 mutex_unlock(&dev_priv->xhw_mutex);
593 spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
594 while (list_empty(&dev_priv->xhw_in)) { /* wait for work, rechecking under the lock */
595 spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
596 ret = wait_event_interruptible_timeout(dev_priv->xhw_queue,
599 if (ret == -ERESTARTSYS || ret == 0) { /* signal or timeout: give up */
600 mutex_unlock(&dev_priv->xhw_mutex);
603 spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
606 list = dev_priv->xhw_in.next; /* dequeue the oldest request */
609 buf = list_entry(list, struct psb_xhw_buf, head);
611 memcpy(dev_priv->xhw, xa, sizeof(*xa)); /* expose request in the shared page */
613 if (unlikely(buf->copy_back))
614 dev_priv->xhw_cur_buf = buf; /* reply expected: remember for psb_xhw_handler */
616 atomic_set(&buf->done, 1); /* fire-and-forget: complete immediately */
617 dev_priv->xhw_cur_buf = NULL;
620 if (xa->op == PSB_XHW_TERMINATE) {
621 dev_priv->xhw_on = 0;
622 wake_up(&dev_priv->xhw_caller_queue); /* release all synchronous waiters */
624 spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
626 mutex_unlock(&dev_priv->xhw_mutex);