// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_sysfs.h>
#include <drm/drm_vblank.h>

#include "vmwgfx_kms.h"

/* Might need an hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)

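/**
 * vmw_du_cleanup - Tear down the DRM objects embedded in a display unit
 * @du: The display unit to clean up.
 *
 * Cleans up the primary and cursor planes, the crtc and the encoder, and
 * unregisters and cleans up the connector.
 */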
void vmw_du_cleanup(struct vmw_display_unit *du)
{
	drm_plane_cleanup(&du->primary);
	drm_plane_cleanup(&du->cursor);

	drm_connector_unregister(&du->connector);
	drm_crtc_cleanup(&du->crtc);
	drm_encoder_cleanup(&du->encoder);
	drm_connector_cleanup(&du->connector);
}

/*
 * Display Unit Cursor functions
 */

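/**
 * vmw_cursor_update_image - Define a new cursor image in the device FIFO
 * @dev_priv: Pointer to the device private struct.
 * @image: Pointer to a width x height ARGB (4 bytes per pixel) image.
 * @width: Cursor width in pixels.
 * @height: Cursor height in pixels.
 * @hotspotX: Hotspot x coordinate within the image.
 * @hotspotY: Hotspot y coordinate within the image.
 *
 * Reserves FIFO space for an SVGA_CMD_DEFINE_ALPHA_CURSOR command followed
 * by the image data and commits it with a flush.
 *
 * Returns 0 on success, -EINVAL if @image is NULL or -ENOMEM if FIFO space
 * could not be reserved.
 */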
static int vmw_cursor_update_image(struct vmw_private *dev_priv,
				   u32 *image, u32 width, u32 height,
				   u32 hotspotX, u32 hotspotY)
{
	struct {
		u32 cmd;
		SVGAFifoCmdDefineAlphaCursor cursor;
	} *cmd;
	u32 image_size = width * height * 4;
	u32 cmd_size = sizeof(*cmd) + image_size;

	if (!image)
		return -EINVAL;

	cmd = VMW_FIFO_RESERVE(dev_priv, cmd_size);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	memset(cmd, 0, sizeof(*cmd));

	memcpy(&cmd[1], image, image_size);

	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
	cmd->cursor.id = 0;
	cmd->cursor.width = width;
	cmd->cursor.height = height;
	cmd->cursor.hotspotX = hotspotX;
	cmd->cursor.hotspotY = hotspotY;

	vmw_fifo_commit_flush(dev_priv, cmd_size);

	return 0;
}

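/**
 * vmw_cursor_update_bo - Update the cursor image from a buffer object
 * @dev_priv: Pointer to the device private struct.
 * @bo: Buffer object holding the cursor image.
 * @width: Cursor width in pixels.
 * @height: Cursor height in pixels.
 * @hotspotX: Hotspot x coordinate within the image.
 * @hotspotY: Hotspot y coordinate within the image.
 *
 * Reserves and kmaps the buffer object, then hands the mapped image to
 * vmw_cursor_update_image().
 *
 * Returns 0 on success, negative error code on failure.
 */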
static int vmw_cursor_update_bo(struct vmw_private *dev_priv,
				struct vmw_buffer_object *bo,
				u32 width, u32 height,
				u32 hotspotX, u32 hotspotY)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	void *virtual;
	bool dummy;
	int ret;

	kmap_offset = 0;
	kmap_num = (width * height * 4 + PAGE_SIZE - 1) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(&bo->base, true, false, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return -EINVAL;
	}

	ret = ttm_bo_kmap(&bo->base, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	ret = vmw_cursor_update_image(dev_priv, virtual, width, height,
				      hotspotX, hotspotY);

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(&bo->base);

	return ret;
}

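/**
 * vmw_cursor_update_position - Move and show/hide the hardware cursor
 * @dev_priv: Pointer to the device private struct.
 * @show: Whether the cursor should be visible.
 * @x: New cursor x position.
 * @y: New cursor y position.
 *
 * Writes the cursor state to the FIFO registers and bumps the cursor
 * count so the device picks up the change.
 */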
static void vmw_cursor_update_position(struct vmw_private *dev_priv,
				       bool show, int x, int y)
{
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t count;

	spin_lock(&dev_priv->cursor_lock);
	vmw_mmio_write(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
	vmw_mmio_write(x, fifo_mem + SVGA_FIFO_CURSOR_X);
	vmw_mmio_write(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
	count = vmw_mmio_read(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
	vmw_mmio_write(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
	spin_unlock(&dev_priv->cursor_lock);
}

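/**
 * vmw_kms_cursor_snoop - Snoop a cursor image from a surface DMA command
 * @srf: The surface a DMA is being performed to.
 * @tfile: Identifies the caller.
 * @bo: Buffer object backing the DMA source.
 * @header: Header of the SVGA3D command being snooped.
 *
 * Copies the cursor image from the guest backing store into the surface
 * snooper so it can later be re-sent to the device (see
 * vmw_kms_cursor_post_execbuf()). Only simple, page-aligned, single-box
 * DMAs of at most 64x64 pixels are snooped; anything else is rejected
 * with an error.
 */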
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	SVGA3dCopyBox *box;
	unsigned box_count;
	void *virtual;
	bool dummy;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int i, ret;

	cmd = container_of(header, struct vmw_dma_cmd, header);

	/* No snooper installed */
	if (!srf->snooper.image)
		return;

	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
		DRM_ERROR("face and mipmap for cursors should never != 0\n");
		return;
	}

	if (cmd->header.size < 64) {
		DRM_ERROR("at least one full copy box must be given\n");
		return;
	}

	box = (SVGA3dCopyBox *)&cmd[1];
	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
			sizeof(SVGA3dCopyBox);

	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
	    box->x != 0 || box->y != 0 || box->z != 0 ||
	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
	    box->d != 1 || box_count != 1 ||
	    box->w > 64 || box->h > 64) {
		/* TODO handle non page aligned offsets */
		/* TODO handle more dst & src != 0 */
		/* TODO handle more than one copy */
		DRM_ERROR("Can't snoop dma request for cursor!\n");
		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
			  box->srcx, box->srcy, box->srcz,
			  box->x, box->y, box->z,
			  box->w, box->h, box->d, box_count,
			  cmd->dma.guest.ptr.offset);
		return;
	}

	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
	kmap_num = (64 * 64 * 4) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);

	if (box->w == 64 && cmd->dma.guest.pitch == 64 * 4) {
		memcpy(srf->snooper.image, virtual, 64 * 64 * 4);
	} else {
		/* The image is a u32 pointer, so i * 64 steps one 64-pixel row. */
		for (i = 0; i < box->h; i++)
			memcpy(srf->snooper.image + i * 64,
			       virtual + i * cmd->dma.guest.pitch,
			       box->w * 4);
	}

	srf->snooper.age++;

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(bo);
}

/**
 * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
 *
 * @dev_priv: Pointer to the device private struct.
 *
 * Clears all legacy hotspots.
 */
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	drm_modeset_lock_all(dev);
	drm_for_each_crtc(crtc, dev) {
		du = vmw_crtc_to_du(crtc);

		du->hotspot_x = 0;
		du->hotspot_y = 0;
	}
	drm_modeset_unlock_all(dev);
}

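/**
 * vmw_kms_cursor_post_execbuf - Replay snooped cursor updates
 * @dev_priv: Pointer to the device private struct.
 *
 * Walks all crtcs and, for each display unit whose snooped cursor image
 * has aged, re-sends the 64x64 snooper image to the device together with
 * the current hotspot.
 */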
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	mutex_lock(&dev->mode_config.mutex);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		du = vmw_crtc_to_du(crtc);
		if (!du->cursor_surface ||
		    du->cursor_age == du->cursor_surface->snooper.age)
			continue;

		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv,
					du->cursor_surface->snooper.image,
					64, 64,
					du->hotspot_x + du->core_hotspot_x,
					du->hotspot_y + du->core_hotspot_y);
	}

	mutex_unlock(&dev->mode_config.mutex);
}

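/**
 * vmw_du_cursor_plane_destroy - Hide the cursor and clean up the plane
 * @plane: The cursor plane to destroy.
 */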
void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
{
	vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);

	drm_plane_cleanup(plane);
}


void vmw_du_primary_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);

	/* Planes are static in our case so we don't free them here. */
}

/**
 * vmw_du_plane_unpin_surf - Unpin the resource backing a framebuffer surface
 *
 * @vps: plane state associated with the display surface
 * @unreference: true if we also want to unreference the display surface.
 */
void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
			     bool unreference)
{
	if (vps->surf) {
		if (vps->pinned) {
			vmw_resource_unpin(&vps->surf->res);
			vps->pinned--;
		}

		if (unreference) {
			if (vps->pinned)
				DRM_ERROR("Surface still pinned\n");
			vmw_surface_unreference(&vps->surf);
		}
	}
}

/**
 * vmw_du_plane_cleanup_fb - Unpins the plane surface
 *
 * @plane: display plane
 * @old_state: Contains the FB to clean up
 *
 * Unpins the framebuffer surface.
 */
void
vmw_du_plane_cleanup_fb(struct drm_plane *plane,
			struct drm_plane_state *old_state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);

	vmw_du_plane_unpin_surf(vps, false);
}

/**
 * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
 *
 * @plane: display plane
 * @new_state: info on the new plane state, including the FB
 *
 * Returns 0 on success
 */
int
vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
			       struct drm_plane_state *new_state)
{
	struct drm_framebuffer *fb = new_state->fb;
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);

	if (vps->surf)
		vmw_surface_unreference(&vps->surf);

	if (vps->bo)
		vmw_bo_unreference(&vps->bo);

	if (fb) {
		if (vmw_framebuffer_to_vfb(fb)->bo) {
			vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer;
			vmw_bo_reference(vps->bo);
		} else {
			vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
			vmw_surface_reference(vps->surf);
		}
	}

	return 0;
}

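/**
 * vmw_du_cursor_plane_atomic_update - Update the device cursor state
 * @plane: The cursor plane.
 * @old_state: The previous plane state.
 *
 * Sends the new cursor image (from either the snooped surface or the
 * backing buffer object) to the device and repositions the cursor,
 * taking the display-unit and core hotspots into account. With neither
 * a surface nor a buffer object the cursor is hidden.
 */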
void
vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
				  struct drm_plane_state *old_state)
{
	struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(plane->state);
	s32 hotspot_x, hotspot_y;
	int ret = 0;

	hotspot_x = du->hotspot_x;
	hotspot_y = du->hotspot_y;

	if (plane->state->fb) {
		hotspot_x += plane->state->fb->hot_x;
		hotspot_y += plane->state->fb->hot_y;
	}

	du->cursor_surface = vps->surf;
	du->cursor_bo = vps->bo;

	if (vps->surf) {
		du->cursor_age = du->cursor_surface->snooper.age;

		ret = vmw_cursor_update_image(dev_priv,
					      vps->surf->snooper.image,
					      64, 64, hotspot_x,
					      hotspot_y);
	} else if (vps->bo) {
		ret = vmw_cursor_update_bo(dev_priv, vps->bo,
					   plane->state->crtc_w,
					   plane->state->crtc_h,
					   hotspot_x, hotspot_y);
	} else {
		vmw_cursor_update_position(dev_priv, false, 0, 0);
		return;
	}

	if (!ret) {
		du->cursor_x = plane->state->crtc_x + du->set_gui_x;
		du->cursor_y = plane->state->crtc_y + du->set_gui_y;

		vmw_cursor_update_position(dev_priv, true,
					   du->cursor_x + hotspot_x,
					   du->cursor_y + hotspot_y);

		du->core_hotspot_x = hotspot_x - du->hotspot_x;
		du->core_hotspot_y = hotspot_y - du->hotspot_y;
	} else {
		DRM_ERROR("Failed to update cursor image\n");
	}
}

/**
 * vmw_du_primary_plane_atomic_check - check if the new state is okay
 *
 * @plane: display plane
 * @state: info on the new plane state, including the FB
 *
 * Check if the new state is settable given the current state. Other
 * than what the atomic helper checks, we care about crtc fitting
 * the FB and maintaining one active framebuffer.
 *
 * Returns 0 on success
 */
int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
				      struct drm_plane_state *state)
{
	struct drm_crtc_state *crtc_state = NULL;
	struct drm_framebuffer *new_fb = state->fb;
	int ret;

	if (state->crtc)
		crtc_state = drm_atomic_get_new_crtc_state(state->state,
							   state->crtc);

	ret = drm_atomic_helper_check_plane_state(state, crtc_state,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  false, true);

	if (!ret && new_fb) {
		struct drm_crtc *crtc = state->crtc;
		struct vmw_connector_state *vcs;
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);

		vcs = vmw_connector_state_to_vcs(du->connector.state);
	}

	return ret;
}

/**
 * vmw_du_cursor_plane_atomic_check - check if the new state is okay
 *
 * @plane: cursor plane
 * @new_state: info on the new plane state
 *
 * This is a chance to fail if the new cursor state does not fit
 * our requirements.
 *
 * Returns 0 on success
 */
int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
				     struct drm_plane_state *new_state)
{
	int ret = 0;
	struct drm_crtc_state *crtc_state = NULL;
	struct vmw_surface *surface = NULL;
	struct drm_framebuffer *fb = new_state->fb;

	if (new_state->crtc)
		crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
							   new_state->crtc);

	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  true, true);
	if (ret)
		return ret;

	/* Turning off */
	if (!fb)
		return 0;

	/* A lot of the code assumes this */
	if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
		DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
			  new_state->crtc_w, new_state->crtc_h);
		ret = -EINVAL;
	}

	if (!vmw_framebuffer_to_vfb(fb)->bo)
		surface = vmw_framebuffer_to_vfbs(fb)->surface;

	if (surface && !surface->snooper.image) {
		DRM_ERROR("surface not suitable for cursor\n");
		ret = -EINVAL;
	}

	return ret;
}

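/**
 * vmw_du_crtc_atomic_check - Check crtc state against vmw constraints
 * @crtc: The crtc.
 * @new_state: The new crtc state to check.
 *
 * Enforces that an enabled crtc always has an active primary plane and
 * that only the display unit's own connector may be bound to it, and
 * fakes a dot clock for modes that lack one.
 *
 * Returns 0 on success, -EINVAL otherwise.
 */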
int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
			     struct drm_crtc_state *new_state)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
	int connector_mask = drm_connector_mask(&du->connector);
	bool has_primary = new_state->plane_mask &
			   drm_plane_mask(crtc->primary);

	/* We always want to have an active plane with an active CRTC */
	if (has_primary != new_state->enable)
		return -EINVAL;

	if (new_state->connector_mask != connector_mask &&
	    new_state->connector_mask != 0) {
		DRM_ERROR("Invalid connectors configuration\n");
		return -EINVAL;
	}

	/*
	 * Our virtual device does not have a dot clock, so use the logical
	 * clock value as the dot clock.
	 */
	if (new_state->mode.crtc_clock == 0)
		new_state->adjusted_mode.crtc_clock = new_state->mode.clock;

	return 0;
}


void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
			      struct drm_crtc_state *old_crtc_state)
{
}

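/**
 * vmw_du_crtc_atomic_flush - Complete a crtc update
 * @crtc: The crtc.
 * @old_crtc_state: The previous crtc state.
 *
 * The virtual device has no real vblank, so any pending event is sent
 * immediately rather than from a vblank interrupt handler.
 */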
void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
			      struct drm_crtc_state *old_crtc_state)
{
	struct drm_pending_vblank_event *event = crtc->state->event;

	if (event) {
		crtc->state->event = NULL;

		spin_lock_irq(&crtc->dev->event_lock);
		drm_crtc_send_vblank_event(crtc, event);
		spin_unlock_irq(&crtc->dev->event_lock);
	}
}

/**
 * vmw_du_crtc_duplicate_state - duplicate crtc state
 * @crtc: DRM crtc
 *
 * Allocates and returns a copy of the crtc state (both common and
 * vmw-specific) for the specified crtc.
 *
 * Returns: The newly allocated crtc state, or NULL on failure.
 */
struct drm_crtc_state *
vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct drm_crtc_state *state;
	struct vmw_crtc_state *vcs;

	if (WARN_ON(!crtc->state))
		return NULL;

	vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);
	if (!vcs)
		return NULL;

	state = &vcs->base;

	__drm_atomic_helper_crtc_duplicate_state(crtc, state);

	return state;
}


/**
 * vmw_du_crtc_reset - creates a blank vmw crtc state
 * @crtc: DRM crtc
 *
 * Resets the atomic state for @crtc by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */
void vmw_du_crtc_reset(struct drm_crtc *crtc)
{
	struct vmw_crtc_state *vcs;

	if (crtc->state) {
		__drm_atomic_helper_crtc_destroy_state(crtc->state);

		kfree(vmw_crtc_state_to_vcs(crtc->state));
	}

	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
	if (!vcs) {
		DRM_ERROR("Cannot allocate vmw_crtc_state\n");
		return;
	}

	crtc->state = &vcs->base;
	crtc->state->crtc = crtc;
}

/**
 * vmw_du_crtc_destroy_state - destroy crtc state
 * @crtc: DRM crtc
 * @state: state object to destroy
 *
 * Destroys the crtc state (both common and vmw-specific) for the
 * specified crtc.
 */
void
vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
			  struct drm_crtc_state *state)
{
	drm_atomic_helper_crtc_destroy_state(crtc, state);
}

/**
 * vmw_du_plane_duplicate_state - duplicate plane state
 * @plane: drm plane
 *
 * Allocates and returns a copy of the plane state (both common and
 * vmw-specific) for the specified plane.
 *
 * Returns: The newly allocated plane state, or NULL on failure.
 */
struct drm_plane_state *
vmw_du_plane_duplicate_state(struct drm_plane *plane)
{
	struct drm_plane_state *state;
	struct vmw_plane_state *vps;

	vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);
	if (!vps)
		return NULL;

	vps->pinned = 0;
	vps->cpp = 0;

	/* Each ref counted resource needs to be acquired again */
	if (vps->surf)
		(void) vmw_surface_reference(vps->surf);

	if (vps->bo)
		(void) vmw_bo_reference(vps->bo);

	state = &vps->base;

	__drm_atomic_helper_plane_duplicate_state(plane, state);

	return state;
}


/**
 * vmw_du_plane_reset - creates a blank vmw plane state
 * @plane: drm plane
 *
 * Resets the atomic state for @plane by freeing the state pointer (which might
 * be NULL, e.g. at driver load time) and allocating a new empty state object.
 */
void vmw_du_plane_reset(struct drm_plane *plane)
{
	struct vmw_plane_state *vps;

	if (plane->state)
		vmw_du_plane_destroy_state(plane, plane->state);

	vps = kzalloc(sizeof(*vps), GFP_KERNEL);
	if (!vps) {
		DRM_ERROR("Cannot allocate vmw_plane_state\n");
		return;
	}

	__drm_atomic_helper_plane_reset(plane, &vps->base);
}


/**
 * vmw_du_plane_destroy_state - destroy plane state
 * @plane: DRM plane
 * @state: state object to destroy
 *
 * Destroys the plane state (both common and vmw-specific) for the
 * specified plane.
 */
void
vmw_du_plane_destroy_state(struct drm_plane *plane,
			   struct drm_plane_state *state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);

	/* Should have been freed by cleanup_fb */
	if (vps->surf)
		vmw_surface_unreference(&vps->surf);

	if (vps->bo)
		vmw_bo_unreference(&vps->bo);

	drm_atomic_helper_plane_destroy_state(plane, state);
}

/**
 * vmw_du_connector_duplicate_state - duplicate connector state
 * @connector: DRM connector
 *
 * Allocates and returns a copy of the connector state (both common and
 * vmw-specific) for the specified connector.
 *
 * Returns: The newly allocated connector state, or NULL on failure.
 */
struct drm_connector_state *
vmw_du_connector_duplicate_state(struct drm_connector *connector)
{
	struct drm_connector_state *state;
	struct vmw_connector_state *vcs;

	if (WARN_ON(!connector->state))
		return NULL;

	vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL);
	if (!vcs)
		return NULL;

	state = &vcs->base;

	__drm_atomic_helper_connector_duplicate_state(connector, state);

	return state;
}


/**
 * vmw_du_connector_reset - creates a blank vmw connector state
 * @connector: DRM connector
 *
 * Resets the atomic state for @connector by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */
void vmw_du_connector_reset(struct drm_connector *connector)
{
	struct vmw_connector_state *vcs;

	if (connector->state) {
		__drm_atomic_helper_connector_destroy_state(connector->state);

		kfree(vmw_connector_state_to_vcs(connector->state));
	}

	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
	if (!vcs) {
		DRM_ERROR("Cannot allocate vmw_connector_state\n");
		return;
	}

	__drm_atomic_helper_connector_reset(connector, &vcs->base);
}

/**
 * vmw_du_connector_destroy_state - destroy connector state
 * @connector: DRM connector
 * @state: state object to destroy
 *
 * Destroys the connector state (both common and vmw-specific) for the
 * specified connector.
 */
void
vmw_du_connector_destroy_state(struct drm_connector *connector,
			       struct drm_connector_state *state)
{
	drm_atomic_helper_connector_destroy_state(connector, state);
}

/*
 * Generic framebuffer code
 */

/*
 * Surface framebuffer code
 */

static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_surface_unreference(&vfbs->surface);
	if (vfbs->base.user_obj)
		ttm_base_object_unref(&vfbs->base.user_obj);

	kfree(vfbs);
}

/**
 * vmw_kms_readback - Perform a readback from the screen system to
 * a buffer-object backed framebuffer.
 *
 * @dev_priv: Pointer to the device private structure.
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * Must be set to NULL if @user_fence_rep is NULL.
 * @vfb: Pointer to the buffer-object backed framebuffer.
 * @user_fence_rep: User-space provided structure for fence information.
 * Must be set to non-NULL if @file_priv is non-NULL.
 * @vclips: Array of clip rects.
 * @num_clips: Number of clip rects in @vclips.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_readback(struct vmw_private *dev_priv,
		     struct drm_file *file_priv,
		     struct vmw_framebuffer *vfb,
		     struct drm_vmw_fence_rep __user *user_fence_rep,
		     struct drm_vmw_rect *vclips,
		     uint32_t num_clips)
{
	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_object:
		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
					    user_fence_rep, vclips, num_clips,
					    NULL);
	case vmw_du_screen_target:
		return vmw_kms_stdu_dma(dev_priv, file_priv, vfb,
					user_fence_rep, NULL, vclips, num_clips,
					1, false, true, NULL);
	default:
		WARN_ONCE(true,
			  "Readback called with invalid display system.\n");
	}

	return -ENOSYS;
}

static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
	.destroy = vmw_framebuffer_surface_destroy,
	.dirty = drm_atomic_helper_dirtyfb,
};

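/**
 * vmw_kms_new_framebuffer_surface - Build a framebuffer around a surface
 * @dev_priv: Pointer to the device private struct.
 * @surface: The surface to wrap.
 * @out: On success, points to the new vmw_framebuffer.
 * @mode_cmd: Frame-buffer metadata from user-space.
 * @is_bo_proxy: Whether @surface is a proxy created for a buffer object.
 *
 * Sanity-checks the surface against the requested mode and pixel format
 * and initializes the DRM framebuffer.
 *
 * Returns 0 on success, negative error code on failure.
 */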
static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
					   struct vmw_surface *surface,
					   struct vmw_framebuffer **out,
					   const struct drm_mode_fb_cmd2
					   *mode_cmd,
					   bool is_bo_proxy)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_surface *vfbs;
	enum SVGA3dSurfaceFormat format;
	int ret;
	struct drm_format_name_buf format_name;

	/* 3D is only supported on HWv8 and newer hosts */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return -ENOSYS;

	/*
	 * Sanity checks.
	 */

	/* Surface must be marked as a scanout. */
	if (unlikely(!surface->scanout))
		return -EINVAL;

	if (unlikely(surface->mip_levels[0] != 1 ||
		     surface->num_sizes != 1 ||
		     surface->base_size.width < mode_cmd->width ||
		     surface->base_size.height < mode_cmd->height ||
		     surface->base_size.depth != 1)) {
		DRM_ERROR("Incompatible surface dimensions "
			  "for requested mode.\n");
		return -EINVAL;
	}

	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_ARGB8888:
		format = SVGA3D_A8R8G8B8;
		break;
	case DRM_FORMAT_XRGB8888:
		format = SVGA3D_X8R8G8B8;
		break;
	case DRM_FORMAT_RGB565:
		format = SVGA3D_R5G6B5;
		break;
	case DRM_FORMAT_XRGB1555:
		format = SVGA3D_A1R5G5B5;
		break;
	default:
		DRM_ERROR("Invalid pixel format: %s\n",
			  drm_get_format_name(mode_cmd->pixel_format, &format_name));
		return -EINVAL;
	}

	/*
	 * For DX, surface format validation is done when surface->scanout
	 * is set.
	 */
	if (!dev_priv->has_dx && format != surface->format) {
		DRM_ERROR("Invalid surface format for requested mode.\n");
		return -EINVAL;
	}

	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
	if (!vfbs) {
		ret = -ENOMEM;
		goto out_err1;
	}

	drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
	vfbs->surface = vmw_surface_reference(surface);
	vfbs->base.user_handle = mode_cmd->handles[0];
	vfbs->is_bo_proxy = is_bo_proxy;

	*out = &vfbs->base;

	ret = drm_framebuffer_init(dev, &vfbs->base.base,
				   &vmw_framebuffer_surface_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_surface_unreference(&surface);
	kfree(vfbs);
out_err1:
	return ret;
}

/*
 * Buffer-object framebuffer code
 */

static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_bo *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_bo_unreference(&vfbd->buffer);
	if (vfbd->base.user_obj)
		ttm_base_object_unref(&vfbd->base.user_obj);

	kfree(vfbd);
}

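/**
 * vmw_framebuffer_bo_dirty - Handle dirty rects for the legacy display unit
 * @framebuffer: The framebuffer that was touched.
 * @file_priv: Identifies the caller.
 * @flags: DRM_MODE_FB_DIRTY_* flags.
 * @color: Color argument from the dirty ioctl, unused here.
 * @clips: Array of dirty clip rects, or NULL for the full framebuffer.
 * @num_clips: Number of clip rects in @clips.
 *
 * Forwards the dirty region to the legacy display unit and flushes the
 * FIFO. Other display units go through the atomic dirtyfb helper instead;
 * see vmw_framebuffer_bo_dirty_ext() below.
 */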
static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
				    struct drm_file *file_priv,
				    unsigned int flags, unsigned int color,
				    struct drm_clip_rect *clips,
				    unsigned int num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_framebuffer_bo *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);
	struct drm_clip_rect norect;
	int ret, increment = 1;

	drm_modeset_lock_all(dev_priv->dev);

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0)) {
		drm_modeset_unlock_all(dev_priv->dev);
		return ret;
	}

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		increment = 2;
	}

	switch (dev_priv->active_display_unit) {
	case vmw_du_legacy:
		ret = vmw_kms_ldu_do_bo_dirty(dev_priv, &vfbd->base, 0, 0,
					      clips, num_clips, increment);
		break;
	default:
		ret = -EINVAL;
		WARN_ONCE(true, "Dirty called with invalid display system.\n");
		break;
	}

	vmw_fifo_flush(dev_priv, false);
	ttm_read_unlock(&dev_priv->reservation_sem);

	drm_modeset_unlock_all(dev_priv->dev);

	return ret;
}

static int vmw_framebuffer_bo_dirty_ext(struct drm_framebuffer *framebuffer,
					struct drm_file *file_priv,
					unsigned int flags, unsigned int color,
					struct drm_clip_rect *clips,
					unsigned int num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);

	if (dev_priv->active_display_unit == vmw_du_legacy)
		return vmw_framebuffer_bo_dirty(framebuffer, file_priv, flags,
						color, clips, num_clips);

	return drm_atomic_helper_dirtyfb(framebuffer, file_priv, flags, color,
					 clips, num_clips);
}

static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
	.destroy = vmw_framebuffer_bo_destroy,
	.dirty = vmw_framebuffer_bo_dirty_ext,
};

/**
 * vmw_framebuffer_pin - Pin the buffer in a location suitable for access
 * by the display system.
 * @vfb: The framebuffer whose backing buffer should be pinned.
 */
static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_buffer_object *buf;
	struct ttm_placement *placement;
	int ret;

	buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;

	if (!buf)
		return 0;

	switch (dev_priv->active_display_unit) {
	case vmw_du_legacy:
		vmw_overlay_pause_all(dev_priv);
		ret = vmw_bo_pin_in_start_of_vram(dev_priv, buf, false);
		vmw_overlay_resume_all(dev_priv);
		break;
	case vmw_du_screen_object:
	case vmw_du_screen_target:
		if (vfb->bo) {
			if (dev_priv->capabilities & SVGA_CAP_3D) {
				/*
				 * Use surface DMA to get content to
				 * screen target surface.
				 */
				placement = &vmw_vram_gmr_placement;
			} else {
				/* Use CPU blit. */
				placement = &vmw_sys_placement;
			}
		} else {
			/* Use surface / image update */
			placement = &vmw_mob_placement;
		}

		return vmw_bo_pin_in_placement(dev_priv, buf, placement, false);
	default:
		return -EINVAL;
	}

	return ret;
}

static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_buffer_object *buf;

	buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;

	if (WARN_ON(!buf))
		return 0;

	return vmw_bo_unpin(dev_priv, buf, false);
}

/**
 * vmw_create_bo_proxy - create a proxy surface for the buffer object
 *
 * @dev: DRM device
 * @mode_cmd: parameters for the new surface
 * @bo_mob: MOB backing the buffer object
 * @srf_out: newly created surface
 *
 * When the content FB is a buffer object, we create a surface as a proxy to
 * the same buffer. This way we can do a surface copy rather than a surface
 * DMA, which is a more efficient approach.
 *
 * RETURNS:
 * 0 on success, error code otherwise
 */
static int vmw_create_bo_proxy(struct drm_device *dev,
			       const struct drm_mode_fb_cmd2 *mode_cmd,
			       struct vmw_buffer_object *bo_mob,
			       struct vmw_surface **srf_out)
{
	uint32_t format;
	struct drm_vmw_size content_base_size = {0};
	struct vmw_resource *res;
	unsigned int bytes_pp;
	struct drm_format_name_buf format_name;
	int ret;

	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB8888:
		format = SVGA3D_X8R8G8B8;
		bytes_pp = 4;
		break;

	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB1555:
		format = SVGA3D_R5G6B5;
		bytes_pp = 2;
		break;

	case DRM_FORMAT_C8:
		/* 8-bit palettized; a bare "case 8:" can never match a fourcc. */
		format = SVGA3D_P8;
		bytes_pp = 1;
		break;

	default:
		DRM_ERROR("Invalid framebuffer format %s\n",
			  drm_get_format_name(mode_cmd->pixel_format, &format_name));
		return -EINVAL;
	}

	content_base_size.width  = mode_cmd->pitches[0] / bytes_pp;
	content_base_size.height = mode_cmd->height;
	content_base_size.depth  = 1;

	ret = vmw_surface_gb_priv_define(dev,
					 0, /* kernel visible only */
					 0, /* flags */
					 format,
					 true, /* can be a scanout buffer */
					 1, /* num of mip levels */
					 0,
					 0,
					 content_base_size,
					 SVGA3D_MS_PATTERN_NONE,
					 SVGA3D_MS_QUALITY_NONE,
					 srf_out);
	if (ret) {
		DRM_ERROR("Failed to allocate proxy content buffer\n");
		return ret;
	}

	res = &(*srf_out)->res;

	/* Reserve and switch the backing mob. */
	mutex_lock(&res->dev_priv->cmdbuf_mutex);
	(void) vmw_resource_reserve(res, false, true);
	vmw_bo_unreference(&res->backup);
	res->backup = vmw_bo_reference(bo_mob);
	res->backup_offset = 0;
	vmw_resource_unreserve(res, false, false, false, NULL, 0);
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);

	return 0;
}

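/**
 * vmw_kms_new_framebuffer_bo - Build a framebuffer around a buffer object
 * @dev_priv: Pointer to the device private struct.
 * @bo: The buffer object backing the framebuffer.
 * @out: On success, points to the new vmw_framebuffer.
 * @mode_cmd: Frame-buffer metadata from user-space.
 *
 * Verifies that the buffer object is large enough for the requested mode
 * and, for screen objects, that the pixel format is supported, then
 * initializes the DRM framebuffer.
 *
 * Returns 0 on success, negative error code on failure.
 */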
static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
				      struct vmw_buffer_object *bo,
				      struct vmw_framebuffer **out,
				      const struct drm_mode_fb_cmd2
				      *mode_cmd)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_bo *vfbd;
	unsigned int requested_size;
	struct drm_format_name_buf format_name;
	int ret;

	requested_size = mode_cmd->height * mode_cmd->pitches[0];
	if (unlikely(requested_size > bo->base.num_pages * PAGE_SIZE)) {
		DRM_ERROR("Screen buffer object size is too small "
			  "for requested mode.\n");
		return -EINVAL;
	}

	/* Limited framebuffer color depth support for screen objects */
	if (dev_priv->active_display_unit == vmw_du_screen_object) {
		switch (mode_cmd->pixel_format) {
		case DRM_FORMAT_XRGB8888:
		case DRM_FORMAT_ARGB8888:
			break;
		case DRM_FORMAT_XRGB1555:
		case DRM_FORMAT_RGB565:
			break;
		default:
			DRM_ERROR("Invalid pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format, &format_name));
			return -EINVAL;
		}
	}

	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
	if (!vfbd) {
		ret = -ENOMEM;
		goto out_err1;
	}

	drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
	vfbd->base.bo = true;
	vfbd->buffer = vmw_bo_reference(bo);
	vfbd->base.user_handle = mode_cmd->handles[0];
	*out = &vfbd->base;

	ret = drm_framebuffer_init(dev, &vfbd->base.base,
				   &vmw_framebuffer_bo_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_bo_unreference(&bo);
	kfree(vfbd);
out_err1:
	return ret;
}


/**
 * vmw_kms_srf_ok - check if a surface can be created
 *
 * @dev_priv: Pointer to the device private struct.
 * @width: requested width
 * @height: requested height
 *
 * Surfaces must not exceed the maximum texture size.
 */
static bool
vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
{
	if (width > dev_priv->texture_max_width ||
	    height > dev_priv->texture_max_height)
		return false;

	return true;
}

/**
 * vmw_kms_new_framebuffer - Create a new framebuffer.
 *
 * @dev_priv: Pointer to device private struct.
 * @bo: Pointer to buffer object to wrap the kms framebuffer around.
 * Either @bo or @surface must be NULL.
 * @surface: Pointer to a surface to wrap the kms framebuffer around.
 * Either @bo or @surface must be NULL.
 * @only_2d: No presents will occur to this buffer object based framebuffer.
 * This helps the code to do some important optimizations.
 * @mode_cmd: Frame-buffer metadata.
 */
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
			struct vmw_buffer_object *bo,
			struct vmw_surface *surface,
			bool only_2d,
			const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vmw_framebuffer *vfb = NULL;
	bool is_bo_proxy = false;
	int ret;

	/*
	 * We cannot use the SurfaceDMA command in a non-accelerated VM,
	 * therefore, wrap the buffer object in a surface so we can use the
	 * SurfaceCopy command.
	 */
	if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height) &&
	    bo && only_2d &&
	    mode_cmd->width > 64 && /* Don't create a proxy for cursor */
	    dev_priv->active_display_unit == vmw_du_screen_target) {
		ret = vmw_create_bo_proxy(dev_priv->dev, mode_cmd,
					  bo, &surface);
		if (ret)
			return ERR_PTR(ret);

		is_bo_proxy = true;
	}

	/* Create the new framebuffer depending on what we have */
	if (surface) {
		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
						      mode_cmd,
						      is_bo_proxy);

		/*
		 * vmw_create_bo_proxy() adds a reference that is no longer
		 * needed
		 */
		if (is_bo_proxy)
			vmw_surface_unreference(&surface);
	} else if (bo) {
		ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb,
						 mode_cmd);
	} else {
		BUG();
	}

	if (ret)
		return ERR_PTR(ret);

	vfb->pin = vmw_framebuffer_pin;
	vfb->unpin = vmw_framebuffer_unpin;

	return vfb;
}

/*
 * Generic Kernel modesetting functions
 */

static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
						 struct drm_file *file_priv,
						 const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_framebuffer *vfb = NULL;
	struct vmw_surface *surface = NULL;
	struct vmw_buffer_object *bo = NULL;
	struct ttm_base_object *user_obj;
	int ret;

	/*
	 * Take a reference on the user object of the resource
	 * backing the kms fb. This ensures that user-space handle
	 * lookups on that resource will always work as long as
	 * it's registered with a kms framebuffer. This is important,
	 * since vmw_execbuf_process identifies resources in the
	 * command stream using user-space handles.
	 */
	user_obj = ttm_base_object_lookup(tfile, mode_cmd->handles[0]);
	if (unlikely(user_obj == NULL)) {
		DRM_ERROR("Could not locate requested kms frame buffer.\n");
		return ERR_PTR(-ENOENT);
	}

	/* returns either a bo or surface */
	ret = vmw_user_lookup_handle(dev_priv, tfile,
				     mode_cmd->handles[0],
				     &surface, &bo);
	if (ret)
		goto err_out;

	if (!bo &&
	    !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
		DRM_ERROR("Surface size cannot exceed %dx%d",
			  dev_priv->texture_max_width,
			  dev_priv->texture_max_height);
		ret = -EINVAL;
		goto err_out;
	}

	vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
				      !(dev_priv->capabilities & SVGA_CAP_3D),
				      mode_cmd);
	if (IS_ERR(vfb)) {
		ret = PTR_ERR(vfb);
		goto err_out;
	}

err_out:
	/* vmw_user_lookup_handle takes one ref so does new_fb */
	if (bo)
		vmw_bo_unreference(&bo);
	if (surface)
		vmw_surface_unreference(&surface);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		ttm_base_object_unref(&user_obj);
		return ERR_PTR(ret);
	} else
		vfb->user_obj = user_obj;

	return &vfb->base;
}

/**
 * vmw_kms_check_display_memory - Validates display memory required for a
 * topology
 * @dev: DRM device
 * @num_rects: number of drm_rect in rects
 * @rects: array of drm_rect representing the topology to validate indexed by
 * crtc index.
 *
 * Returns:
 * 0 on success otherwise negative error code
 */
static int vmw_kms_check_display_memory(struct drm_device *dev,
					uint32_t num_rects,
					struct drm_rect *rects)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_rect bounding_box = {0};
	u64 total_pixels = 0, pixel_mem, bb_mem;
	int i;

	for (i = 0; i < num_rects; i++) {
		/*
		 * For STDU only individual screen (screen target) is limited
		 * by SCREENTARGET_MAX_WIDTH/HEIGHT registers.
		 */
		if (dev_priv->active_display_unit == vmw_du_screen_target &&
		    (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
		     drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
			VMW_DEBUG_KMS("Screen size not supported.\n");
			return -EINVAL;
		}

		/* Bounding box upper left is at (0,0). */
		if (rects[i].x2 > bounding_box.x2)
			bounding_box.x2 = rects[i].x2;

		if (rects[i].y2 > bounding_box.y2)
			bounding_box.y2 = rects[i].y2;

		total_pixels += (u64) drm_rect_width(&rects[i]) *
				(u64) drm_rect_height(&rects[i]);
	}

	/* Virtual svga device primary limits are always in 32-bpp. */
	pixel_mem = total_pixels * 4;

	/*
	 * For HV10 and below prim_bb_mem is the vram size. When
	 * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present, the vram
	 * size is the limit on the primary bounding box.
	 */
	if (pixel_mem > dev_priv->prim_bb_mem) {
		VMW_DEBUG_KMS("Combined output size too large.\n");
		return -EINVAL;
	}

	/* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */
	if (dev_priv->active_display_unit != vmw_du_screen_target ||
	    !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
		bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;

		if (bb_mem > dev_priv->prim_bb_mem) {
			VMW_DEBUG_KMS("Topology is beyond supported limits.\n");
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * vmw_crtc_state_and_lock - Return new or current crtc state with locked
 * crtc mutex
 * @state: The atomic state pointer containing the new atomic state
 * @crtc: The crtc
 *
 * This function returns the new crtc state if it's part of the state update.
 * Otherwise returns the current crtc state. It also makes sure that the
 * crtc mutex is locked.
 *
 * Returns: A valid crtc state pointer or NULL. It may also return a
 * pointer error, in particular -EDEADLK if locking needs to be rerun.
 */
static struct drm_crtc_state *
vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_crtc_state *crtc_state;

	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
	if (crtc_state) {
		lockdep_assert_held(&crtc->mutex.mutex.base);
	} else {
		int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);

		if (ret != 0 && ret != -EALREADY)
			return ERR_PTR(ret);

		crtc_state = crtc->state;
	}

	return crtc_state;
}

/**
 * vmw_kms_check_implicit - Verify that all implicit display units scan out
 * from the same fb after the new state is committed.
 * @dev: The drm_device.
 * @state: The new state to be checked.
 *
 * Returns:
 * Zero on success,
 * -EINVAL on invalid state,
 * -EDEADLK if modeset locking needs to be rerun.
 */
static int vmw_kms_check_implicit(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct drm_framebuffer *implicit_fb = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_plane_state *plane_state;

	drm_for_each_crtc(crtc, dev) {
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);

		if (!du->is_implicit)
			continue;

		crtc_state = vmw_crtc_state_and_lock(state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state || !crtc_state->enable)
			continue;

		/*
		 * Can't move primary planes across crtcs, so this is OK.
		 * It also means we don't need to take the plane mutex.
		 */
		plane_state = du->primary.state;
		if (plane_state->crtc != crtc)
			continue;

		if (!implicit_fb)
			implicit_fb = plane_state->fb;
		else if (implicit_fb != plane_state->fb)
			return -EINVAL;
	}

	return 0;
}

/**
 * vmw_kms_check_topology - Validates topology in drm_atomic_state
 * @dev: DRM device
 * @state: the driver state object
 *
 * Returns:
 * 0 on success otherwise negative error code
 */
static int vmw_kms_check_topology(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_rect *rects;
	struct drm_crtc *crtc;
	uint32_t i;
	int ret = 0;

	rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
			GFP_KERNEL);
	if (!rects)
		return -ENOMEM;

	drm_for_each_crtc(crtc, dev) {
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
		struct drm_crtc_state *crtc_state;

		i = drm_crtc_index(crtc);

		crtc_state = vmw_crtc_state_and_lock(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto clean;
		}

		if (!crtc_state)
			continue;

		if (crtc_state->enable) {
			rects[i].x1 = du->gui_x;
			rects[i].y1 = du->gui_y;
			rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
			rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
		} else {
			rects[i].x1 = 0;
			rects[i].y1 = 0;
			rects[i].x2 = 0;
			rects[i].y2 = 0;
		}
	}

	/* Determine change to topology due to new atomic state */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
		struct drm_connector *connector;
		struct drm_connector_state *conn_state;
		struct vmw_connector_state *vmw_conn_state;

		if (!du->pref_active && new_crtc_state->enable) {
			VMW_DEBUG_KMS("Enabling a disabled display unit\n");
			ret = -EINVAL;
			goto clean;
		}

		/*
		 * For vmwgfx each crtc has only one connector attached and it
		 * is not changed so don't really need to check the
		 * crtc->connector_mask and iterate over it.
		 */
		connector = &du->connector;
		conn_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(conn_state)) {
			ret = PTR_ERR(conn_state);
			goto clean;
		}

		vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
		vmw_conn_state->gui_x = du->gui_x;
		vmw_conn_state->gui_y = du->gui_y;
	}

	ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
					   rects);

clean:
	kfree(rects);
	return ret;
}

/**
 * vmw_kms_atomic_check_modeset - validate state object for modeset changes
 *
 * @dev: DRM device
 * @state: the driver state object
 *
 * This is a simple wrapper around drm_atomic_helper_check() that allows
 * us to assign a value to mode->crtc_clock so that
 * drm_calc_timestamping_constants() won't throw an error message.
 *
 * Returns:
 * Zero for success or -errno
 */
static int
vmw_kms_atomic_check_modeset(struct drm_device *dev,
			     struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	bool need_modeset = false;
	int i, ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	ret = vmw_kms_check_implicit(dev, state);
	if (ret) {
		VMW_DEBUG_KMS("Invalid implicit state\n");
		return ret;
	}

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		if (drm_atomic_crtc_needs_modeset(crtc_state))
			need_modeset = true;
	}

	if (need_modeset)
		return vmw_kms_check_topology(dev, state);

	return ret;
}

static const struct drm_mode_config_funcs vmw_kms_funcs = {
	.fb_create = vmw_kms_fb_create,
	.atomic_check = vmw_kms_atomic_check_modeset,
	.atomic_commit = drm_atomic_helper_commit,
};

static int vmw_kms_generic_present(struct vmw_private *dev_priv,
				   struct drm_file *file_priv,
				   struct vmw_framebuffer *vfb,
				   struct vmw_surface *surface,
				   uint32_t sid,
				   int32_t destX, int32_t destY,
				   struct drm_vmw_rect *clips,
				   uint32_t num_clips)
{
	return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
					    &surface->res, destX, destY,
					    num_clips, 1, NULL, NULL);
}


int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid,
		    int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips)
{
	int ret;

	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_target:
		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
						 &surface->res, destX, destY,
						 num_clips, 1, NULL, NULL);
		break;
	case vmw_du_screen_object:
		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
					      sid, destX, destY, clips,
					      num_clips);
		break;
	default:
		WARN_ONCE(true,
			  "Present called with invalid display system.\n");
		ret = -ENOSYS;
		break;
	}
	if (ret)
		return ret;

	vmw_fifo_flush(dev_priv, false);

	return 0;
}

static void
vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
{
	if (dev_priv->hotplug_mode_update_property)
		return;

	dev_priv->hotplug_mode_update_property =
		drm_property_create_range(dev_priv->dev,
					  DRM_MODE_PROP_IMMUTABLE,
					  "hotplug_mode_update", 0, 1);
}

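/**
 * vmw_kms_init - Initialize kernel modesetting
 * @dev_priv: Pointer to the device private struct.
 *
 * Sets up mode config limits and properties and tries to bring up a
 * display system, preferring screen targets, then screen objects, and
 * finally falling back to the legacy display unit.
 *
 * Returns 0 on success, negative error code on failure.
 */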
int vmw_kms_init(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int ret;

	drm_mode_config_init(dev);
	dev->mode_config.funcs = &vmw_kms_funcs;
	dev->mode_config.min_width = 1;
	dev->mode_config.min_height = 1;
	dev->mode_config.max_width = dev_priv->texture_max_width;
	dev->mode_config.max_height = dev_priv->texture_max_height;

	drm_mode_create_suggested_offset_properties(dev);
	vmw_kms_create_hotplug_mode_update_property(dev_priv);

	ret = vmw_kms_stdu_init_display(dev_priv);
	if (ret) {
		ret = vmw_kms_sou_init_display(dev_priv);
		if (ret) /* Fallback */
			ret = vmw_kms_ldu_init_display(dev_priv);
	}

	return ret;
}

int vmw_kms_close(struct vmw_private *dev_priv)
{
	int ret = 0;

	/*
	 * The docs say we should take the lock before calling this function,
	 * but since it destroys encoders and our destructor calls
	 * drm_encoder_cleanup, which takes the lock, we would deadlock.
	 */
	drm_mode_config_cleanup(dev_priv->dev);
	if (dev_priv->active_display_unit == vmw_du_legacy)
		ret = vmw_kms_ldu_close_display(dev_priv);

	return ret;
}

int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_vmw_cursor_bypass_arg *arg = data;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;
	int ret = 0;

	mutex_lock(&dev->mode_config.mutex);
	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {

		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			du = vmw_crtc_to_du(crtc);
			du->hotspot_x = arg->xhot;
			du->hotspot_y = arg->yhot;
		}

		mutex_unlock(&dev->mode_config.mutex);
		return 0;
	}

	crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
	if (!crtc) {
		ret = -ENOENT;
		goto out;
	}

	du = vmw_crtc_to_du(crtc);

	du->hotspot_x = arg->xhot;
	du->hotspot_y = arg->yhot;

out:
	mutex_unlock(&dev->mode_config.mutex);

	return ret;
}

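/**
 * vmw_kms_write_svga - Program a display mode directly into SVGA registers
 * @vmw_priv: Pointer to the device private struct.
 * @width: Mode width in pixels.
 * @height: Mode height in pixels.
 * @pitch: Scanline pitch in bytes.
 * @bpp: Bits per pixel.
 * @depth: Expected color depth.
 *
 * Returns 0 on success, -EINVAL if the host reports a depth other than
 * the one expected for the given bpp.
 */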
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
		       unsigned width, unsigned height, unsigned pitch,
		       unsigned bpp, unsigned depth)
{
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_mmio_write(pitch, vmw_priv->mmio_virt +
			       SVGA_FIFO_PITCHLOCK);
	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);

	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
		return -EINVAL;
	}

	return 0;
}

int vmw_kms_save_vga(struct vmw_private *vmw_priv)
{
	struct vmw_vga_topology_state *save;
	uint32_t i;

	vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
	vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
	vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_priv->vga_pitchlock =
			vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_priv->vga_pitchlock = vmw_mmio_read(vmw_priv->mmio_virt +
							SVGA_FIFO_PITCHLOCK);

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
		return 0;

	vmw_priv->num_displays = vmw_read(vmw_priv,
					  SVGA_REG_NUM_GUEST_DISPLAYS);

	if (vmw_priv->num_displays == 0)
		vmw_priv->num_displays = 1;

	for (i = 0; i < vmw_priv->num_displays; ++i) {
		save = &vmw_priv->vga_save[i];
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
		save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
		save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
		save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
		save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
		save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
		if (i == 0 && vmw_priv->num_displays == 1 &&
		    save->width == 0 && save->height == 0) {

			/*
			 * It should be fairly safe to assume that these
			 * values are uninitialized.
			 */

			save->width = vmw_priv->vga_width - save->pos_x;
			save->height = vmw_priv->vga_height - save->pos_y;
		}
	}

	return 0;
}

int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
{
	struct vmw_vga_topology_state *save;
	uint32_t i;

	vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
			  vmw_priv->vga_pitchlock);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_mmio_write(vmw_priv->vga_pitchlock,
			       vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
		return 0;

	for (i = 0; i < vmw_priv->num_displays; ++i) {
		save = &vmw_priv->vga_save[i];
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
	}

	return 0;
}

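/**
 * vmw_kms_validate_mode_vram - Check that a mode fits in display memory
 * @dev_priv: Pointer to the device private struct.
 * @pitch: Scanline pitch in bytes.
 * @height: Mode height in pixels.
 *
 * Returns true if pitch * height fits within the primary bounding box
 * memory (screen targets) or the vram size (other display units).
 */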
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
				uint32_t pitch,
				uint32_t height)
{
	return ((u64) pitch * (u64) height) < (u64)
		((dev_priv->active_display_unit == vmw_du_screen_target) ?
		 dev_priv->prim_bb_mem : dev_priv->vram_size);
}


/*
 * Called by DRM code with vbl_lock held.
 */
u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	return 0;
}

/*
 * Called by DRM code with vbl_lock held.
 */
int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	return -EINVAL;
}

/*
 * Called by DRM code with vbl_lock held.
 */
void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
}

2016/**
2017 * vmw_du_update_layout - Update the display unit with topology from resolution
2018 * plugin and generate DRM uevent
2019 * @dev_priv: device private
2020 * @num_rects: number of drm_rect in rects
2021 * @rects: topology to update
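 *
 * Example (hypothetical values): two side-by-side 1920x1080 units would be
 * rects = { { 0, 0, 1920, 1080 }, { 1920, 0, 3840, 1080 } }; note that
 * struct drm_rect stores the x1/y1/x2/y2 corners rather than width/height.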
2022 */
2023static int vmw_du_update_layout(struct vmw_private *dev_priv,
2024 unsigned int num_rects, struct drm_rect *rects)
2025{
2026 struct drm_device *dev = dev_priv->dev;
2027 struct vmw_display_unit *du;
2028 struct drm_connector *con;
2029 struct drm_connector_list_iter conn_iter;
2030 struct drm_modeset_acquire_ctx ctx;
2031 struct drm_crtc *crtc;
2032 int ret;
2033
2034 /* Currently gui_x/y are protected by the crtc mutex */
2035 mutex_lock(&dev->mode_config.mutex);
2036 drm_modeset_acquire_init(&ctx, 0);
2037retry:
2038 drm_for_each_crtc(crtc, dev) {
2039 ret = drm_modeset_lock(&crtc->mutex, &ctx);
2040 if (ret < 0) {
2041 if (ret == -EDEADLK) {
2042 drm_modeset_backoff(&ctx);
2043 goto retry;
2044 }
2045 goto out_fini;
2046 }
2047 }
2048
2049 drm_connector_list_iter_begin(dev, &conn_iter);
2050 drm_for_each_connector_iter(con, &conn_iter) {
2051 du = vmw_connector_to_du(con);
2052 if (num_rects > du->unit) {
2053 du->pref_width = drm_rect_width(&rects[du->unit]);
2054 du->pref_height = drm_rect_height(&rects[du->unit]);
2055 du->pref_active = true;
2056 du->gui_x = rects[du->unit].x1;
2057 du->gui_y = rects[du->unit].y1;
2058 } else {
2059 du->pref_width = 800;
2060 du->pref_height = 600;
2061 du->pref_active = false;
2062 du->gui_x = 0;
2063 du->gui_y = 0;
2064 }
2065 }
2066 drm_connector_list_iter_end(&conn_iter);
2067
2068 list_for_each_entry(con, &dev->mode_config.connector_list, head) {
2069 du = vmw_connector_to_du(con);
2070 if (num_rects > du->unit) {
2071 drm_object_property_set_value
2072 (&con->base, dev->mode_config.suggested_x_property,
2073 du->gui_x);
2074 drm_object_property_set_value
2075 (&con->base, dev->mode_config.suggested_y_property,
2076 du->gui_y);
2077 } else {
2078 drm_object_property_set_value
2079 (&con->base, dev->mode_config.suggested_x_property,
2080 0);
2081 drm_object_property_set_value
2082 (&con->base, dev->mode_config.suggested_y_property,
2083 0);
2084 }
2085 con->status = vmw_du_connector_detect(con, true);
2086 }
2087
2088 drm_sysfs_hotplug_event(dev);
2089out_fini:
2090 drm_modeset_drop_locks(&ctx);
2091 drm_modeset_acquire_fini(&ctx);
2092 mutex_unlock(&dev->mode_config.mutex);
2093
2094 return 0;
2095}
2096
2097int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
2098 u16 *r, u16 *g, u16 *b,
2099 uint32_t size,
2100 struct drm_modeset_acquire_ctx *ctx)
2101{
2102 struct vmw_private *dev_priv = vmw_priv(crtc->dev);
2103 int i;
2104
2105 for (i = 0; i < size; i++) {
2106 DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
2107 r[i], g[i], b[i]);
2108 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
2109 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
2110 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
2111 }
2112
2113 return 0;
2114}
2115
2116int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
2117{
2118 return 0;
2119}
2120
2121enum drm_connector_status
2122vmw_du_connector_detect(struct drm_connector *connector, bool force)
2123{
2124 uint32_t num_displays;
2125 struct drm_device *dev = connector->dev;
2126 struct vmw_private *dev_priv = vmw_priv(dev);
2127 struct vmw_display_unit *du = vmw_connector_to_du(connector);
2128
2129 num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
2130
2131 return ((du->unit < num_displays &&
2132 du->pref_active) ?
2133 connector_status_connected : connector_status_disconnected);
2134}
2135
2136static struct drm_display_mode vmw_kms_connector_builtin[] = {
2137 /* 640x480@60Hz */
2138 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
2139 752, 800, 0, 480, 489, 492, 525, 0,
2140 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
2141 /* 800x600@60Hz */
2142 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
2143 968, 1056, 0, 600, 601, 605, 628, 0,
2144 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2145 /* 1024x768@60Hz */
2146 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
2147 1184, 1344, 0, 768, 771, 777, 806, 0,
2148 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
2149 /* 1152x864@75Hz */
2150 { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
2151 1344, 1600, 0, 864, 865, 868, 900, 0,
2152 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2153 /* 1280x768@60Hz */
2154 { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
2155 1472, 1664, 0, 768, 771, 778, 798, 0,
2156 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2157 /* 1280x800@60Hz */
2158 { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
2159 1480, 1680, 0, 800, 803, 809, 831, 0,
2160 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2161 /* 1280x960@60Hz */
2162 { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
2163 1488, 1800, 0, 960, 961, 964, 1000, 0,
2164 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2165 /* 1280x1024@60Hz */
2166 { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
2167 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
2168 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2169 /* 1360x768@60Hz */
2170 { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
2171 1536, 1792, 0, 768, 771, 777, 795, 0,
2172 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2173 /* 1400x1050@60Hz */
2174 { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
2175 1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
2176 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2177 /* 1440x900@60Hz */
2178 { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
2179 1672, 1904, 0, 900, 903, 909, 934, 0,
2180 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2181 /* 1600x1200@60Hz */
2182 { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
2183 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
2184 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2185 /* 1680x1050@60Hz */
2186 { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
2187 1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
2188 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2189 /* 1792x1344@60Hz */
2190 { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
2191 2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
2192 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2193 /* 1856x1392@60Hz */
2194 { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
2195 2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
2196 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2197 /* 1920x1200@60Hz */
2198 { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
2199 2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
2200 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2201 /* 1920x1440@60Hz */
2202 { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
2203 2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
2204 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2205 /* 2560x1600@60Hz */
2206 { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
2207 3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
2208 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2209 /* Terminate */
2210 { DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
2211};
2212
2213/**
2214 * vmw_guess_mode_timing - Provide fake timings for a
2215 * 60Hz vrefresh mode.
2216 *
2217 * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
2218 * members filled in.
2219 */
2220void vmw_guess_mode_timing(struct drm_display_mode *mode)
2221{
2222 mode->hsync_start = mode->hdisplay + 50;
2223 mode->hsync_end = mode->hsync_start + 50;
2224 mode->htotal = mode->hsync_end + 50;
2225
2226 mode->vsync_start = mode->vdisplay + 50;
2227 mode->vsync_end = mode->vsync_start + 50;
2228 mode->vtotal = mode->vsync_end + 50;
2229
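 /*
  * Pixel clock in kHz for a 60 Hz refresh: htotal * vtotal * 60 / 1000,
  * computed as / 100 * 6. E.g. 1024x768 gives htotal = 1174 and
  * vtotal = 918, so clock = 1174 * 918 / 100 * 6 = 64662 kHz (~60 Hz).
  */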
2230 mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
2231 mode->vrefresh = drm_mode_vrefresh(mode);
2232}
2233
2234
2235int vmw_du_connector_fill_modes(struct drm_connector *connector,
2236 uint32_t max_width, uint32_t max_height)
2237{
2238 struct vmw_display_unit *du = vmw_connector_to_du(connector);
2239 struct drm_device *dev = connector->dev;
2240 struct vmw_private *dev_priv = vmw_priv(dev);
2241 struct drm_display_mode *mode = NULL;
2242 struct drm_display_mode *bmode;
2243 struct drm_display_mode prefmode = { DRM_MODE("preferred",
2244 DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
2245 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2246 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
2247 };
2248 int i;
2249 u32 assumed_bpp = 4;
2250
2251 if (dev_priv->assume_16bpp)
2252 assumed_bpp = 2;
2253
2254 max_width = min(max_width, dev_priv->texture_max_width);
2255 max_height = min(max_height, dev_priv->texture_max_height);
2256
2257 /*
2258	 * For STDU, a mode is additionally limited by the
2259	 * SVGA_REG_SCREENTARGET_MAX_WIDTH/HEIGHT registers.
2260 */
2261 if (dev_priv->active_display_unit == vmw_du_screen_target) {
2262 max_width = min(max_width, dev_priv->stdu_max_width);
2263 max_height = min(max_height, dev_priv->stdu_max_height);
2264 }
2265
2266 /* Add preferred mode */
2267 mode = drm_mode_duplicate(dev, &prefmode);
2268 if (!mode)
2269 return 0;
2270 mode->hdisplay = du->pref_width;
2271 mode->vdisplay = du->pref_height;
2272 vmw_guess_mode_timing(mode);
2273
2274 if (vmw_kms_validate_mode_vram(dev_priv,
2275 mode->hdisplay * assumed_bpp,
2276 mode->vdisplay)) {
2277 drm_mode_probed_add(connector, mode);
2278 } else {
2279 drm_mode_destroy(dev, mode);
2280 mode = NULL;
2281 }
2282
2283 if (du->pref_mode) {
2284 list_del_init(&du->pref_mode->head);
2285 drm_mode_destroy(dev, du->pref_mode);
2286 }
2287
2288 /* mode might be NULL here; this is intended */
2289 du->pref_mode = mode;
2290
2291 for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
2292 bmode = &vmw_kms_connector_builtin[i];
2293 if (bmode->hdisplay > max_width ||
2294 bmode->vdisplay > max_height)
2295 continue;
2296
2297 if (!vmw_kms_validate_mode_vram(dev_priv,
2298 bmode->hdisplay * assumed_bpp,
2299 bmode->vdisplay))
2300 continue;
2301
2302 mode = drm_mode_duplicate(dev, bmode);
2303 if (!mode)
2304 return 0;
2305 mode->vrefresh = drm_mode_vrefresh(mode);
2306
2307 drm_mode_probed_add(connector, mode);
2308 }
2309
2310 drm_connector_list_update(connector);
2311 /* Move the preferred mode first, to help apps pick the right mode. */
2312 drm_mode_sort(&connector->modes);
2313
2314 return 1;
2315}
2316
2317/**
2318 * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
2319 * @dev: drm device for the ioctl
2320 * @data: data pointer for the ioctl
2321 * @file_priv: drm file for the ioctl call
2322 *
2323 * Update preferred topology of display unit as per ioctl request. The topology
2324 * is expressed as array of drm_vmw_rect.
2325 * e.g.
2326 * [0 0 640 480] [640 0 800 600] [0 480 640 480]
2327 *
2328 * NOTE:
2329 * The x and y offsets (upper left) in drm_vmw_rect cannot be less than 0.
2330 * Besides the device limit on topology, x + w and y + h (lower right) cannot
2331 * be greater than INT_MAX, so a topology beyond these limits returns an error.
2332 *
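 * A minimal user-space sketch (hypothetical, error handling omitted):
 *
 *	struct drm_vmw_rect rects[2] = {
 *		{ .x = 0, .y = 0, .w = 1920, .h = 1080 },
 *		{ .x = 1920, .y = 0, .w = 1920, .h = 1080 },
 *	};
 *	struct drm_vmw_update_layout_arg arg = {
 *		.num_outputs = 2,
 *		.rects = (unsigned long)rects,
 *	};
 *	drmCommandWrite(fd, DRM_VMW_UPDATE_LAYOUT, &arg, sizeof(arg));
 *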
2333 * Returns:
2334 * Zero on success, negative errno on failure.
2335 */
2336int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
2337 struct drm_file *file_priv)
2338{
2339 struct vmw_private *dev_priv = vmw_priv(dev);
2340 struct drm_mode_config *mode_config = &dev->mode_config;
2341 struct drm_vmw_update_layout_arg *arg =
2342 (struct drm_vmw_update_layout_arg *)data;
2343 void __user *user_rects;
2344 struct drm_vmw_rect *rects;
2345 struct drm_rect *drm_rects;
2346 unsigned rects_size;
2347 int ret, i;
2348
2349 if (!arg->num_outputs) {
2350 struct drm_rect def_rect = {0, 0, 800, 600};
2351 VMW_DEBUG_KMS("Default layout x1 = %d y1 = %d x2 = %d y2 = %d\n",
2352 def_rect.x1, def_rect.y1,
2353 def_rect.x2, def_rect.y2);
2354 vmw_du_update_layout(dev_priv, 1, &def_rect);
2355 return 0;
2356 }
2357
2358 rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
2359 rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
2360 GFP_KERNEL);
2361 if (unlikely(!rects))
2362 return -ENOMEM;
2363
2364 user_rects = (void __user *)(unsigned long)arg->rects;
2365 ret = copy_from_user(rects, user_rects, rects_size);
2366 if (unlikely(ret != 0)) {
2367 DRM_ERROR("Failed to get rects.\n");
2368 ret = -EFAULT;
2369 goto out_free;
2370 }
2371
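 /*
  * Reinterpret the same buffer as struct drm_rect and convert in place
  * below; curr_rect keeps a copy of each entry so that writing the x2/y2
  * corners cannot clobber the not-yet-read w/h fields.
  */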
2372 drm_rects = (struct drm_rect *)rects;
2373
2374 VMW_DEBUG_KMS("Layout count = %u\n", arg->num_outputs);
2375 for (i = 0; i < arg->num_outputs; i++) {
2376 struct drm_vmw_rect curr_rect;
2377
2378 /* Verify user-space values for overflow, as the kernel uses drm_rect */
2379 if ((rects[i].x + rects[i].w > INT_MAX) ||
2380 (rects[i].y + rects[i].h > INT_MAX)) {
2381 ret = -ERANGE;
2382 goto out_free;
2383 }
2384
2385 curr_rect = rects[i];
2386 drm_rects[i].x1 = curr_rect.x;
2387 drm_rects[i].y1 = curr_rect.y;
2388 drm_rects[i].x2 = curr_rect.x + curr_rect.w;
2389 drm_rects[i].y2 = curr_rect.y + curr_rect.h;
2390
2391 VMW_DEBUG_KMS(" x1 = %d y1 = %d x2 = %d y2 = %d\n",
2392 drm_rects[i].x1, drm_rects[i].y1,
2393 drm_rects[i].x2, drm_rects[i].y2);
2394
2395 /*
2396		 * Currently this check limits the topology to
2397		 * mode_config->max_width/height (which is actually the max
2398		 * texture size supported by the virtual device). The limit is
2399		 * here to accommodate window managers that create one big
2400		 * framebuffer for the whole topology.
2401 */
2402 if (drm_rects[i].x1 < 0 || drm_rects[i].y1 < 0 ||
2403 drm_rects[i].x2 > mode_config->max_width ||
2404 drm_rects[i].y2 > mode_config->max_height) {
2405 VMW_DEBUG_KMS("Invalid layout %d %d %d %d\n",
2406 drm_rects[i].x1, drm_rects[i].y1,
2407 drm_rects[i].x2, drm_rects[i].y2);
2408 ret = -EINVAL;
2409 goto out_free;
2410 }
2411 }
2412
2413 ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);
2414
2415 if (ret == 0)
2416 vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);
2417
2418out_free:
2419 kfree(rects);
2420 return ret;
2421}
2422
2423/**
2424 * vmw_kms_helper_dirty - Helper to build commands and perform actions based
2425 * on a set of cliprects and a set of display units.
2426 *
2427 * @dev_priv: Pointer to a device private structure.
2428 * @framebuffer: Pointer to the framebuffer on which to perform the actions.
2429 * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
2430 * Cliprects are given in framebuffer coordinates.
2431 * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
2432 * be NULL. Cliprects are given in source coordinates.
2433 * @dest_x: X coordinate offset for the crtc / destination clip rects.
2434 * @dest_y: Y coordinate offset for the crtc / destination clip rects.
2435 * @num_clips: Number of cliprects in the @clips or @vclips array.
2436 * @increment: Integer with which to increment the clip counter when looping.
2437 * Used to skip a predetermined number of clip rects.
2438 * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
2439 */
2440int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
2441 struct vmw_framebuffer *framebuffer,
2442 const struct drm_clip_rect *clips,
2443 const struct drm_vmw_rect *vclips,
2444 s32 dest_x, s32 dest_y,
2445 int num_clips,
2446 int increment,
2447 struct vmw_kms_dirty *dirty)
2448{
2449 struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
2450 struct drm_crtc *crtc;
2451 u32 num_units = 0;
2452 u32 i, k;
2453
2454 dirty->dev_priv = dev_priv;
2455
2456 /* If crtc is passed, no need to iterate over other display units */
2457 if (dirty->crtc) {
2458 units[num_units++] = vmw_crtc_to_du(dirty->crtc);
2459 } else {
2460 list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
2461 head) {
2462 struct drm_plane *plane = crtc->primary;
2463
2464 if (plane->state->fb == &framebuffer->base)
2465 units[num_units++] = vmw_crtc_to_du(crtc);
2466 }
2467 }
2468
2469 for (k = 0; k < num_units; k++) {
2470 struct vmw_display_unit *unit = units[k];
2471 s32 crtc_x = unit->crtc.x;
2472 s32 crtc_y = unit->crtc.y;
2473 s32 crtc_width = unit->crtc.mode.hdisplay;
2474 s32 crtc_height = unit->crtc.mode.vdisplay;
2475 const struct drm_clip_rect *clips_ptr = clips;
2476 const struct drm_vmw_rect *vclips_ptr = vclips;
2477
2478 dirty->unit = unit;
2479 if (dirty->fifo_reserve_size > 0) {
2480 dirty->cmd = VMW_FIFO_RESERVE(dev_priv,
2481 dirty->fifo_reserve_size);
2482 if (!dirty->cmd)
2483 return -ENOMEM;
2484
2485 memset(dirty->cmd, 0, dirty->fifo_reserve_size);
2486 }
2487 dirty->num_hits = 0;
2488 for (i = 0; i < num_clips; i++, clips_ptr += increment,
2489 vclips_ptr += increment) {
2490 s32 clip_left;
2491 s32 clip_top;
2492
2493 /*
2494 * Select clip array type. Note that integer type
2495 * in @clips is unsigned short, whereas in @vclips
2496 * it's 32-bit.
2497 */
2498 if (clips) {
2499 dirty->fb_x = (s32) clips_ptr->x1;
2500 dirty->fb_y = (s32) clips_ptr->y1;
2501 dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
2502 crtc_x;
2503 dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
2504 crtc_y;
2505 } else {
2506 dirty->fb_x = vclips_ptr->x;
2507 dirty->fb_y = vclips_ptr->y;
2508 dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
2509 dest_x - crtc_x;
2510 dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
2511 dest_y - crtc_y;
2512 }
2513
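 /* Translate fb coordinates into coordinates relative to this unit's crtc. */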
2514 dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
2515 dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;
2516
2517 /* Skip this clip if it's outside the crtc region */
2518 if (dirty->unit_x1 >= crtc_width ||
2519 dirty->unit_y1 >= crtc_height ||
2520 dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
2521 continue;
2522
2523 /* Clip right and bottom to crtc limits */
2524 dirty->unit_x2 = min_t(s32, dirty->unit_x2,
2525 crtc_width);
2526 dirty->unit_y2 = min_t(s32, dirty->unit_y2,
2527 crtc_height);
2528
2529 /* Clip left and top to crtc limits */
2530 clip_left = min_t(s32, dirty->unit_x1, 0);
2531 clip_top = min_t(s32, dirty->unit_y1, 0);
2532 dirty->unit_x1 -= clip_left;
2533 dirty->unit_y1 -= clip_top;
2534 dirty->fb_x -= clip_left;
2535 dirty->fb_y -= clip_top;
2536
2537 dirty->clip(dirty);
2538 }
2539
2540 dirty->fifo_commit(dirty);
2541 }
2542
2543 return 0;
2544}
2545
2546/**
2547 * vmw_kms_helper_validation_finish - Helper for post KMS command submission
2548 * cleanup and fencing
2549 * @dev_priv: Pointer to the device-private struct
2550 * @file_priv: Pointer identifying the client when user-space fencing is used
2551 * @ctx: Pointer to the validation context
2552 * @out_fence: If non-NULL, returned refcounted fence-pointer
2553 * @user_fence_rep: If non-NULL, pointer to user-space address area
2554 * in which to copy user-space fence info
2555 */
2556void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
2557 struct drm_file *file_priv,
2558 struct vmw_validation_context *ctx,
2559 struct vmw_fence_obj **out_fence,
2560 struct drm_vmw_fence_rep __user *
2561 user_fence_rep)
2562{
2563 struct vmw_fence_obj *fence = NULL;
2564 uint32_t handle = 0;
2565 int ret = 0;
2566
2567 if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
2568 out_fence)
2569 ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
2570 file_priv ? &handle : NULL);
2571 vmw_validation_done(ctx, fence);
2572 if (file_priv)
2573 vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
2574 ret, user_fence_rep, fence,
2575 handle, -1);
2576 if (out_fence)
2577 *out_fence = fence;
2578 else
2579 vmw_fence_obj_unreference(&fence);
2580}
2581
2582/**
2583 * vmw_kms_update_proxy - Helper function to update a proxy surface from
2584 * its backing MOB.
2585 *
2586 * @res: Pointer to the surface resource
2587 * @clips: Clip rects in framebuffer (surface) space.
2588 * @num_clips: Number of clips in @clips.
2589 * @increment: Integer with which to increment the clip counter when looping.
2590 * Used to skip a predetermined number of clip rects.
2591 *
2592 * This function makes sure the proxy surface is updated from its backing MOB
2593 * using the region given by @clips. The surface resource @res and its backing
2594 * MOB need to be reserved and validated on call.
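 *
 * Each clip emits one SVGA_3D_CMD_UPDATE_GB_IMAGE command covering the
 * corresponding box of the surface.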
2595 */
2596int vmw_kms_update_proxy(struct vmw_resource *res,
2597 const struct drm_clip_rect *clips,
2598 unsigned num_clips,
2599 int increment)
2600{
2601 struct vmw_private *dev_priv = res->dev_priv;
2602 struct drm_vmw_size *size = &vmw_res_to_srf(res)->base_size;
2603 struct {
2604 SVGA3dCmdHeader header;
2605 SVGA3dCmdUpdateGBImage body;
2606 } *cmd;
2607 SVGA3dBox *box;
2608 size_t copy_size = 0;
2609 int i;
2610
2611 if (!clips)
2612 return 0;
2613
2614 cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
2615 if (!cmd)
2616 return -ENOMEM;
2617
2618 for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
2619 box = &cmd->body.box;
2620
2621 cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
2622 cmd->header.size = sizeof(cmd->body);
2623 cmd->body.image.sid = res->id;
2624 cmd->body.image.face = 0;
2625 cmd->body.image.mipmap = 0;
2626
2627 if (clips->x1 > size->width || clips->x2 > size->width ||
2628 clips->y1 > size->height || clips->y2 > size->height) {
2629 DRM_ERROR("Invalid clips outside of framebuffer.\n");
2630 return -EINVAL;
2631 }
2632
2633 box->x = clips->x1;
2634 box->y = clips->y1;
2635 box->z = 0;
2636 box->w = clips->x2 - clips->x1;
2637 box->h = clips->y2 - clips->y1;
2638 box->d = 1;
2639
2640 copy_size += sizeof(*cmd);
2641 }
2642
2643 vmw_fifo_commit(dev_priv, copy_size);
2644
2645 return 0;
2646}
2647
2648int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
2649 unsigned unit,
2650 u32 max_width,
2651 u32 max_height,
2652 struct drm_connector **p_con,
2653 struct drm_crtc **p_crtc,
2654 struct drm_display_mode **p_mode)
2655{
2656 struct drm_connector *con;
2657 struct vmw_display_unit *du;
2658 struct drm_display_mode *mode;
2659 int i = 0;
2660 int ret = 0;
2661
2662 mutex_lock(&dev_priv->dev->mode_config.mutex);
2663 list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list,
2664 head) {
2665 if (i == unit)
2666 break;
2667
2668 ++i;
2669 }
2670
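 /* If the loop ran off the end of the list, no connector matched @unit. */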
2671 if (&con->head == &dev_priv->dev->mode_config.connector_list) {
2672 DRM_ERROR("Could not find initial display unit.\n");
2673 ret = -EINVAL;
2674 goto out_unlock;
2675 }
2676
2677 if (list_empty(&con->modes))
2678 (void) vmw_du_connector_fill_modes(con, max_width, max_height);
2679
2680 if (list_empty(&con->modes)) {
2681 DRM_ERROR("Could not find initial display mode.\n");
2682 ret = -EINVAL;
2683 goto out_unlock;
2684 }
2685
2686 du = vmw_connector_to_du(con);
2687 *p_con = con;
2688 *p_crtc = &du->crtc;
2689
2690 list_for_each_entry(mode, &con->modes, head) {
2691 if (mode->type & DRM_MODE_TYPE_PREFERRED)
2692 break;
2693 }
2694
2695 if (&mode->head == &con->modes) {
2696 WARN_ONCE(true, "Could not find initial preferred mode.\n");
2697 *p_mode = list_first_entry(&con->modes,
2698 struct drm_display_mode,
2699 head);
2700 } else {
2701 *p_mode = mode;
2702 }
2703
2704 out_unlock:
2705 mutex_unlock(&dev_priv->dev->mode_config.mutex);
2706
2707 return ret;
2708}
2709
2710/**
2711 * vmw_kms_create_implicit_placement_property - Set up the implicit placement
2712 * property.
2713 *
2714 * @dev_priv: Pointer to a device private struct.
2715 *
2716 * Sets up the implicit placement property unless it's already set up.
2717 */
2718void
2719vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
2720{
2721 if (dev_priv->implicit_placement_property)
2722 return;
2723
2724 dev_priv->implicit_placement_property =
2725 drm_property_create_range(dev_priv->dev,
2726 DRM_MODE_PROP_IMMUTABLE,
2727 "implicit_placement", 0, 1);
2728}
2729
2730/**
2731 * vmw_kms_suspend - Save modesetting state and turn modesetting off.
2732 *
2733 * @dev: Pointer to the drm device
2734 * Return: 0 on success. Negative error code on failure.
2735 */
2736int vmw_kms_suspend(struct drm_device *dev)
2737{
2738 struct vmw_private *dev_priv = vmw_priv(dev);
2739
2740 dev_priv->suspend_state = drm_atomic_helper_suspend(dev);
2741 if (IS_ERR(dev_priv->suspend_state)) {
2742 int ret = PTR_ERR(dev_priv->suspend_state);
2743
2744 DRM_ERROR("Failed kms suspend: %d\n", ret);
2745 dev_priv->suspend_state = NULL;
2746
2747 return ret;
2748 }
2749
2750 return 0;
2751}
2752
2753
2754/**
2755 * vmw_kms_resume - Re-enable modesetting and restore state
2756 *
2757 * @dev: Pointer to the drm device
2758 * Return: 0 on success. Negative error code on failure.
2759 *
2760 * State is resumed from a previous vmw_kms_suspend(). It's illegal
2761 * to call this function without a previous vmw_kms_suspend().
2762 */
2763int vmw_kms_resume(struct drm_device *dev)
2764{
2765 struct vmw_private *dev_priv = vmw_priv(dev);
2766 int ret;
2767
2768 if (WARN_ON(!dev_priv->suspend_state))
2769 return 0;
2770
2771 ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
2772 dev_priv->suspend_state = NULL;
2773
2774 return ret;
2775}
2776
2777/**
2778 * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
2779 *
2780 * @dev: Pointer to the drm device
2781 */
2782void vmw_kms_lost_device(struct drm_device *dev)
2783{
2784 drm_atomic_helper_shutdown(dev);
2785}
2786
2787/**
2788 * vmw_du_helper_plane_update - Helper to do plane update on a display unit.
2789 * @update: The closure structure.
2790 *
2791 * Call this helper after setting the callbacks in &vmw_du_update_plane to
2792 * perform a plane update on a display unit.
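 *
 * The callbacks fire in this order: calc_fifo_size() once, the optional
 * post_prepare() and pre_clip() hooks, then clip(), if set, for each damage
 * rect, and finally post_clip() with the bounding box of all clips.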
2793 *
2794 * Return: 0 on success or a negative error code on failure.
2795 */
2796int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
2797{
2798 struct drm_plane_state *state = update->plane->state;
2799 struct drm_plane_state *old_state = update->old_state;
2800 struct drm_atomic_helper_damage_iter iter;
2801 struct drm_rect clip;
2802 struct drm_rect bb;
2803 DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
2804 uint32_t reserved_size = 0;
2805 uint32_t submit_size = 0;
2806 uint32_t curr_size = 0;
2807 uint32_t num_hits = 0;
2808 void *cmd_start;
2809 char *cmd_next;
2810 int ret;
2811
2812 /*
2813	 * Iterate in advance to check whether a plane update is really needed,
2814	 * and to count the clips within the plane src for the fifo allocation.
2815 */
2816 drm_atomic_helper_damage_iter_init(&iter, old_state, state);
2817 drm_atomic_for_each_plane_damage(&iter, &clip)
2818 num_hits++;
2819
2820 if (num_hits == 0)
2821 return 0;
2822
2823 if (update->vfb->bo) {
2824 struct vmw_framebuffer_bo *vfbbo =
2825 container_of(update->vfb, typeof(*vfbbo), base);
2826
2827 ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer, false,
2828 update->cpu_blit);
2829 } else {
2830 struct vmw_framebuffer_surface *vfbs =
2831 container_of(update->vfb, typeof(*vfbs), base);
2832
2833 ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res,
2834 0, VMW_RES_DIRTY_NONE, NULL,
2835 NULL);
2836 }
2837
2838 if (ret)
2839 return ret;
2840
2841 ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr);
2842 if (ret)
2843 goto out_unref;
2844
2845 reserved_size = update->calc_fifo_size(update, num_hits);
2846 cmd_start = VMW_FIFO_RESERVE(update->dev_priv, reserved_size);
2847 if (!cmd_start) {
2848 ret = -ENOMEM;
2849 goto out_revert;
2850 }
2851
2852 cmd_next = cmd_start;
2853
2854 if (update->post_prepare) {
2855 curr_size = update->post_prepare(update, cmd_next);
2856 cmd_next += curr_size;
2857 submit_size += curr_size;
2858 }
2859
2860 if (update->pre_clip) {
2861 curr_size = update->pre_clip(update, cmd_next, num_hits);
2862 cmd_next += curr_size;
2863 submit_size += curr_size;
2864 }
2865
2866 bb.x1 = INT_MAX;
2867 bb.y1 = INT_MAX;
2868 bb.x2 = INT_MIN;
2869 bb.y2 = INT_MIN;
2870
2871 drm_atomic_helper_damage_iter_init(&iter, old_state, state);
2872 drm_atomic_for_each_plane_damage(&iter, &clip) {
2873 uint32_t fb_x = clip.x1;
2874 uint32_t fb_y = clip.y1;
2875
2876 vmw_du_translate_to_crtc(state, &clip);
2877 if (update->clip) {
2878 curr_size = update->clip(update, cmd_next, &clip, fb_x,
2879 fb_y);
2880 cmd_next += curr_size;
2881 submit_size += curr_size;
2882 }
2883 bb.x1 = min_t(int, bb.x1, clip.x1);
2884 bb.y1 = min_t(int, bb.y1, clip.y1);
2885 bb.x2 = max_t(int, bb.x2, clip.x2);
2886 bb.y2 = max_t(int, bb.y2, clip.y2);
2887 }
2888
2889 curr_size = update->post_clip(update, cmd_next, &bb);
2890 submit_size += curr_size;
2891
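 /* If the callbacks somehow wrote more than was reserved, commit nothing. */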
2892 if (reserved_size < submit_size)
2893 submit_size = 0;
2894
2895 vmw_fifo_commit(update->dev_priv, submit_size);
2896
2897 vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
2898 update->out_fence, NULL);
2899 return ret;
2900
2901out_revert:
2902 vmw_validation_revert(&val_ctx);
2903
2904out_unref:
2905 vmw_validation_unref_lists(&val_ctx);
2906 return ret;
2907}