Changes from all commits · 43 commits
9d340c2
Revert "drm/virtio: process virtgpu bo before bo free"
bosheng1 Apr 16, 2025
3b09b78
Revert "drm/virtio: fix vqs_info memory allocation issue"
bosheng1 Apr 16, 2025
0797373
Revert "drm/virtio: Reinstall vblank event when flip sequence doesn't…
bosheng1 Apr 16, 2025
4035376
Revert "drm/virtio: Protect cached vblank event with atomic operations"
bosheng1 Apr 16, 2025
cb5d0bb
Revert "drm/virtio: add support for VF LMEM type BLOB creation"
bosheng1 Apr 16, 2025
b590da2
Revert "drm/virtio: Use flip sequence from virtio-GPU back-end"
bosheng1 Apr 16, 2025
ed5ad7a
Revert "drm/virtio: Add a debugfs file to show object info"
bosheng1 Apr 16, 2025
22fbec0
Revert "drm/virtio: Save address entries of PRIME objects"
bosheng1 Apr 16, 2025
fee43f6
Revert "drm/virtio: put set modifier ahead of set scanout"
bosheng1 Apr 16, 2025
dc4698b
Revert "drm/virtio: Expose allow_p2p capability to user space"
bosheng1 Apr 16, 2025
070a131
Revert "drm/virtio: set max vblank number"
bosheng1 Apr 16, 2025
91e3216
Revert "drm/virtio: virtio gpu: add import dmabuf check"
bosheng1 Apr 16, 2025
0850552
Revert "drm/virtio: Add ioctl to distinguish virtio-gpu and virtio-iv…
bosheng1 Apr 16, 2025
ac70632
Revert "drm/virtio: Fix virtio-gpu uses symbol from namespace DMA_BUF…
bosheng1 Apr 16, 2025
4382558
Revert "drm/virtio: Change value of feature ALLOW_P2P to 13"
bosheng1 Apr 16, 2025
606307f
Revert "drm/virtio: Add multiple planar format buffer support"
bosheng1 Apr 16, 2025
6fac09d
Revert "drm/virtio: Add sprite planes support for virtio gpu"
bosheng1 Apr 16, 2025
636940c
Revert "drm/virtio: Show more capabilities in debugfs"
bosheng1 Apr 16, 2025
d940c5d
Revert "drm/virtio: Set peer2peer flag when VIRTIO_GPU_F_ALLOW_P2P is…
bosheng1 Apr 16, 2025
92553d0
Revert "drm/virtio: Add allow_p2p capability"
bosheng1 Apr 16, 2025
a3620e3
Revert "drm/virtio: Use DMA API in PRIME importing code"
bosheng1 Apr 16, 2025
302e46d
Revert "drm/virtio: Support tile-4 modifier"
bosheng1 Apr 16, 2025
02ea52e
Revert "drm/virtio: improve virtio gpu vblank operation"
bosheng1 Apr 16, 2025
177dbbd
Revert "drm/virtio: enable vblank for crtc"
bosheng1 Apr 16, 2025
5849567
Revert "drm/virtio: suppport configurable queue number"
bosheng1 Apr 16, 2025
85a704c
Revert "drm/virtio: add scaling support"
bosheng1 Apr 16, 2025
ffaeefc
Revert "drivers: virtgpu: Add support for gem buffer import"
bosheng1 Apr 16, 2025
a65c9ed
Revert "drivers: virtgpu: Add virtio-gpu tiling format support"
bosheng1 Apr 16, 2025
a4a0611
Revert "drm/virtio: save and restore virtio_gpu_objects"
bosheng1 Apr 16, 2025
d09e27a
Revert "drm/virtio: freeze and restore hooks to support suspend and r…
bosheng1 Apr 16, 2025
883d883
Revert "Revert "drm/virtio: Add a helper to map and note the dma addr…
bosheng1 Apr 16, 2025
9350097
Revert "Revert "drm/virtio: Add helpers to initialize and free the im…
bosheng1 Apr 16, 2025
2424acf
Revert "Revert "drm/virtio: Import prime buffers from other devices a…
bosheng1 Apr 16, 2025
d01d991
Revert "Revert "drm/virtio: Add prepare and cleanup routines for impo…
bosheng1 Apr 16, 2025
bb6761b
drm/virtio: freeze and restore hooks to support suspend and resume
downor Jun 28, 2022
59530b6
drm/virtio: save and restore virtio_gpu_objects
downor Aug 18, 2022
8c142a1
drivers: virtgpu: Add virtio-gpu tiling format support
yfliuuu Jul 22, 2023
f2f64d1
drm/virtio: add scaling support
liuhang-bit Jul 11, 2023
4847451
drm/virtio: suppport configurable queue number
liuhang-bit Jul 1, 2023
86f5db5
drm/virtio: enable vblank for crtc
liuhang-bit Jul 1, 2023
66eaa17
drm/virtio: improve virtio gpu vblank operation
bosheng1 Apr 10, 2024
2239a6a
drm/virtio: Support tile-4 modifier
yhe39 Feb 23, 2024
eb358d4
drm/virtio: put set modifier ahead of set scanout
bosheng1 Oct 25, 2024
28 changes: 0 additions & 28 deletions drivers/gpu/drm/virtio/virtgpu_debugfs.c
@@ -55,14 +55,6 @@ static int virtio_gpu_features(struct seq_file *m, void *data)

virtio_gpu_add_bool(m, "blob resources", vgdev->has_resource_blob);
virtio_gpu_add_bool(m, "context init", vgdev->has_context_init);
virtio_gpu_add_bool(m, "scaling", vgdev->has_scaling);
virtio_gpu_add_bool(m, "allow_p2p", vgdev->has_allow_p2p);
virtio_gpu_add_bool(m, "flip_sequence", vgdev->has_flip_sequence);
virtio_gpu_add_bool(m, "multi_plane", vgdev->has_multi_plane);
virtio_gpu_add_bool(m, "rotation", vgdev->has_rotation);
virtio_gpu_add_bool(m, "pixel_blend_mode", vgdev->has_pixel_blend_mode);
virtio_gpu_add_bool(m, "multi_planar", vgdev->has_multi_planar);
virtio_gpu_add_bool(m, "modifier", vgdev->has_modifier);
virtio_gpu_add_int(m, "cap sets", vgdev->num_capsets);
virtio_gpu_add_int(m, "scanouts", vgdev->num_scanouts);
if (vgdev->host_visible_region.len) {
@@ -102,30 +94,10 @@ virtio_gpu_debugfs_host_visible_mm(struct seq_file *m, void *data)
return 0;
}

static int
virtio_gpu_debugfs_objects(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct virtio_gpu_device *vgdev = node->minor->dev->dev_private;
struct virtio_gpu_object_restore *curr, *tmp;

list_for_each_entry_safe(curr, tmp, &vgdev->obj_rec, node) {
seq_printf(m, "hw_res_handle=%u, prime=%d\n",
curr->bo->hw_res_handle, curr->bo->prime);
if (curr->bo->prime)
for (unsigned i = 0; i < curr->bo->nents; ++i)
seq_printf(m, "\taddr=%lx, size=%x\n",
curr->bo->ents[i].addr,
curr->bo->ents[i].length);
}
return 0;
}

static struct drm_info_list virtio_gpu_debugfs_list[] = {
{ "virtio-gpu-features", virtio_gpu_features },
{ "virtio-gpu-irq-fence", virtio_gpu_debugfs_irq_info, 0, NULL },
{ "virtio-gpu-host-visible-mm", virtio_gpu_debugfs_host_visible_mm },
{ "virtio-gpu-objects", virtio_gpu_debugfs_objects },
};

#define VIRTIO_GPU_DEBUGFS_ENTRIES ARRAY_SIZE(virtio_gpu_debugfs_list)
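For context, the "virtio-gpu-objects" entry removed above was served through DRM's info-list machinery. Below is a minimal sketch of how such a table reaches debugfs; it mirrors the driver's debugfs_init hook in recent kernels, but the sketch itself is not part of this diff:

void virtio_gpu_debugfs_init(struct drm_minor *minor)
{
	/* One debugfs file is created per drm_info_list entry under the
	 * minor's debugfs directory; each ->show callback (such as the
	 * removed virtio_gpu_debugfs_objects) runs on every read. */
	drm_debugfs_create_files(virtio_gpu_debugfs_list,
				 VIRTIO_GPU_DEBUGFS_ENTRIES,
				 minor->debugfs_root, minor);
}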
243 changes: 43 additions & 200 deletions drivers/gpu/drm/virtio/virtgpu_display.c
@@ -55,9 +55,8 @@ static int virtio_irq_enable_vblank(struct drm_crtc *crtc)
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);

do {
virtio_gpu_vblank_poll_arm(vgdev->vblank[output->index].vblank.vq);
} while (!virtqueue_enable_cb(vgdev->vblank[output->index].vblank.vq));
virtio_gpu_vblank_poll_arm(vgdev->vblank[output->index].vblank.vq);
virtqueue_enable_cb(vgdev->vblank[output->index].vblank.vq);
return 0;
}
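The do/while loop dropped above is an instance of the standard virtio callback re-arm idiom, sketched generically below; process_buf() is a placeholder, not a function from this driver. virtqueue_enable_cb() returns false when a buffer arrived while callbacks were disabled, so the loop drains again rather than going idle and missing that completion:

static void drain_and_rearm(struct virtqueue *vq)
{
	void *buf;
	unsigned int len;

	do {
		/* Suppress callbacks while draining completed buffers. */
		virtqueue_disable_cb(vq);
		while ((buf = virtqueue_get_buf(vq, &len)))
			process_buf(buf, len);
		/* Re-arm; a false return means a buffer raced in. */
	} while (!virtqueue_enable_cb(vq));
}

The single-shot arm that replaces the loop relies on the host signalling again, which is presumably acceptable for vblank events that recur every frame.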

@@ -89,6 +88,26 @@ static const struct drm_framebuffer_funcs virtio_gpu_fb_funcs = {
.dirty = drm_atomic_helper_dirtyfb,
};

static int
virtio_gpu_framebuffer_init(struct drm_device *dev,
struct virtio_gpu_framebuffer *vgfb,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
int ret;

vgfb->base.obj[0] = obj;

drm_helper_mode_fill_fb_struct(dev, &vgfb->base, mode_cmd);

ret = drm_framebuffer_init(dev, &vgfb->base, &virtio_gpu_fb_funcs);
if (ret) {
vgfb->base.obj[0] = NULL;
return ret;
}
return 0;
}

static void virtio_gpu_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -117,94 +136,20 @@ static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
const unsigned pipe = drm_crtc_index(crtc);

struct drm_pending_vblank_event *e = xchg(&vgdev->cache_event[pipe], NULL);
/* Send the cached event even if it's still premature. */
if (e) {
spin_lock_irq(&dev->event_lock);
drm_crtc_send_vblank_event(crtc, e);
spin_unlock_irq(&dev->event_lock);
drm_crtc_vblank_put(crtc);
}

if (vgdev->has_vblank) {
if(vgdev->has_vblank) {
drm_crtc_vblank_off(crtc);
}

virtio_gpu_cmd_set_scanout(vgdev, output->index, 0, 0, 0, 0, 0);
virtio_gpu_notify(vgdev);
}

static void virtio_gpu_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct virtio_gpu_device *vgdev = crtc->dev->dev_private;
struct drm_device *drm = crtc->dev;
const unsigned pipe = drm_crtc_index(crtc);
struct drm_pending_vblank_event *old_e, *e = crtc->state->event;

if (!vgdev->has_vblank || !crtc->state->event)
return;

if (drm_crtc_vblank_get(crtc)) {
/* Cannot enable vblank, send it right now. */
spin_lock_irq(&drm->event_lock);
drm_crtc_send_vblank_event(crtc, e);
spin_unlock_irq(&drm->event_lock);
crtc->state->event = NULL;
return;
}

if (!vgdev->has_flip_sequence) {
spin_lock_irq(&drm->event_lock);
/* Let drm_handle_vblank signal it later in the vblank interrupt
* and the vblank refcount will be released at that time. */
drm_crtc_arm_vblank_event(crtc, e);
spin_unlock_irq(&drm->event_lock);
} else {
crtc->state->event->sequence =
atomic64_read(&vgdev->flip_sequence[pipe]) + 1;
old_e = xchg(&vgdev->cache_event[pipe], crtc->state->event);
if (old_e) {
spin_lock_irq(&drm->event_lock);
drm_crtc_send_vblank_event(crtc, old_e);
spin_unlock_irq(&drm->event_lock);
drm_crtc_vblank_put(crtc);
}
}
crtc->state->event = NULL;
}
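With atomic_begin gone, event completion moves back into atomic_flush (restored later in this hunk). Restated as a self-contained sketch of the two DRM completion paths used there — complete_flip_event() is an illustrative name, not a function in this driver:

static void complete_flip_event(struct drm_crtc *crtc,
				struct drm_pending_vblank_event *e)
{
	struct drm_device *dev = crtc->dev;

	spin_lock_irq(&dev->event_lock);
	if (drm_crtc_vblank_get(crtc) == 0)
		/* A vblank reference is held: drm_handle_vblank() will
		 * send the event and drop the reference at the next
		 * vblank interrupt. */
		drm_crtc_arm_vblank_event(crtc, e);
	else
		/* No vblank source available: complete immediately so
		 * userspace is not left waiting. */
		drm_crtc_send_vblank_event(crtc, e);
	spin_unlock_irq(&dev->event_lock);
}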

static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct virtio_gpu_output *output = NULL;
struct drm_device *dev = crtc->dev;
int num_scalers_need;
struct virtio_gpu_device *vgdev = dev->dev_private;

output = drm_crtc_to_virtio_gpu_output(crtc);
if(vgdev->has_scaling) {
num_scalers_need = hweight32(output->scaler_users);
if(num_scalers_need > SKL_NUM_SCALERS) {
drm_dbg_kms(dev, "Too many scaling requests %d > %d\n", num_scalers_need, SKL_NUM_SCALERS);
output->scaler_users = 0;
return -EINVAL;
}
}
return 0;
}

static void virtio_gpu_resource_flush_sync(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
virtio_gpu_cmd_flush_sync(vgdev, output->index);
virtio_gpu_notify(vgdev);
}

static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
@@ -213,13 +158,18 @@ static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
struct drm_device *drm = crtc->dev;
struct virtio_gpu_device *vgdev = drm->dev_private;
const unsigned pipe = drm_crtc_index(crtc);

if(vgdev->has_multi_plane)
virtio_gpu_resource_flush_sync(crtc);

if(vgdev->has_scaling)
output->scaler_users = 0;
if(vgdev->has_vblank) {
if (crtc->state->event) {
spin_lock_irq(&drm->event_lock);
if (drm_crtc_vblank_get(crtc) != 0)
drm_crtc_send_vblank_event(crtc, crtc->state->event);
else
drm_crtc_arm_vblank_event(crtc, crtc->state->event);
spin_unlock_irq(&drm->event_lock);
crtc->state->event = NULL;
}
}

/*
* virtio-gpu can't do modeset and plane update operations
@@ -234,7 +184,6 @@ static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,

static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = {
.mode_set_nofb = virtio_gpu_crtc_mode_set_nofb,
.atomic_begin = virtio_gpu_crtc_atomic_begin,
.atomic_check = virtio_gpu_crtc_atomic_check,
.atomic_flush = virtio_gpu_crtc_atomic_flush,
.atomic_enable = virtio_gpu_crtc_atomic_enable,
@@ -363,25 +312,12 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
output->info.r.height = cpu_to_le32(YRES_DEF);
}

if(vgdev->has_scaling)
output->scaler_users = 0;
primary = virtio_gpu_plane_init(vgdev, DRM_PLANE_TYPE_PRIMARY, index);
if (IS_ERR(primary))
return PTR_ERR(primary);
cursor = virtio_gpu_plane_init(vgdev, DRM_PLANE_TYPE_CURSOR, index);
if (IS_ERR(cursor))
return PTR_ERR(cursor);

if(vgdev->has_multi_plane) {
struct drm_plane *sprite;
int i;
for(i=0; i< vgdev->outputs[index].plane_num; i++) {
sprite = virtio_gpu_plane_init(vgdev, DRM_PLANE_TYPE_OVERLAY, index);
if (IS_ERR(sprite))
return PTR_ERR(sprite);
}
}

drm_crtc_init_with_planes(dev, crtc, primary, cursor,
&virtio_gpu_crtc_funcs, NULL);
drm_crtc_helper_add(crtc, &virtio_gpu_crtc_helper_funcs);
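Background on the sprite loop removed above: each overlay plane binds to its CRTC through the possible_crtcs bitmask handed to the DRM core. A generic sketch using the stock API — all names here are illustrative, and whether virtio_gpu_plane_init() wraps this exact call internally is an assumption:

static int example_overlay_init(struct drm_device *dev,
				struct drm_plane *plane,
				const struct drm_plane_funcs *funcs,
				const u32 *formats, unsigned int nformats,
				unsigned int crtc_index)
{
	/* BIT(crtc_index) restricts the plane to a single CRTC. */
	return drm_universal_plane_init(dev, plane, BIT(crtc_index),
					funcs, formats, nformats,
					NULL, DRM_PLANE_TYPE_OVERLAY,
					NULL);
}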
@@ -408,46 +344,27 @@
{
struct drm_gem_object *obj = NULL;
struct virtio_gpu_framebuffer *virtio_gpu_fb;

const struct drm_format_info *info;
struct drm_gem_object *objs[DRM_FORMAT_MAX_PLANES];

unsigned int i;
int ret;

info = drm_get_format_info(dev, mode_cmd);
if (!info) {
drm_dbg_kms(dev, "Failed to get FB format info\n");
/* lookup object associated with res handle */
obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
if (!obj)
return ERR_PTR(-EINVAL);
}

virtio_gpu_fb = kzalloc(sizeof(*virtio_gpu_fb), GFP_KERNEL);
if (virtio_gpu_fb == NULL) {
drm_gem_object_put(obj);
return ERR_PTR(-ENOMEM);
}

for (i = 0; i < info->num_planes; i++) {
objs[i] = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
if (!objs[i]) {
drm_dbg_kms(dev, "Failed to lookup GEM object\n");
goto error;
}
virtio_gpu_fb->base.obj[i] = objs[i];
ret = virtio_gpu_framebuffer_init(dev, virtio_gpu_fb, mode_cmd, obj);
if (ret) {
kfree(virtio_gpu_fb);
drm_gem_object_put(obj);
return NULL;
}

drm_helper_mode_fill_fb_struct(dev, &virtio_gpu_fb->base, mode_cmd);
ret = drm_framebuffer_init(dev, &virtio_gpu_fb->base, &virtio_gpu_fb_funcs);
if (ret)
goto error;

return &virtio_gpu_fb->base;
error:
kfree(virtio_gpu_fb);
while (i > 0) {
--i;
drm_gem_object_put(objs[i]);
}
return ERR_PTR(ret);
}
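Note the contrast above: the restored path resolves a single GEM object from handles[0], while the removed loop resolved one object per format plane. A driver that needs no custom framebuffer type can get the per-plane lookups from the generic helper instead — an illustrative sketch, not something virtio-gpu can adopt as long as it wraps its framebuffers in struct virtio_gpu_framebuffer:

#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>

/* drm_gem_fb_create() performs the per-plane handle lookups and
 * refcounting that the removed loop above did by hand. */
static const struct drm_mode_config_funcs example_mode_funcs = {
	.fb_create	= drm_gem_fb_create,
	.atomic_check	= drm_atomic_helper_check,
	.atomic_commit	= drm_atomic_helper_commit,
};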

static const struct drm_mode_config_funcs virtio_gpu_mode_funcs = {
@@ -456,79 +373,6 @@ static const struct drm_mode_config_funcs virtio_gpu_mode_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};

static void
virtio_gpu_wait_for_vblanks(struct drm_device *dev,
struct drm_atomic_state *old_state)
{
struct virtio_gpu_device *vgdev = dev->dev_private;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
int i, ret;
unsigned int crtc_mask = 0;

/*
* Legacy cursor ioctls are completely unsynced, and userspace
* relies on that (by doing tons of cursor updates).
*/
if (old_state->legacy_cursor_update)
return;

for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
if (!new_crtc_state->active)
continue;

ret = drm_crtc_vblank_get(crtc);
if (ret != 0)
continue;

crtc_mask |= drm_crtc_mask(crtc);
old_state->crtcs[i].last_vblank_count =
vgdev->has_vblank && vgdev->has_flip_sequence ?
atomic64_read(&vgdev->flip_sequence[i]) :
drm_crtc_vblank_count(crtc);
}

for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
if (!(crtc_mask & drm_crtc_mask(crtc)))
continue;

ret = wait_event_timeout(dev->vblank[i].queue,
old_state->crtcs[i].last_vblank_count !=
(vgdev->has_vblank && vgdev->has_flip_sequence ?
atomic64_read(&vgdev->flip_sequence[i]) :
drm_crtc_vblank_count(crtc)),
msecs_to_jiffies(100));

WARN(!ret, "[CRTC:%d:%s] vblank wait timed out\n",
crtc->base.id, crtc->name);

drm_crtc_vblank_put(crtc);
}
}

static void virtio_gpu_commit_tail(struct drm_atomic_state *old_state)
{
struct drm_device *dev = old_state->dev;

drm_atomic_helper_commit_modeset_disables(dev, old_state);

drm_atomic_helper_commit_planes(dev, old_state, 0);

drm_atomic_helper_commit_modeset_enables(dev, old_state);

drm_atomic_helper_fake_vblank(old_state);

drm_atomic_helper_commit_hw_done(old_state);

virtio_gpu_wait_for_vblanks(dev, old_state);

drm_atomic_helper_cleanup_planes(dev, old_state);
}

static struct drm_mode_config_helper_funcs virtgio_gpu_mode_config_helpers = {
.atomic_commit_tail = virtio_gpu_commit_tail,
};
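With the atomic_commit_tail override removed, the driver falls back to the stock helper. For reference, the default in recent kernels is essentially the function below — identical to the removed virtio_gpu_commit_tail except that it waits on the ordinary vblank counters instead of the host flip sequence:

void drm_atomic_helper_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;

	drm_atomic_helper_commit_modeset_disables(dev, old_state);
	drm_atomic_helper_commit_planes(dev, old_state, 0);
	drm_atomic_helper_commit_modeset_enables(dev, old_state);
	drm_atomic_helper_fake_vblank(old_state);
	drm_atomic_helper_commit_hw_done(old_state);
	/* Stock wait: per-CRTC vblank counters, 100 ms timeout. */
	drm_atomic_helper_wait_for_vblanks(dev, old_state);
	drm_atomic_helper_cleanup_planes(dev, old_state);
}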

int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
{
int i, ret;
@@ -541,7 +385,6 @@ int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
return ret;

vgdev->ddev->mode_config.funcs = &virtio_gpu_mode_funcs;
vgdev->ddev->mode_config.helper_private = &virtgio_gpu_mode_config_helpers;

/* modes will be validated against the framebuffer size */
vgdev->ddev->mode_config.min_width = XRES_MIN;