Skip to content

Commit

Permalink
drm/virtio: Use flip sequence from virtio-GPU back-end
Browse files Browse the repository at this point in the history
Introduce per-CRTC flip counter that should be incremented when flip
happens in back-end. Atomic commit worker should await change of the new
flip sequence rather than vblank sequence. Present fences are also
cached and signaled when flip happens.

We introduce the flip sequence feature for backward compatibility: the
driver waits for a flip event only when the flip sequence feature has
been negotiated. This avoids breaking old hypervisors/device models.

Tracked-On: OAM-128741
Signed-off-by: hangliu1 <[email protected]>
Signed-off-by: Xue, Bosheng <[email protected]>
Signed-off-by: Weifeng Liu <[email protected]>
  • Loading branch information
phreer committed Dec 23, 2024
1 parent bd445d6 commit 0390a3c
Show file tree
Hide file tree
Showing 7 changed files with 165 additions and 23 deletions.
1 change: 1 addition & 0 deletions drivers/gpu/drm/virtio/virtgpu_debugfs.c
Original file line number Diff line number Diff line change
Expand Up @@ -57,6 +57,7 @@ static int virtio_gpu_features(struct seq_file *m, void *data)
virtio_gpu_add_bool(m, "context init", vgdev->has_context_init);
virtio_gpu_add_bool(m, "scaling", vgdev->has_scaling);
virtio_gpu_add_bool(m, "allow_p2p", vgdev->has_allow_p2p);
virtio_gpu_add_bool(m, "flip_sequence", vgdev->has_flip_sequence);
virtio_gpu_add_bool(m, "multi_plane", vgdev->has_multi_plane);
virtio_gpu_add_bool(m, "rotation", vgdev->has_rotation);
virtio_gpu_add_bool(m, "pixel_blend_mode", vgdev->has_pixel_blend_mode);
Expand Down
132 changes: 119 additions & 13 deletions drivers/gpu/drm/virtio/virtgpu_display.c
Original file line number Diff line number Diff line change
Expand Up @@ -55,8 +55,9 @@ static int virtio_irq_enable_vblank(struct drm_crtc *crtc)
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);

virtio_gpu_vblank_poll_arm(vgdev->vblank[output->index].vblank.vq);
virtqueue_enable_cb(vgdev->vblank[output->index].vblank.vq);
do {
virtio_gpu_vblank_poll_arm(vgdev->vblank[output->index].vblank.vq);
} while (!virtqueue_enable_cb(vgdev->vblank[output->index].vblank.vq));
return 0;
}

Expand Down Expand Up @@ -124,6 +125,46 @@ static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc,
virtio_gpu_notify(vgdev);
}

static void virtio_gpu_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct virtio_gpu_device *vgdev = crtc->dev->dev_private;
struct drm_device *drm = crtc->dev;
const unsigned pipe = drm_crtc_index(crtc);
struct drm_pending_vblank_event *e = crtc->state->event;

if (!vgdev->has_vblank || !crtc->state->event)
return;

if (drm_crtc_vblank_get(crtc)) {
/* Cannot enable vblank, send it right now. */
spin_lock_irq(&drm->event_lock);
drm_crtc_send_vblank_event(crtc, e);
spin_unlock_irq(&drm->event_lock);
crtc->state->event = NULL;
return;
}

if (!vgdev->has_flip_sequence) {
spin_lock_irq(&drm->event_lock);
/* Let drm_handle_vblank signal it later in the vblank interrupt
* and the vblank refcount will be released at that time. */
drm_crtc_arm_vblank_event(crtc, e);
spin_unlock_irq(&drm->event_lock);
} else {
crtc->state->event->sequence =
atomic64_read(&vgdev->flip_sequence[pipe]) + 1;
if (vgdev->cache_event[pipe] != NULL) {
spin_lock_irq(&drm->event_lock);
drm_crtc_send_vblank_event(crtc, vgdev->cache_event[pipe]);
spin_unlock_irq(&drm->event_lock);
drm_crtc_vblank_put(crtc);
}
vgdev->cache_event[pipe] = crtc->state->event;
}
crtc->state->event = NULL;
}

static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
Expand Down Expand Up @@ -161,18 +202,8 @@ static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
struct drm_device *drm = crtc->dev;
struct virtio_gpu_device *vgdev = drm->dev_private;
const unsigned pipe = drm_crtc_index(crtc);

if(vgdev->has_vblank) {
if (crtc->state->event) {
spin_lock_irq(&drm->event_lock);
if (drm_crtc_vblank_get(crtc) != 0)
drm_crtc_send_vblank_event(crtc, crtc->state->event);
else
drm_crtc_arm_vblank_event(crtc, crtc->state->event);
spin_unlock_irq(&drm->event_lock);
crtc->state->event = NULL;
}
}
if(vgdev->has_multi_plane)
virtio_gpu_resource_flush_sync(crtc);

Expand All @@ -192,6 +223,7 @@ static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,

static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = {
.mode_set_nofb = virtio_gpu_crtc_mode_set_nofb,
.atomic_begin = virtio_gpu_crtc_atomic_begin,
.atomic_check = virtio_gpu_crtc_atomic_check,
.atomic_flush = virtio_gpu_crtc_atomic_flush,
.atomic_enable = virtio_gpu_crtc_atomic_enable,
Expand Down Expand Up @@ -415,6 +447,79 @@ static const struct drm_mode_config_funcs virtio_gpu_mode_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};

static void
virtio_gpu_wait_for_vblanks(struct drm_device *dev,
struct drm_atomic_state *old_state)
{
struct virtio_gpu_device *vgdev = dev->dev_private;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
int i, ret;
unsigned int crtc_mask = 0;

/*
* Legacy cursor ioctls are completely unsynced, and userspace
* relies on that (by doing tons of cursor updates).
*/
if (old_state->legacy_cursor_update)
return;

for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
if (!new_crtc_state->active)
continue;

ret = drm_crtc_vblank_get(crtc);
if (ret != 0)
continue;

crtc_mask |= drm_crtc_mask(crtc);
old_state->crtcs[i].last_vblank_count =
vgdev->has_vblank && vgdev->has_flip_sequence ?
atomic64_read(&vgdev->flip_sequence[i]) :
drm_crtc_vblank_count(crtc);
}

for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
if (!(crtc_mask & drm_crtc_mask(crtc)))
continue;

ret = wait_event_timeout(dev->vblank[i].queue,
old_state->crtcs[i].last_vblank_count !=
(vgdev->has_vblank && vgdev->has_flip_sequence ?
atomic64_read(&vgdev->flip_sequence[i]) :
drm_crtc_vblank_count(crtc)),
msecs_to_jiffies(100));

WARN(!ret, "[CRTC:%d:%s] vblank wait timed out\n",
crtc->base.id, crtc->name);

drm_crtc_vblank_put(crtc);
}
}

/*
 * virtio_gpu_commit_tail - driver-specific atomic commit tail
 * @old_state: the atomic state being committed
 *
 * Mirrors the default drm_atomic_helper_commit_tail() but replaces
 * drm_atomic_helper_wait_for_vblanks() with virtio_gpu_wait_for_vblanks(),
 * which can wait on the host's flip-sequence counter instead of the vblank
 * counter.  The call order (disables -> planes -> enables -> fake vblank ->
 * hw_done -> wait -> cleanup) follows the DRM atomic helper contract and
 * must not be reordered.
 */
static void virtio_gpu_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;

	drm_atomic_helper_commit_modeset_disables(dev, old_state);

	drm_atomic_helper_commit_planes(dev, old_state, 0);

	drm_atomic_helper_commit_modeset_enables(dev, old_state);

	drm_atomic_helper_fake_vblank(old_state);

	drm_atomic_helper_commit_hw_done(old_state);

	/* Wait for flips/vblanks before the old framebuffers are released. */
	virtio_gpu_wait_for_vblanks(dev, old_state);

	drm_atomic_helper_cleanup_planes(dev, old_state);
}

/*
 * Mode-config helpers: override atomic_commit_tail so the commit worker
 * waits on the negotiated flip sequence instead of plain vblanks.
 *
 * NOTE(review): identifier is misspelled ("virtgio" -> "virtio"); renaming
 * requires updating the reference in virtio_gpu_modeset_init() as well.
 */
static struct drm_mode_config_helper_funcs virtgio_gpu_mode_config_helpers = {
	.atomic_commit_tail = virtio_gpu_commit_tail,
};

int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
{
int i, ret;
Expand All @@ -427,6 +532,7 @@ int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
return ret;

vgdev->ddev->mode_config.funcs = &virtio_gpu_mode_funcs;
vgdev->ddev->mode_config.helper_private = &virtgio_gpu_mode_config_helpers;

/* modes will be validated against the framebuffer size */
vgdev->ddev->mode_config.min_width = XRES_MIN;
Expand Down
1 change: 1 addition & 0 deletions drivers/gpu/drm/virtio/virtgpu_drv.c
Original file line number Diff line number Diff line change
Expand Up @@ -166,6 +166,7 @@ static unsigned int features[] = {
VIRTIO_GPU_F_SCALING,
VIRTIO_GPU_F_VBLANK,
VIRTIO_GPU_F_ALLOW_P2P,
VIRTIO_GPU_F_FLIP_SEQUENCE,
VIRTIO_GPU_F_MULTI_PLANE,
VIRTIO_GPU_F_ROTATION,
VIRTIO_GPU_F_PIXEL_BLEND_MODE,
Expand Down
11 changes: 11 additions & 0 deletions drivers/gpu/drm/virtio/virtgpu_drv.h
Original file line number Diff line number Diff line change
Expand Up @@ -180,6 +180,9 @@ struct virtio_gpu_vbuffer {
#define VIRTIO_GPU_MAX_PLANES 6
/*hardcode igpu scaler number ver>11 */
#define SKL_NUM_SCALERS 2

#define VBLANK_EVENT_CACHE_SIZE 3

struct virtio_gpu_output {
int index;
struct drm_crtc crtc;
Expand Down Expand Up @@ -232,12 +235,19 @@ struct virtio_gpu_vblank {
uint32_t buf[4];
};

/*
 * drm_vblank_passed - has sequence @seq reached reference point @ref?
 * @seq: most recently observed flip/vblank sequence
 * @ref: sequence an event is waiting for
 *
 * Wrap-safe comparison (same scheme as the DRM core's vblank_passed()):
 * the unsigned difference is small only when @seq is at or slightly
 * ahead of @ref; a "not yet reached" @ref yields a huge difference and
 * the test fails.  2^23 is the tolerated forward window.
 */
static inline bool drm_vblank_passed(u64 seq, u64 ref)
{
	const u64 diff = seq - ref;

	return diff <= (1 << 23);
}

struct virtio_gpu_device {
struct drm_device *ddev;

struct virtio_device *vdev;

struct virtio_gpu_output outputs[VIRTIO_GPU_MAX_SCANOUTS];
struct drm_pending_vblank_event *cache_event[VIRTIO_GPU_MAX_SCANOUTS];
atomic64_t flip_sequence[VIRTIO_GPU_MAX_SCANOUTS];
uint32_t num_scanouts;
uint32_t num_vblankq;
struct virtio_gpu_queue ctrlq;
Expand All @@ -264,6 +274,7 @@ struct virtio_gpu_device {
bool has_scaling;
bool has_vblank;
bool has_allow_p2p;
bool has_flip_sequence;
bool has_multi_plane;
bool has_rotation;
bool has_pixel_blend_mode;
Expand Down
9 changes: 7 additions & 2 deletions drivers/gpu/drm/virtio/virtgpu_kms.c
Original file line number Diff line number Diff line change
Expand Up @@ -235,6 +235,9 @@ int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_ALLOW_P2P)) {
vgdev->has_allow_p2p = true;
}
if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_FLIP_SEQUENCE)) {
vgdev->has_flip_sequence = true;
}
if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_MULTI_PLANE)) {
vgdev->has_multi_plane = true;
}
Expand Down Expand Up @@ -288,8 +291,10 @@ int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
vgdev->has_modifier ? '+' : '-',
vgdev->has_multi_plane ? '+' : '-');

DRM_INFO("features: %ccontext_init\n",
vgdev->has_context_init ? '+' : '-');
DRM_INFO("features: %ccontext_init %callow_p2p %cflip_sequence\n",
vgdev->has_context_init ? '+' : '-',
vgdev->has_allow_p2p ? '+' : '-',
vgdev->has_flip_sequence ? '+' : '-');

/* get display info */
virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
Expand Down
28 changes: 22 additions & 6 deletions drivers/gpu/drm/virtio/virtgpu_vq.c
Original file line number Diff line number Diff line change
Expand Up @@ -100,21 +100,37 @@ void virtio_gpu_vblank_ack(struct virtqueue *vq)
unsigned long irqflags;
unsigned int len;
unsigned int *ret_value;
int target = 0;
unsigned target = 0;

while((target < vgdev->num_vblankq) && (vgdev->vblank[target].vblank.vq != vq)) {
target++;
}

spin_lock_irqsave(&vgdev->vblank[target].vblank.qlock, irqflags);
if((ret_value = virtqueue_get_buf(vgdev->vblank[target].vblank.vq, &len)) != NULL) {

while ((ret_value = virtqueue_get_buf(vgdev->vblank[target].vblank.vq, &len)) != NULL) {
spin_lock_irqsave(&vgdev->vblank[target].vblank.qlock, irqflags);
virtgpu_irqqueue_buf(vgdev->vblank[target].vblank.vq, ret_value);
spin_unlock_irqrestore(&vgdev->vblank[target].vblank.qlock, irqflags);

drm_handle_vblank(dev, target);

if (*ret_value != 0) {
atomic64_set(&vgdev->flip_sequence[target], *ret_value);
}

}

spin_unlock_irqrestore(&vgdev->vblank[target].vblank.qlock, irqflags);
drm_handle_vblank(dev, target);
if (!vgdev->has_flip_sequence)
return;

struct drm_pending_vblank_event *e = vgdev->cache_event[target];
if (e && drm_vblank_passed(atomic64_read(&vgdev->flip_sequence[target]),
e->sequence)) {
spin_lock_irqsave(&dev->event_lock, irqflags);
drm_crtc_send_vblank_event(&vgdev->outputs[target].crtc, e);
vgdev->cache_event[target] = NULL;
spin_unlock_irqrestore(&dev->event_lock, irqflags);
drm_crtc_vblank_put(&vgdev->outputs[target].crtc);
}
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
Expand Down
6 changes: 4 additions & 2 deletions include/uapi/linux/virtio_gpu.h
Original file line number Diff line number Diff line change
Expand Up @@ -75,8 +75,6 @@

#define VIRTIO_GPU_F_VBLANK 7

#define VIRTIO_GPU_F_ALLOW_P2P 13

/*
* VIRTIO_GPU_CMD_FLUSH_SPRITE
* VIRTIO_GPU_CMD_FLUSH_SYNC
Expand All @@ -94,6 +92,10 @@

#define VIRTIO_GPU_F_MULTI_PLANAR_FORMAT 12

#define VIRTIO_GPU_F_ALLOW_P2P 13

#define VIRTIO_GPU_F_FLIP_SEQUENCE 14

#define VIRTIO_GPU_TUNNEL_CMD_SET_ROTATION 1
#define VIRTIO_GPU_TUNNEL_CMD_SET_BLEND 2
#define VIRTIO_GPU_TUNNEL_CMD_SET_PLANARS 3
Expand Down

0 comments on commit 0390a3c

Please sign in to comment.