hwtransfer: use the right hardware config to find conversion targets

The last piece in the puzzle for doing hardware conversions
automatically is ensuring we only consider valid target formats for the
conversion. Although it is unintuitive, some vaapi drivers can expose a
different set of formats for uploads vs for conversions, and that is
the case on the Intel hardware I have here.

Before this change, we would use the upload target list, and our
selection algorithm would pick a format that doesn't work for
conversions, causing everything to fail. Whoops.

Successfully obtaining the conversion target format list is a bit of a
convoluted process, with only parts of it encapsulated by ffmpeg.
Specifically, ffmpeg understands the concept of hardware configurations
that can affect the constraints of a device, but it does not define what
those configurations actually are - that is left up to the specific
hwdevice type.
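
For reference, the generic libavutil pattern looks roughly like the
sketch below. This is not code from this commit; the helper name and
the printing are purely illustrative.

    #include <stdio.h>
    #include <libavutil/hwcontext.h>
    #include <libavutil/pixdesc.h>

    // Query the constraints of a device, optionally narrowed by a
    // type-specific hwconfig (allocated with av_hwdevice_hwconfig_alloc()).
    // Passing NULL describes the device in general; what a non-NULL
    // hwconfig means is up to the hwdevice type.
    static void print_constraints(AVBufferRef *device_ref, void *hwconfig)
    {
        AVHWFramesConstraints *cstr =
            av_hwdevice_get_hwframe_constraints(device_ref, hwconfig);
        if (!cstr)
            return;
        for (int i = 0; cstr->valid_sw_formats &&
                        cstr->valid_sw_formats[i] != AV_PIX_FMT_NONE; i++)
            printf("usable sw format: %s\n",
                   av_get_pix_fmt_name(cstr->valid_sw_formats[i]));
        av_hwframe_constraints_free(&cstr);
    }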

In the case of vaapi, we need to create a config for the video
processing endpoint, and use that when querying for constraints.
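
In FFmpeg terms that looks roughly like the following sketch, assuming
an already-open VADisplay and an AVBufferRef for the device; the helper
name is made up for illustration.

    #include <va/va.h>
    #include <libavutil/hwcontext.h>
    #include <libavutil/hwcontext_vaapi.h>

    // Build the hwconfig for VAAPI's video processing entrypoint.
    static AVVAAPIHWConfig *alloc_vpp_hwconfig(VADisplay display,
                                               AVBufferRef *device_ref)
    {
        VAConfigID config_id;
        VAStatus vas = vaCreateConfig(display, VAProfileNone,
                                      VAEntrypointVideoProc, NULL, 0,
                                      &config_id);
        if (vas != VA_STATUS_SUCCESS)
            return NULL;

        // av_hwdevice_hwconfig_alloc() returns the hwdevice-type specific
        // config struct; for vaapi that is AVVAAPIHWConfig, which wraps a
        // VAConfigID.
        AVVAAPIHWConfig *hwconfig = av_hwdevice_hwconfig_alloc(device_ref);
        if (!hwconfig) {
            vaDestroyConfig(display, config_id);
            return NULL;
        }
        hwconfig->config_id = config_id;
        return hwconfig;
    }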

I decided to encapsulate creation of the config as part of the hwdec
init process, so that the constraint query can be done in the
hwtransfer code in an opaque way. I don't know if any other hardware
will need this capability, but if so, we'll be able to account for it.
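
The shape of that split is roughly as follows (simplified struct and
function names, not the actual mpv ones):

    #include <libavutil/hwcontext.h>

    // The hwdec backend owns and fills in an opaque config pointer; the
    // hwtransfer code never interprets it, it only forwards it to libavutil.
    struct hwdec_ctx_sketch {
        AVBufferRef *av_device_ref;
        void *conversion_config;   // hwdevice-type specific, NULL if unused
    };

    static AVHWFramesConstraints *
    query_conversion_constraints(struct hwdec_ctx_sketch *ctx)
    {
        return av_hwdevice_get_hwframe_constraints(ctx->av_device_ref,
                                                   ctx->conversion_config);
    }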

Then, when probing, instead of checking which formats are supported
for transfers, we use the results of the constraint query with the
conversion config. And as that config doesn't depend on the source
format, we only need to do the query once.
Philip Langdale 2023-08-03 10:45:35 +08:00 committed by Philip Langdale
parent 7d7ef05f10
commit 19ea8b31bd
4 changed files with 84 additions and 24 deletions

View File

@@ -260,7 +260,7 @@ static bool vo_supports(struct mp_hwdec_ctx *ctx, int hw_fmt, int sw_fmt)
return false;
}
static bool probe_formats(struct mp_filter *f, int hw_imgfmt)
static bool probe_formats(struct mp_filter *f, int hw_imgfmt, bool use_conversion_filter)
{
struct priv *p = f->priv;
@@ -276,6 +276,7 @@ static bool probe_formats(struct mp_filter *f, int hw_imgfmt)
struct mp_hwdec_ctx *ctx = NULL;
AVHWFramesConstraints *cstr = NULL;
AVHWFramesConstraints *conversion_cstr = NULL;
struct hwdec_imgfmt_request params = {
.imgfmt = hw_imgfmt,
@@ -326,6 +327,16 @@ static bool probe_formats(struct mp_filter *f, int hw_imgfmt)
}
}
if (use_conversion_filter) {
// We will not be doing a transfer, so do not probe for transfer
// formats. This can produce incorrect results. Instead, we need to
// obtain the constraints for a conversion configuration.
conversion_cstr =
av_hwdevice_get_hwframe_constraints(ctx->av_device_ref,
ctx->conversion_config);
}
for (int n = 0; cstr->valid_sw_formats &&
cstr->valid_sw_formats[n] != AV_PIX_FMT_NONE; n++)
{
@@ -345,19 +356,10 @@ static bool probe_formats(struct mp_filter *f, int hw_imgfmt)
continue;
}
// Creates an AVHWFramesContexts with the given parameters.
AVBufferRef *frames = NULL;
if (!mp_update_av_hw_frames_pool(&frames, ctx->av_device_ref,
hw_imgfmt, imgfmt, 128, 128, false))
{
MP_WARN(f, "failed to allocate pool\n");
continue;
}
enum AVPixelFormat *fmts;
if (av_hwframe_transfer_get_formats(frames,
AV_HWFRAME_TRANSFER_DIRECTION_TO, &fmts, 0) >= 0)
{
if (use_conversion_filter) {
// The conversion constraints are universal, and do not vary with
// source format, so we will associate the same set of target formats
// with all source formats.
int index = p->num_fmts;
MP_TARRAY_APPEND(p, p->fmts, p->num_fmts, imgfmt);
MP_TARRAY_GROW(p, p->fmt_upload_index, index);
@@ -365,7 +367,8 @@ static bool probe_formats(struct mp_filter *f, int hw_imgfmt)
p->fmt_upload_index[index] = p->num_upload_fmts;
for (int i = 0; fmts[i] != AV_PIX_FMT_NONE; i++) {
enum AVPixelFormat *fmts = conversion_cstr->valid_sw_formats;
for (int i = 0; fmts && fmts[i] != AV_PIX_FMT_NONE; i++) {
int fmt = pixfmt2imgfmt(fmts[i]);
if (!fmt)
continue;
@@ -379,14 +382,51 @@ static bool probe_formats(struct mp_filter *f, int hw_imgfmt)
p->fmt_upload_num[index] =
p->num_upload_fmts - p->fmt_upload_index[index];
} else {
// Creates an AVHWFramesContexts with the given parameters.
AVBufferRef *frames = NULL;
if (!mp_update_av_hw_frames_pool(&frames, ctx->av_device_ref,
hw_imgfmt, imgfmt, 128, 128, false))
{
MP_WARN(f, "failed to allocate pool\n");
continue;
}
av_free(fmts);
enum AVPixelFormat *fmts;
if (av_hwframe_transfer_get_formats(frames,
AV_HWFRAME_TRANSFER_DIRECTION_TO, &fmts, 0) >= 0)
{
int index = p->num_fmts;
MP_TARRAY_APPEND(p, p->fmts, p->num_fmts, imgfmt);
MP_TARRAY_GROW(p, p->fmt_upload_index, index);
MP_TARRAY_GROW(p, p->fmt_upload_num, index);
p->fmt_upload_index[index] = p->num_upload_fmts;
for (int i = 0; fmts[i] != AV_PIX_FMT_NONE; i++) {
int fmt = pixfmt2imgfmt(fmts[i]);
if (!fmt)
continue;
MP_VERBOSE(f, " supports %s\n", mp_imgfmt_to_name(fmt));
if (!vo_supports(ctx, hw_imgfmt, fmt)) {
MP_VERBOSE(f, " ... not supported by VO\n");
continue;
}
MP_TARRAY_APPEND(p, p->upload_fmts, p->num_upload_fmts, fmt);
}
p->fmt_upload_num[index] =
p->num_upload_fmts - p->fmt_upload_index[index];
av_free(fmts);
}
av_buffer_unref(&frames);
}
av_buffer_unref(&frames);
}
av_hwframe_constraints_free(&cstr);
av_hwframe_constraints_free(&conversion_cstr);
p->av_device_ctx = av_buffer_ref(ctx->av_device_ref);
if (!p->av_device_ctx)
return false;
@@ -407,7 +447,7 @@ struct mp_hwupload mp_hwupload_create(struct mp_filter *parent, int hw_imgfmt,
mp_filter_add_pin(f, MP_PIN_IN, "in");
mp_filter_add_pin(f, MP_PIN_OUT, "out");
if (!probe_formats(f, hw_imgfmt)) {
if (!probe_formats(f, hw_imgfmt, src_is_same_hw)) {
MP_INFO(f, "hardware format not supported\n");
goto fail;
}

View File

@@ -23,6 +23,10 @@ struct mp_hwdec_ctx {
// This will be used for hardware conversion of frame formats.
// NULL otherwise.
const char *conversion_filter_name;
// The libavutil hwconfig to be used when querying constraints for the
// conversion filter. Can be NULL if no special config is required.
void *conversion_config;
};
// Used to communicate hardware decoder device handles from VO to video decoder.

View File

@@ -54,7 +54,8 @@ static bool map(struct ra_hwdec_mapper *mapper,
return false;
}
MP_VERBOSE(mapper, "Supported Wayland display format: '%s(%016lx)'\n",
MP_VERBOSE(mapper, "Supported Wayland display format %s: '%s(%016lx)'\n",
mp_imgfmt_to_name(mapper->src->params.hw_subfmt),
mp_tag_str(drm_format), mapper_p->desc.objects[0].format_modifier);
return true;

View File

@@ -113,8 +113,14 @@ struct priv_owner {
static void uninit(struct ra_hwdec *hw)
{
struct priv_owner *p = hw->priv;
if (p->ctx)
if (p->ctx) {
hwdec_devices_remove(hw->devs, &p->ctx->hwctx);
if (p->ctx->hwctx.conversion_config) {
AVVAAPIHWConfig *hwconfig = p->ctx->hwctx.conversion_config;
vaDestroyConfig(p->ctx->display, hwconfig->config_id);
av_freep(&p->ctx->hwctx.conversion_config);
}
}
va_destroy(p->ctx);
}
@@ -131,11 +137,10 @@ const static dmabuf_interop_init interop_inits[] = {
NULL
};
const static char *conversion_filter_name = "scale_vaapi";
static int init(struct ra_hwdec *hw)
{
struct priv_owner *p = hw->priv;
VAStatus vas;
for (int i = 0; interop_inits[i]; i++) {
if (interop_inits[i](hw, &p->dmabuf_interop)) {
@@ -173,13 +178,23 @@ static int init(struct ra_hwdec *hw)
return -1;
}
VAConfigID config_id;
AVVAAPIHWConfig *hwconfig = NULL;
vas = vaCreateConfig(p->display, VAProfileNone, VAEntrypointVideoProc, NULL,
0, &config_id);
if (vas == VA_STATUS_SUCCESS) {
hwconfig = av_hwdevice_hwconfig_alloc(p->ctx->av_device_ref);
hwconfig->config_id = config_id;
}
// it's now safe to set the display resource
ra_add_native_resource(hw->ra_ctx->ra, "VADisplay", p->display);
p->ctx->hwctx.hw_imgfmt = IMGFMT_VAAPI;
p->ctx->hwctx.supported_formats = p->formats;
p->ctx->hwctx.driver_name = hw->driver->name;
p->ctx->hwctx.conversion_filter_name = conversion_filter_name;
p->ctx->hwctx.conversion_filter_name = "scale_vaapi";
p->ctx->hwctx.conversion_config = hwconfig;
hwdec_devices_add(hw->devs, &p->ctx->hwctx);
return 0;
}