-rw-r--r--  DOCS/interface-changes.rst | 7
-rw-r--r--  DOCS/man/vf.rst | 4
-rw-r--r--  common/common.c | 11
-rw-r--r--  common/common.h | 2
-rw-r--r--  filters/f_auto_filters.c | 244
-rw-r--r--  filters/f_auto_filters.h | 10
-rw-r--r--  filters/f_autoconvert.c | 288
-rw-r--r--  filters/f_autoconvert.h | 39
-rw-r--r--  filters/f_hwtransfer.c | 299
-rw-r--r--  filters/f_hwtransfer.h | 32
-rw-r--r--  filters/f_lavfi.c | 952
-rw-r--r--  filters/f_lavfi.h | 30
-rw-r--r--  filters/f_output_chain.c | 564
-rw-r--r--  filters/f_output_chain.h | 59
-rw-r--r--  filters/f_swscale.c | 148
-rw-r--r--  filters/f_swscale.h | 25
-rw-r--r--  filters/f_utils.c | 175
-rw-r--r--  filters/f_utils.h | 72
-rw-r--r--  filters/filter.c | 790
-rw-r--r--  filters/filter.h | 379
-rw-r--r--  filters/filter_internal.h | 144
-rw-r--r--  filters/frame.c | 179
-rw-r--r--  filters/frame.h | 55
-rw-r--r--  filters/user_filters.c | 119
-rw-r--r--  filters/user_filters.h | 29
-rw-r--r--  options/m_option.h | 1
-rw-r--r--  options/options.c | 14
-rw-r--r--  options/options.h | 7
-rw-r--r--  player/command.c | 50
-rw-r--r--  player/core.h | 12
-rw-r--r--  player/loadfile.c | 3
-rw-r--r--  player/playloop.c | 5
-rw-r--r--  player/sub.c | 3
-rw-r--r--  player/video.c | 292
-rw-r--r--  video/d3d.c | 3
-rw-r--r--  video/filter/refqueue.c | 192
-rw-r--r--  video/filter/refqueue.h | 18
-rw-r--r--  video/filter/vf.c | 797
-rw-r--r--  video/filter/vf.h | 179
-rw-r--r--  video/filter/vf_convert.c | 133
-rw-r--r--  video/filter/vf_d3d11vpp.c | 318
-rw-r--r--  video/filter/vf_format.c | 135
-rw-r--r--  video/filter/vf_lavfi.c | 517
-rw-r--r--  video/filter/vf_sub.c | 177
-rw-r--r--  video/filter/vf_vapoursynth.c | 682
-rw-r--r--  video/filter/vf_vavpp.c | 291
-rw-r--r--  video/filter/vf_vdpaupp.c | 175
-rw-r--r--  video/fmt-conversion.c | 3
-rw-r--r--  video/hwdec.c | 25
-rw-r--r--  video/hwdec.h | 10
-rw-r--r--  video/img_format.c | 10
-rw-r--r--  video/img_format.h | 9
-rw-r--r--  video/out/d3d11/hwdec_d3d11va.c | 5
-rw-r--r--  video/out/opengl/hwdec_d3d11egl.c | 5
-rw-r--r--  video/out/opengl/hwdec_d3d11eglrgb.c | 10
-rw-r--r--  wscript_build.py | 14
56 files changed, 5887 insertions, 2864 deletions
diff --git a/DOCS/interface-changes.rst b/DOCS/interface-changes.rst
index 2b36506920..2379470277 100644
--- a/DOCS/interface-changes.rst
+++ b/DOCS/interface-changes.rst
@@ -36,6 +36,13 @@ Interface changes
- deprecate --af=lavrresample. Use the ``--audio-resample-...`` options to
customize resampling, or the libavfilter ``--af=aresample`` filter.
- add --osd-on-seek
+ - remove outfmt sub-parameter from "format" video filter (no replacement)
+ - some behavior changes in the video filter chain, including:
+   - before, using a filter incompatible with hwdec would disable hwdec;
+     now it disables the filter at runtime instead
+   - before, inserting a filter incompatible with hwdec at runtime was
+     refused; now the filter is inserted successfully, but gets disabled
+     slightly later
--- mpv 0.28.0 ---
- rename --hwdec=mediacodec option to mediacodec-copy, to reflect
conventions followed by other hardware video decoding APIs
diff --git a/DOCS/man/vf.rst b/DOCS/man/vf.rst
index 5f953b2204..13e80014c4 100644
--- a/DOCS/man/vf.rst
+++ b/DOCS/man/vf.rst
@@ -110,9 +110,7 @@ Available mpv-only filters are:
``<fmt>``
Format name, e.g. rgb15, bgr24, 420p, etc. (default: don't change).
- ``<outfmt>``
- Format name that should be substituted for the output. If they do not
- have the same bytes per pixel and chroma subsampling, it will fail.
+
``<colormatrix>``
Controls the YUV to RGB color space conversion when playing video. There
are various standards. Normally, BT.601 should be used for SD video, and
diff --git a/common/common.c b/common/common.c
index 9ed63e1783..12e8e141ec 100644
--- a/common/common.c
+++ b/common/common.c
@@ -302,3 +302,14 @@ char *mp_tprintf_buf(char *buf, size_t buf_size, const char *format, ...)
va_end(ap);
return buf;
}
+
+char **mp_dup_str_array(void *tctx, char **s)
+{
+ char **r = NULL;
+ int num_r = 0;
+ for (int n = 0; s && s[n]; n++)
+ MP_TARRAY_APPEND(tctx, r, num_r, talloc_strdup(tctx, s[n]));
+ if (r)
+ MP_TARRAY_APPEND(tctx, r, num_r, NULL);
+ return r;
+}
diff --git a/common/common.h b/common/common.h
index 8dd02026f6..224a6e023a 100644
--- a/common/common.h
+++ b/common/common.h
@@ -112,4 +112,6 @@ char *mp_tag_str_buf(char *buf, size_t buf_size, uint32_t tag);
char *mp_tprintf_buf(char *buf, size_t buf_size, const char *format, ...)
PRINTF_ATTRIBUTE(3, 4);
+char **mp_dup_str_array(void *tctx, char **s);
+
#endif /* MPLAYER_MPCOMMON_H */
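The new mp_dup_str_array() helper deep-copies a NULL-terminated string array onto a talloc context. A minimal usage sketch, not part of the change itself (the surrounding option array and lifetime handling are made up for illustration):

    #include "common/common.h"
    #include "mpv_talloc.h"

    static void example_dup(void)
    {
        void *tmp = talloc_new(NULL);
        char *opts[] = {"deint", "yes", NULL};
        // Deep copy: the array and each string are allocated under tmp,
        // and the copy is NULL-terminated like the input.
        char **copy = mp_dup_str_array(tmp, opts);
        // ... use copy ...
        talloc_free(tmp); // frees the array and all duplicated strings
    }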
diff --git a/filters/f_auto_filters.c b/filters/f_auto_filters.c
new file mode 100644
index 0000000000..eac6f745ca
--- /dev/null
+++ b/filters/f_auto_filters.c
@@ -0,0 +1,244 @@
+#include <math.h>
+
+#include "common/common.h"
+#include "common/msg.h"
+#include "options/m_config.h"
+#include "options/options.h"
+#include "video/mp_image.h"
+
+#include "f_auto_filters.h"
+#include "f_swscale.h"
+#include "f_utils.h"
+#include "filter.h"
+#include "filter_internal.h"
+#include "user_filters.h"
+
+struct deint_priv {
+ struct mp_subfilter sub;
+ int prev_imgfmt;
+ int prev_setting;
+ struct m_config_cache *opts;
+};
+
+static void deint_process(struct mp_filter *f)
+{
+ struct deint_priv *p = f->priv;
+
+ if (!mp_subfilter_read(&p->sub))
+ return;
+
+ struct mp_frame frame = p->sub.frame;
+
+ if (mp_frame_is_signaling(frame)) {
+ mp_subfilter_continue(&p->sub);
+ return;
+ }
+
+ if (frame.type != MP_FRAME_VIDEO) {
+ MP_ERR(f, "video input required!\n");
+ mp_filter_internal_mark_failed(f);
+ return;
+ }
+
+ m_config_cache_update(p->opts);
+ struct filter_opts *opts = p->opts->opts;
+
+ if (!opts->deinterlace)
+ mp_subfilter_destroy(&p->sub);
+
+ struct mp_image *img = frame.data;
+
+ if (img->imgfmt == p->prev_imgfmt && p->prev_setting == opts->deinterlace) {
+ mp_subfilter_continue(&p->sub);
+ return;
+ }
+
+ if (!mp_subfilter_drain_destroy(&p->sub))
+ return;
+
+ assert(!p->sub.filter);
+
+ p->prev_imgfmt = img->imgfmt;
+ p->prev_setting = opts->deinterlace;
+ if (!p->prev_setting) {
+ mp_subfilter_continue(&p->sub);
+ return;
+ }
+
+ if (img->imgfmt == IMGFMT_VDPAU) {
+ char *args[] = {"deint", "yes", NULL};
+ p->sub.filter =
+ mp_create_user_filter(f, MP_OUTPUT_CHAIN_VIDEO, "vdpaupp", args);
+ } else if (img->imgfmt == IMGFMT_VAAPI) {
+ p->sub.filter =
+ mp_create_user_filter(f, MP_OUTPUT_CHAIN_VIDEO, "vavpp", NULL);
+ } else if (img->imgfmt == IMGFMT_D3D11) {
+ p->sub.filter =
+ mp_create_user_filter(f, MP_OUTPUT_CHAIN_VIDEO, "d3d11vpp", NULL);
+ } else if (mp_sws_supports_input(img->imgfmt)) {
+ char *args[] = {"mode", "send_field", "deint", "interlaced", NULL};
+ p->sub.filter =
+ mp_create_user_filter(f, MP_OUTPUT_CHAIN_VIDEO, "yadif", args);
+ } else {
+ MP_ERR(f, "no deinterlace filter available for this format\n");
+ mp_subfilter_continue(&p->sub);
+ return;
+ }
+
+ if (!p->sub.filter)
+ MP_ERR(f, "creating deinterlacer failed\n");
+
+ mp_subfilter_continue(&p->sub);
+}
+
+static void deint_reset(struct mp_filter *f)
+{
+ struct deint_priv *p = f->priv;
+
+ mp_subfilter_reset(&p->sub);
+}
+
+static void deint_destroy(struct mp_filter *f)
+{
+ struct deint_priv *p = f->priv;
+
+ mp_subfilter_reset(&p->sub);
+ TA_FREEP(&p->sub.filter);
+}
+
+static const struct mp_filter_info deint_filter = {
+ .name = "deint",
+ .priv_size = sizeof(struct deint_priv),
+ .process = deint_process,
+ .reset = deint_reset,
+ .destroy = deint_destroy,
+};
+
+struct mp_filter *mp_deint_create(struct mp_filter *parent)
+{
+ struct mp_filter *f = mp_filter_create(parent, &deint_filter);
+ if (!f)
+ return NULL;
+
+ struct deint_priv *p = f->priv;
+
+ p->sub.in = mp_filter_add_pin(f, MP_PIN_IN, "in");
+ p->sub.out = mp_filter_add_pin(f, MP_PIN_OUT, "out");
+
+ p->opts = m_config_cache_alloc(f, f->global, &filter_conf);
+
+ return f;
+}
+
+struct rotate_priv {
+ struct mp_subfilter sub;
+ int prev_rotate;
+ int prev_imgfmt;
+ int target_rotate;
+};
+
+static void rotate_process(struct mp_filter *f)
+{
+ struct rotate_priv *p = f->priv;
+
+ if (!mp_subfilter_read(&p->sub))
+ return;
+
+ struct mp_frame frame = p->sub.frame;
+
+ if (mp_frame_is_signaling(frame)) {
+ mp_subfilter_continue(&p->sub);
+ return;
+ }
+
+ if (frame.type != MP_FRAME_VIDEO) {
+ MP_ERR(f, "video input required!\n");
+ return;
+ }
+
+ struct mp_image *img = frame.data;
+
+ if (img->params.rotate == p->prev_rotate &&
+ img->imgfmt == p->prev_imgfmt)
+ {
+ img->params.rotate = p->target_rotate;
+ mp_subfilter_continue(&p->sub);
+ return;
+ }
+
+ if (!mp_subfilter_drain_destroy(&p->sub))
+ return;
+
+ assert(!p->sub.filter);
+
+ int rotate = p->prev_rotate = img->params.rotate;
+ p->target_rotate = rotate;
+ p->prev_imgfmt = img->imgfmt;
+
+ struct mp_stream_info *info = mp_filter_find_stream_info(f);
+ if (rotate == 0 || (info && info->rotate90 && !(rotate % 90))) {
+ mp_subfilter_continue(&p->sub);
+ return;
+ }
+
+    if (!mp_sws_supports_input(img->imgfmt)) {
+ MP_ERR(f, "Video rotation with this format not supported\n");
+ mp_subfilter_continue(&p->sub);
+ return;
+ }
+
+ double angle = rotate / 360.0 * M_PI * 2;
+ char *args[] = {"angle", mp_tprintf(30, "%f", angle),
+ "ow", mp_tprintf(30, "rotw(%f)", angle),
+ "oh", mp_tprintf(30, "roth(%f)", angle),
+ NULL};
+ p->sub.filter =
+ mp_create_user_filter(f, MP_OUTPUT_CHAIN_VIDEO, "rotate", args);
+
+ if (p->sub.filter) {
+ MP_INFO(f, "Inserting rotation filter.\n");
+ p->target_rotate = 0;
+ } else {
+ MP_ERR(f, "could not create rotation filter\n");
+ }
+
+ mp_subfilter_continue(&p->sub);
+}
+
+static void rotate_reset(struct mp_filter *f)
+{
+ struct rotate_priv *p = f->priv;
+
+ mp_subfilter_reset(&p->sub);
+}
+
+static void rotate_destroy(struct mp_filter *f)
+{
+ struct rotate_priv *p = f->priv;
+
+ mp_subfilter_reset(&p->sub);
+ TA_FREEP(&p->sub.filter);
+}
+
+static const struct mp_filter_info rotate_filter = {
+ .name = "autorotate",
+ .priv_size = sizeof(struct rotate_priv),
+ .process = rotate_process,
+ .reset = rotate_reset,
+ .destroy = rotate_destroy,
+};
+
+struct mp_filter *mp_autorotate_create(struct mp_filter *parent)
+{
+ struct mp_filter *f = mp_filter_create(parent, &rotate_filter);
+ if (!f)
+ return NULL;
+
+ struct rotate_priv *p = f->priv;
+ p->prev_rotate = -1;
+
+ p->sub.in = mp_filter_add_pin(f, MP_PIN_IN, "in");
+ p->sub.out = mp_filter_add_pin(f, MP_PIN_OUT, "out");
+
+ return f;
+}
diff --git a/filters/f_auto_filters.h b/filters/f_auto_filters.h
new file mode 100644
index 0000000000..5f1a99f636
--- /dev/null
+++ b/filters/f_auto_filters.h
@@ -0,0 +1,10 @@
+#pragma once
+
+#include "filter.h"
+
+// A filter which inserts the required deinterlacing filter based on the
+// hardware decode mode and the deinterlace user option.
+struct mp_filter *mp_deint_create(struct mp_filter *parent);
+
+// Rotate according to mp_image.rotate and VO capabilities.
+struct mp_filter *mp_autorotate_create(struct mp_filter *parent);
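For orientation, a sketch of how these auto filters could be combined by an owner filter. The owner filter, its ppins, and the wiring below are assumptions for illustration, not code from this change:

    #include "filters/f_auto_filters.h"
    #include "filters/f_utils.h"
    #include "filters/filter.h"

    // Hypothetical owner: chain the auto deinterlacer and the auto rotation
    // filter between the owner's internal input and output pins.
    static bool wire_auto_filters(struct mp_filter *owner)
    {
        struct mp_filter *filters[2] = {
            mp_deint_create(owner),
            mp_autorotate_create(owner),
        };
        if (!filters[0] || !filters[1])
            return false;
        // mp_chain_filters() (f_utils.h) links the filters in order between
        // the two given pins; owner->ppins[0]/[1] are assumed to be the
        // owner's in/out pins.
        mp_chain_filters(owner->ppins[0], owner->ppins[1], filters, 2);
        return true;
    }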
diff --git a/filters/f_autoconvert.c b/filters/f_autoconvert.c
new file mode 100644
index 0000000000..687a846ae5
--- /dev/null
+++ b/filters/f_autoconvert.c
@@ -0,0 +1,288 @@
+#include "config.h"
+
+#include "common/common.h"
+#include "common/msg.h"
+#include "video/hwdec.h"
+#include "video/mp_image.h"
+
+#include "f_autoconvert.h"
+#include "f_hwtransfer.h"
+#include "f_swscale.h"
+#include "f_utils.h"
+#include "filter.h"
+#include "filter_internal.h"
+
+struct priv {
+ struct mp_log *log;
+
+ struct mp_subfilter sub;
+
+ bool force_update;
+
+ int *imgfmts;
+ int *subfmts;
+ int num_imgfmts;
+
+ // Enable special conversion for the final stage before the VO.
+ bool vo_convert;
+
+ // sws state
+ int in_imgfmt, in_subfmt;
+
+ struct mp_autoconvert public;
+};
+
+// Dummy filter for bundling sub-conversion filters.
+static const struct mp_filter_info convert_filter = {
+ .name = "convert",
+};
+
+// For hw decoding: thing which can convert between underlying surface formats.
+// The filter detects the needed target format from struct mp_hwdec_ctx.
+struct subfmt_conv {
+ int hw_imgfmt;
+ struct mp_filter *(*create)(struct mp_filter *parent);
+};
+
+static const struct subfmt_conv subfmt_converters[] = {
+#if HAVE_D3D_HWACCEL
+ {IMGFMT_D3D11, vf_d3d11_create_outconv},
+#endif
+ {0}
+};
+
+void mp_autoconvert_clear(struct mp_autoconvert *c)
+{
+ struct priv *p = c->f->priv;
+
+ p->num_imgfmts = 0;
+}
+
+void mp_autoconvert_add_imgfmt(struct mp_autoconvert *c, int imgfmt, int subfmt)
+{
+ struct priv *p = c->f->priv;
+
+ MP_TARRAY_GROW(p, p->imgfmts, p->num_imgfmts);
+ MP_TARRAY_GROW(p, p->subfmts, p->num_imgfmts);
+
+ p->imgfmts[p->num_imgfmts] = imgfmt;
+ p->subfmts[p->num_imgfmts] = subfmt;
+
+ p->num_imgfmts += 1;
+ p->force_update = true;
+}
+
+void mp_autoconvert_add_vo_hwdec_subfmts(struct mp_autoconvert *c,
+ struct mp_hwdec_devices *devs)
+{
+ struct priv *p = c->f->priv;
+ assert(devs);
+
+ int prev_format = 0;
+
+ for (int n = 0; ; n++) {
+ struct mp_hwdec_ctx *ctx = hwdec_devices_get_n(devs, n);
+ if (!ctx)
+ break;
+ if (!ctx->hw_imgfmt || !ctx->supported_formats)
+ continue;
+ // Very hacky: don't let d3d11-egl-rgb overwrite d3d11-egl
+ if (ctx->hw_imgfmt == prev_format)
+ continue;
+ prev_format = ctx->hw_imgfmt;
+ // Stupidity: VOs export imgfmt only, so subfmt is always 0. Remove it
+ // to fix it up.
+ for (int i = 0; i < p->num_imgfmts; i++) {
+ if (p->imgfmts[i] != ctx->hw_imgfmt)
+ continue;
+
+ int count = p->num_imgfmts;
+ MP_TARRAY_REMOVE_AT(p->imgfmts, count, i);
+ count = p->num_imgfmts;
+ MP_TARRAY_REMOVE_AT(p->subfmts, count, i);
+ p->num_imgfmts -= 1;
+ break;
+ }
+ for (int i = 0; ctx->supported_formats[i]; i++)
+ mp_autoconvert_add_imgfmt(c, ctx->hw_imgfmt, ctx->supported_formats[i]);
+ }
+
+ p->vo_convert = true;
+}
+
+static void handle_video_frame(struct mp_filter *f)
+{
+ struct priv *p = f->priv;
+
+ struct mp_frame frame = p->sub.frame;
+ if (frame.type != MP_FRAME_VIDEO) {
+ MP_ERR(p, "video input required!\n");
+ mp_filter_internal_mark_failed(f);
+ return;
+ }
+
+ struct mp_image *img = frame.data;
+
+ if (p->force_update)
+ p->in_imgfmt = p->in_subfmt = 0;
+
+ if (img->imgfmt == p->in_imgfmt && img->params.hw_subfmt == p->in_subfmt) {
+ mp_subfilter_continue(&p->sub);
+ return;
+ }
+
+ if (!mp_subfilter_drain_destroy(&p->sub)) {
+ p->in_imgfmt = p->in_subfmt = 0;
+ return;
+ }
+
+ p->in_imgfmt = img->params.imgfmt;
+ p->in_subfmt = img->params.hw_subfmt;
+ p->force_update = false;
+
+ bool different_subfmt = false;
+
+ for (int n = 0; n < p->num_imgfmts; n++) {
+ bool samefmt = img->params.imgfmt == p->imgfmts[n];
+        bool samesubfmt = img->params.hw_subfmt == p->subfmts[n];
+        if (samefmt && !samesubfmt)
+            different_subfmt = true;
+        if (samefmt && (samesubfmt || !p->subfmts[n])) {
+ mp_subfilter_continue(&p->sub);
+ return;
+ }
+ }
+
+ struct mp_stream_info *info = mp_filter_find_stream_info(f);
+
+ struct mp_filter *conv = mp_filter_create(f, &convert_filter);
+ mp_filter_add_pin(conv, MP_PIN_IN, "in");
+ mp_filter_add_pin(conv, MP_PIN_OUT, "out");
+
+ struct mp_filter *filters[2] = {0};
+ bool need_sws = true;
+
+ int *fmts = p->imgfmts;
+ int num_fmts = p->num_imgfmts;
+
+ // Source is sw, all targets are hw -> try to upload.
+ bool sw_to_hw = !IMGFMT_IS_HWACCEL(img->imgfmt);
+ for (int n = 0; n < num_fmts; n++)
+ sw_to_hw &= IMGFMT_IS_HWACCEL(fmts[n]);
+
+ if (sw_to_hw && num_fmts > 0) {
+ // We can probably use this! Very lazy and very approximate.
+ struct mp_hwupload *upload = mp_hwupload_create(conv, fmts[0]);
+ if (upload) {
+ MP_INFO(p, "HW-uploading to %s\n", mp_imgfmt_to_name(fmts[0]));
+ filters[1] = upload->f;
+ fmts = upload->upload_fmts;
+ num_fmts = upload->num_upload_fmts;
+ }
+ } else if (p->vo_convert && different_subfmt && info && info->hwdec_devs) {
+ for (int n = 0; subfmt_converters[n].hw_imgfmt; n++) {
+ if (subfmt_converters[n].hw_imgfmt == img->imgfmt) {
+ MP_INFO(p, "Using HW sub-conversion.\n");
+ filters[1] = subfmt_converters[n].create(conv);
+ if (filters[1]) {
+ need_sws = false;
+ break;
+ }
+ }
+ }
+ }
+
+ if (need_sws) {
+ // Create a new conversion filter.
+ struct mp_sws_filter *sws = mp_sws_filter_create(conv);
+ if (!sws) {
+ MP_ERR(p, "error creating conversion filter\n");
+ return;
+ }
+
+ int out = mp_sws_find_best_out_format(img->imgfmt, fmts, num_fmts);
+ if (!out) {
+ MP_ERR(p, "can't find video conversion for %s/%s\n",
+ mp_imgfmt_to_name(img->imgfmt),
+ mp_imgfmt_to_name(img->params.hw_subfmt));
+ talloc_free(conv);
+ mp_filter_internal_mark_failed(f);
+ return;
+ }
+
+ if (out == img->imgfmt) {
+ // Can happen if hwupload goes to same format.
+ talloc_free(sws->f);
+ } else {
+ sws->out_format = out;
+ MP_INFO(p, "Converting %s -> %s\n", mp_imgfmt_to_name(img->imgfmt),
+ mp_imgfmt_to_name(sws->out_format));
+ filters[0] = sws->f;
+ }
+ }
+
+ mp_chain_filters(conv->ppins[0], conv->ppins[1], filters, 2);
+
+ p->sub.filter = conv;
+ mp_subfilter_continue(&p->sub);
+}
+
+static void process(struct mp_filter *f)
+{
+ struct priv *p = f->priv;
+
+ if (!mp_subfilter_read(&p->sub))
+ return;
+
+ struct mp_frame frame = p->sub.frame;
+
+ if (!mp_frame_is_signaling(frame)) {
+ if (p->num_imgfmts) {
+ handle_video_frame(f);
+ return;
+ }
+ }
+
+ mp_subfilter_continue(&p->sub);
+}
+
+static void reset(struct mp_filter *f)
+{
+ struct priv *p = f->priv;
+
+ mp_subfilter_reset(&p->sub);
+}
+
+static void destroy(struct mp_filter *f)
+{
+ struct priv *p = f->priv;
+
+ mp_subfilter_reset(&p->sub);
+ TA_FREEP(&p->sub.filter);
+}
+
+static const struct mp_filter_info autoconvert_filter = {
+ .name = "autoconvert",
+ .priv_size = sizeof(struct priv),
+ .process = process,
+ .reset = reset,
+ .destroy = destroy,
+};
+
+struct mp_autoconvert *mp_autoconvert_create(struct mp_filter *parent)
+{
+ struct mp_filter *f = mp_filter_create(parent, &autoconvert_filter);
+ if (!f)
+ return NULL;
+
+ mp_filter_add_pin(f, MP_PIN_IN, "in");
+ mp_filter_add_pin(f, MP_PIN_OUT, "out");
+
+ struct priv *p = f->priv;
+ p->public.f = f;
+ p->log = f->log;
+ p->sub.in = f->ppins[0];
+ p->sub.out = f->ppins[1];
+
+ return &p->public;
+}
diff --git a/filters/f_autoconvert.h b/filters/f_autoconvert.h
new file mode 100644
index 0000000000..72af21a0df
--- /dev/null
+++ b/filters/f_autoconvert.h
@@ -0,0 +1,39 @@
+#pragma once
+
+#include "filter.h"
+
+// A filter which automatically creates and uses a conversion filter based on
+// the filter settings, or passes through data unchanged if no conversion is
+// required.
+struct mp_autoconvert {
+ // f->pins[0] is input, f->pins[1] is output
+ struct mp_filter *f;
+};
+
+// (to free this, free the filter itself, mp_autoconvert.f)
+struct mp_autoconvert *mp_autoconvert_create(struct mp_filter *parent);
+
+// Add imgfmt as an allowed video image format, and error on non-video frames.
+// Each call adds to the list of allowed formats. Before the first call, all
+// formats are allowed (even non-video).
+// subfmt can be used to specify the underlying surface format for hardware
+// formats; otherwise it must be 0.
+void mp_autoconvert_add_imgfmt(struct mp_autoconvert *c, int imgfmt, int subfmt);
+
+// Add the formats supported by the hwdec interops (or essentially refine them),
+// and trigger conversion if hw_subfmts mismatch. This is mostly a hack for
+// D3D11/ANGLE (which supports NV12 only).
+// Must be called after mp_autoconvert_add_imgfmt(); it overrides the formats
+// added there where they collide.
+struct mp_hwdec_devices;
+void mp_autoconvert_add_vo_hwdec_subfmts(struct mp_autoconvert *c,
+ struct mp_hwdec_devices *devs);
+
+// Reset the set of allowed formats back to the initial state. (This does not
+// flush any frames or remove currently active filters, although to get
+// reasonable behavior, you need to re-add all previously allowed formats, or
+// reset the filter.)
+void mp_autoconvert_clear(struct mp_autoconvert *c);
+
+// vf_d3d11vpp.c
+struct mp_filter *vf_d3d11_create_outconv(struct mp_filter *parent);
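As a usage illustration (the owner filter and the particular format choices are assumptions, not part of the patch), restricting output to a couple of software formats looks roughly like this:

    #include "filters/f_autoconvert.h"
    #include "video/img_format.h"

    // Hypothetical: create an autoconvert filter that forces all video
    // frames to yuv420p or nv12, whichever needs less conversion work.
    static struct mp_autoconvert *make_sw_converter(struct mp_filter *owner)
    {
        struct mp_autoconvert *conv = mp_autoconvert_create(owner);
        if (!conv)
            return NULL;
        // For software formats, subfmt must be 0 (see above).
        mp_autoconvert_add_imgfmt(conv, IMGFMT_420P, 0);
        mp_autoconvert_add_imgfmt(conv, IMGFMT_NV12, 0);
        return conv;
    }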
diff --git a/filters/f_hwtransfer.c b/filters/f_hwtransfer.c
new file mode 100644
index 0000000000..6ffda567ae
--- /dev/null
+++ b/filters/f_hwtransfer.c
@@ -0,0 +1,299 @@
+#include <libavutil/buffer.h>
+#include <libavutil/hwcontext.h>
+#include <libavutil/mem.h>
+
+#include "video/fmt-conversion.h"
+#include "video/hwdec.h"
+#include "video/mp_image.h"
+#include "video/mp_image_pool.h"
+
+#include "f_hwtransfer.h"
+#include "filter_internal.h"
+
+struct priv {
+ AVBufferRef *av_device_ctx;
+
+ AVBufferRef *hw_pool;
+
+ int last_input_fmt;
+ int last_upload_fmt;
+ int last_sw_fmt;
+
+ struct mp_hwupload public;
+};
+
+static bool update_format_decision(struct priv *p, int input_fmt)
+{
+ struct mp_hwupload *u = &p->public;
+
+ if (!input_fmt)
+ return false;
+
+ if (input_fmt == p->last_input_fmt)
+ return true;
+
+ p->last_input_fmt = 0;
+
+ int res = mp_imgfmt_select_best_list(u->upload_fmts, u->num_upload_fmts,
+ input_fmt);
+
+ if (!res)
+ return false;
+
+ // Find which sw format we should use.
+ // NOTE: if there are ever any hw APIs that actually do expensive
+ // conversions on mismatching format uploads, we should probably first look
+ // which sw format is preferred?
+ int index = -1;
+ for (int n = 0; n < u->num_upload_fmts; n++) {
+ if (u->upload_fmts[n] == res)
+ index = n;
+ }
+
+ if (index < 0)
+ return false;
+
+ for (int n = 0; n < u->num_fmts; n++) {
+        if (u->fmt_upload_index[n] <= index &&
+ index < u->fmt_upload_index[n] + u->fmt_upload_num[n])
+ {
+ p->last_input_fmt = input_fmt;
+ p->last_upload_fmt = u->upload_fmts[index];
+ p->last_sw_fmt = u->fmts[n];
+ MP_INFO(u->f, "upload %s -> %s\n",
+ mp_imgfmt_to_name(p->last_sw_fmt),
+ mp_imgfmt_to_name(p->last_input_fmt));
+ return true;
+ }
+ }
+
+ return false;
+}
+
+int mp_hwupload_find_upload_format(struct mp_hwupload *u, int imgfmt)
+{
+ struct priv *p = u->f->priv;
+
+ if (!update_format_decision(p, imgfmt))
+ return 0;
+ return p->last_upload_fmt;
+}
+
+static void process(struct mp_filter *f)
+{
+ struct priv *p = f->priv;
+
+ if (!mp_pin_can_transfer_data(f->ppins[1], f->ppins[0]))
+ return;
+
+ struct mp_frame frame = mp_pin_out_read(f->ppins[0]);
+ if (mp_frame_is_signaling(frame)) {
+ mp_pin_in_write(f->ppins[1], frame);
+ return;
+ }
+ if (frame.type != MP_FRAME_VIDEO) {
+ MP_ERR(f, "unsupported frame type\n");
+ goto error;
+ }
+ struct mp_image *src = frame.data;
+
+    // As documented, just pass through HW frames.
+ if (IMGFMT_IS_HWACCEL(src->imgfmt)) {
+ mp_pin_in_write(f->ppins[1], frame);
+ return;
+ }
+
+ if (src->w % 2 || src->h % 2) {
+ MP_ERR(f, "non-mod 2 input frames unsupported\n");
+ goto error;
+ }
+
+ if (!update_format_decision(p, src->imgfmt)) {
+ MP_ERR(f, "no hw upload format found\n");
+ goto error;
+ }
+
+ if (!mp_update_av_hw_frames_pool(&p->hw_pool, p->av_device_ctx,
+ p->public.hw_imgfmt, p->last_sw_fmt,
+ src->w, src->h))
+ {
+ MP_ERR(f, "failed to create frame pool\n");
+ goto error;
+ }
+
+ struct mp_image *dst = mp_av_pool_image_hw_upload(p->hw_pool, src);
+ if (!dst)
+ goto error;
+
+ mp_frame_unref(&frame);
+ mp_pin_in_write(f->ppins[1], MAKE_FRAME(MP_FRAME_VIDEO, dst));
+
+ return;
+
+error:
+ mp_frame_unref(&frame);
+ MP_ERR(f, "failed to upload frame\n");
+ mp_filter_internal_mark_failed(f);
+}
+
+static void destroy(struct mp_filter *f)
+{
+ struct priv *p = f->priv;
+
+ av_buffer_unref(&p->hw_pool);
+ av_buffer_unref(&p->av_device_ctx);
+}
+
+static const struct mp_filter_info hwupload_filter = {
+ .name = "hwupload",
+ .priv_size = sizeof(struct priv),
+ .process = process,
+ .destroy = destroy,
+};
+
+// The VO layer might have restricted format support. It might actually
+// work if this is input to a conversion filter anyway, but our format
+// negotiation is too stupid and non-existent to detect this.
+// So filter out all formats that are not explicitly supported.
+static bool vo_supports(struct mp_hwdec_ctx *ctx, int hw_fmt, int sw_fmt)
+{
+ if (!ctx->hw_imgfmt)
+ return true; // if unset, all formats are allowed
+ if (ctx->hw_imgfmt != hw_fmt)
+ return false;
+
+ for (int i = 0; ctx->supported_formats && ctx->supported_formats[i]; i++) {
+ if (ctx->supported_formats[i] == sw_fmt)
+ return true;
+ }
+
+ return false;
+}
+
+static bool probe_formats(struct mp_hwupload *u, int hw_imgfmt)
+{
+ struct priv *p = u->f->priv;
+
+ u->hw_imgfmt = hw_imgfmt;
+ u->num_fmts = 0;
+ u->num_upload_fmts = 0;
+
+ struct mp_stream_info *info = mp_filter_find_stream_info(u->f);
+ if (!info || !info->hwdec_devs) {
+ MP_ERR(u->f, "no hw context\n");
+ return false;
+ }
+
+ struct mp_hwdec_ctx *ctx = NULL;
+ AVHWFramesConstraints *cstr = NULL;
+
+ for (int n = 0; ; n++) {
+ struct mp_hwdec_ctx *cur = hwdec_devices_get_n(info->hwdec_devs, n);
+ if (!cur)
+ break;
+ if (!cur->av_device_ref)
+ continue;
+ cstr = av_hwdevice_get_hwframe_constraints(cur->av_device_ref, NULL);
+ if (!cstr)
+ continue;
+ bool found = false;
+ for (int i = 0; cstr->valid_hw_formats &&
+ cstr->valid_hw_formats[i] != AV_PIX_FMT_NONE; i++)
+ {
+ found |= cstr->valid_hw_formats[i] == imgfmt2pixfmt(hw_imgfmt);
+ }
+ if (found && (!cur->hw_imgfmt || cur->hw_imgfmt == hw_imgfmt)) {
+ ctx = cur;
+ break;
+ }
+ av_hwframe_constraints_free(&cstr);
+ }
+
+ if (!ctx) {
+ MP_ERR(u->f, "no support for this hw format\n");
+ return false;
+ }
+
+ // Probe for supported formats. This is very roundabout, because the
+ // hwcontext API does not give us this information directly. We resort to
+ // creating temporary AVHWFramesContexts in order to retrieve the list of
+ // supported formats. This should be relatively cheap as we don't create
+ // any real frames (although some backends do for probing info).
+
+ for (int n = 0; cstr->valid_sw_formats &&
+ cstr->valid_sw_formats[n] != AV_PIX_FMT_NONE; n++)
+ {
+ int imgfmt = pixfmt2imgfmt(cstr->valid_sw_formats[n]);
+ if (!imgfmt)
+ continue;
+
+ MP_VERBOSE(u->f, "looking at format %s\n", mp_imgfmt_to_name(imgfmt));
+
+        // Creates an AVHWFramesContext with the given parameters.
+ AVBufferRef *frames = NULL;
+ if (!mp_update_av_hw_frames_pool(&frames, ctx->av_device_ref,
+ hw_imgfmt, imgfmt, 128, 128))
+ {
+ MP_WARN(u->f, "failed to allocate pool\n");
+ continue;
+ }
+
+ enum AVPixelFormat *fmts;
+ if (av_hwframe_transfer_get_formats(frames,
+ AV_HWFRAME_TRANSFER_DIRECTION_TO, &fmts, 0) >= 0)
+ {
+ int index = u->num_fmts;
+ MP_TARRAY_APPEND(p, u->fmts, u->num_fmts, imgfmt);
+ MP_TARRAY_GROW(p, u->fmt_upload_index, index);
+ MP_TARRAY_GROW(p, u->fmt_upload_num, index);
+
+ u->fmt_upload_index[index] = u->num_upload_fmts;
+
+ for (int i = 0; fmts[i] != AV_PIX_FMT_NONE; i++) {
+ int fmt = pixfmt2imgfmt(fmts[i]);
+ if (!fmt)
+ continue;
+ MP_VERBOSE(u->f, "supports %s\n", mp_imgfmt_to_name(fmt));
+ if (vo_supports(ctx, hw_imgfmt, fmt))
+ MP_TARRAY_APPEND(p, u->upload_fmts, u->num_upload_fmts, fmt);
+ }
+
+ u->fmt_upload_num[index] =
+ u->num_upload_fmts - u->fmt_upload_index[index];
+
+ av_free(fmts);
+ }
+
+ av_buffer_unref(&frames);
+ }
+
+ p->av_device_ctx = av_buffer_ref(ctx->av_device_ref);
+ if (!p->av_device_ctx)
+ return false;
+
+ return u->num_upload_fmts > 0;
+}
+
+struct mp_hwupload *mp_hwupload_create(struct mp_filter *parent, int hw_imgfmt)
+{
+ struct mp_filter *f = mp_filter_create(parent, &hwupload_filter);
+ if (!f)
+ return NULL;
+
+ struct priv *p = f->priv;
+ struct mp_hwupload *u = &p->public;
+ u->f = f;
+
+ mp_filter_add_pin(f, MP_PIN_IN, "in");
+ mp_filter_add_pin(f, MP_PIN_OUT, "out");
+
+ if (!probe_formats(u, hw_imgfmt)) {
+ MP_ERR(f, "hardware format not supported\n");
+ goto error;
+ }
+
+ return u;
+error:
+ talloc_free(f);
+ return NULL;
+}
diff --git a/filters/f_hwtransfer.h b/filters/f_hwtransfer.h
new file mode 100644
index 0000000000..4595cb393d
--- /dev/null
+++ b/filters/f_hwtransfer.h
@@ -0,0 +1,32 @@
+#pragma once
+
+#include "filter.h"
+
+// A filter which uploads sw frames to hw. Ignores hw frames.
+struct mp_hwupload {
+ struct mp_filter *f;
+
+ // Hardware wrapper format, e.g. IMGFMT_VAAPI.
+ int hw_imgfmt;
+
+ // List of supported underlying surface formats.
+ int *fmts;
+ int num_fmts;
+ // List of supported upload image formats. May contain duplicate entries
+ // (which should be ignored).
+ int *upload_fmts;
+ int num_upload_fmts;
+ // For fmts[n], fmt_upload_index[n] gives the index of the first supported
+ // upload format in upload_fmts[], and fmt_upload_num[n] gives the number
+ // of formats at this position.
+ int *fmt_upload_index;
+ int *fmt_upload_num;
+};
+
+struct mp_hwupload *mp_hwupload_create(struct mp_filter *parent, int hw_imgfmt);
+
+// Return the best format suited for upload that is supported for a given input
+// imgfmt. This returns the same as imgfmt if the format is natively supported,
+// and otherwise a format that likely results in the least loss.
+// Returns 0 if completely unsupported.
+int mp_hwupload_find_upload_format(struct mp_hwupload *u, int imgfmt);
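To make the parallel-array layout described above concrete, here is a sketch of the lookup it implies; the helper itself is hypothetical (mp_hwupload_find_upload_format() already performs the equivalent resolution internally):

    #include "filters/f_hwtransfer.h"

    // Hypothetical helper: map an index into upload_fmts[] back to the
    // surface format that owns it. Per the layout above, fmts[n] covers
    // upload_fmts[fmt_upload_index[n] .. fmt_upload_index[n] + fmt_upload_num[n] - 1].
    static int surface_fmt_for_upload_index(struct mp_hwupload *u, int index)
    {
        for (int n = 0; n < u->num_fmts; n++) {
            if (index >= u->fmt_upload_index[n] &&
                index < u->fmt_upload_index[n] + u->fmt_upload_num[n])
                return u->fmts[n];
        }
        return 0; // index out of range
    }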
diff --git a/filters/f_lavfi.c b/filters/f_lavfi.c
new file mode 100644
index 0000000000..a97f126efb
--- /dev/null
+++ b/filters/f_lavfi.c
@@ -0,0 +1,952 @@
+/*
+ * This file is part of mpv.
+ *
+ * mpv is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * mpv is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with mpv. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+#include <inttypes.h>
+#include <stdarg.h>
+#include <assert.h>
+
+#include <libavutil/avstring.h>
+#include <libavutil/mem.h>
+#include <libavutil/mathematics.h>
+#include <libavutil/rational.h>
+#include <libavutil/error.h>
+#include <libavutil/opt.h>
+#include <libavfilter/avfilter.h>
+#include <libavfilter/buffersink.h>
+#include <libavfilter/buffersrc.h>
+
+#include "common/common.h"
+#include "common/av_common.h"
+#include "common/tags.h"
+#include "common/msg.h"
+
+#include "audio/format.h"
+#include "audio/aframe.h"
+#include "video/mp_image.h"
+#include "audio/fmt-conversion.h"
+#include "video/fmt-conversion.h"
+#include "video/hwdec.h"
+
+#include "f_lavfi.h"
+#include "filter.h"
+#include "filter_internal.h"
+#include "user_filters.h"
+
+#if LIBAVFILTER_VERSION_MICRO < 100
+#define av_buffersink_get_frame_flags(a, b, c) av_buffersink_get_frame(a, b)
+#define AV_BUFFERSINK_FLAG_NO_REQUEST 0
+#endif
+
+struct lavfi {
+ struct mp_log *log;
+ struct mp_filter *f;
+
+ char *graph_string;
+ char **graph_opts;
+ bool force_bidir;
+ enum mp_frame_type force_type;
+ bool direct_filter;
+ char **direct_filter_opts;
+
+ AVFilterGraph *graph;
+ // Set to true once all inputs have been initialized, and the graph is
+ // linked.
+ bool initialized;
+
+ // Graph is draining to either handle format changes (if input format
+ // changes for one pad, recreate the graph after draining all buffered
+ // frames), or undo previously sent EOF (libavfilter does not accept
+    // input anymore after sending EOF, so recreate the graph to "unstick" it).
+ bool draining_recover;
+
+ // Filter can't be put into a working state.
+ bool failed;
+
+ struct lavfi_pad **in_pads;
+ int num_in_pads;
+
+ struct lavfi_pad **out_pads;
+ int num_out_pads;
+
+ struct lavfi_pad **all_pads;
+ int num_all_pads;
+
+ AVFrame *tmp_frame;
+
+ struct mp_lavfi public;
+};
+
+struct lavfi_pad {
+ struct lavfi *main;
+ enum mp_frame_type type;
+ enum mp_pin_dir dir;
+ char *name; // user-given pad name
+
+ struct mp_pin *pin; // internal pin for this (never NULL once initialized)
+ int pin_index;
+
+ AVFilterContext *filter;
+ int filter_pad;
+ // buffersrc or buffersink connected to filter/filter_pad
+ AVFilterContext *buffer;
+ AVRational timebase;
+ bool buffer_is_eof; // received/sent EOF to the buffer
+
+ struct mp_tags *metadata;
+
+ // 1-frame queue for input.
+ struct mp_frame pending;
+
+ // Used to check input format changes.
+ struct mp_frame in_fmt;
+};
+
+// Free the libavfilter graph (not c), reset all state.
+// Does not free pending data intentionally.
+static void free_graph(struct lavfi *c)
+{
+ avfilter_graph_free(&c->graph);
+ for (int n = 0; n < c->num_all_pads; n++) {
+ struct lavfi_pad *pad = c->all_pads[n];
+
+ pad->filter = NULL;
+ pad->filter_pad = -1;
+ pad->buffer = NULL;
+ mp_frame_unref(&pad->in_fmt);
+ pad->buffer_is_eof = false;
+ }
+ c->initialized = false;
+ c->draining_recover = false;
+}
+
+static void add_pad(struct lavfi *c, int dir, int index, AVFilterContext *filter,
+ int filter_pad, const char *name, bool first_init)
+{
+ if (c->failed)
+ return;
+
+ enum AVMediaType avmt;
+ if (dir == MP_PIN_IN) {
+ avmt = avfilter_pad_get_type(filter->input_pads, filter_pad);
+ } else {
+ avmt = avfilter_pad_get_type(filter->output_pads, filter_pad);
+ }
+ int type;
+ switch (avmt) {
+ case AVMEDIA_TYPE_VIDEO: type = MP_FRAME_VIDEO; break;
+ case AVMEDIA_TYPE_AUDIO: type = MP_FRAME_AUDIO; break;
+ default:
+ MP_FATAL(c, "unknown media type\n");
+ c->failed = true;
+ return;
+ }
+
+ // For anonymous pads, just make something up. libavfilter allows duplicate
+ // pad names (while we don't), so we check for collisions along with normal
+ // duplicate pads below.
+ char tmp[80];
+ const char *dir_string = dir == MP_PIN_IN ? "in" : "out";
+ if (name) {
+ if (c->direct_filter) {
+ // libavfilter has this very unpleasant thing that filter labels
+ // don't have to be unique - in particular, both input and output
+ // are usually named "default". With direct filters, the user has
+ // no chance to provide better names, so do something to resolve it.
+ snprintf(tmp, sizeof(tmp), "%s_%s", name, dir_string);
+ name = tmp;
+ }
+ } else {
+ snprintf(tmp, sizeof(tmp), "%s%d", dir_string, index);
+ name = tmp;
+ }
+
+ struct lavfi_pad *p = NULL;
+ for (int n = 0; n < c->num_all_pads; n++) {
+ if (strcmp(c->all_pads[n]->name, name) == 0) {
+ p = c->all_pads[n];
+ break;
+ }
+ }
+
+ if (p) {
+ // Graph recreation case: reassociate an existing pad.
+ if (p->filter) {
+ // Collision due to duplicate names.
+ MP_FATAL(c, "more than one pad with label '%s'\n", name);
+ c->failed = true;
+ return;
+ }
+ if (p->dir != dir || p->type != type) {
+ // libavfilter graph parser behavior not deterministic.
+ MP_FATAL(c, "pad '%s' changed type or direction\n", name);
+ c->failed = true;
+ return;
+ }
+ } else {
+ if (!first_init) {
+ MP_FATAL(c, "filter pad '%s' got added later?\n", name);
+ c->failed = true;
+ return;
+ }
+ p = talloc_zero(c, struct lavfi_pad);
+ p->main = c;
+ p->dir = dir;
+ p->name = talloc_strdup(p, name);
+ p->type = type;
+ p->pin_index = -1;
+ p->metadata = talloc_zero(p, struct mp_tags);
+ if (p->dir == MP_PIN_IN)
+ MP_TARRAY_APPEND(c, c->in_pads, c->num_in_pads, p);
+ if (p->dir == MP_PIN_OUT)
+ MP_TARRAY_APPEND(c, c->out_pads, c->num_out_pads, p);
+ MP_TARRAY_APPEND(c, c->all_pads, c->num_all_pads, p);
+ }
+ p->filter = filter;
+ p->filter_pad = filter_pad;
+}
+
+static void add_pads(struct lavfi *c, int dir, AVFilterInOut *l, bool first_init)
+{
+ int index = 0;
+ for (; l; l = l->next)
+ add_pad(c, dir, index++, l->filter_ctx, l->pad_idx, l->name, first_init);
+}
+
+static void add_pads_direct(struct lavfi *c, int dir, AVFilterContext *f,
+ AVFilterPad *pads, int num_pads, bool first_init)
+{
+ for (int n = 0; n < num_pads; n++)
+ add_pad(c, dir, n, f, n, avfilter_pad_get_name(pads, n), first_init);
+}
+
+// Parse the user-provided filter graph, and populate the unlinked filter pads.
+static void precreate_graph(struct lavfi *c, bool first_init)
+{
+ assert(!c->graph);
+
+ c->failed = false;
+
+ c->graph = avfilter_graph_alloc();
+ if (!c->graph)
+ abort();
+
+ if (mp_set_avopts(c->log, c->graph, c->graph_opts) < 0)
+ goto error;
+
+ if (c->direct_filter) {
+ AVFilterContext *filter = avfilter_graph_alloc_filter(c->graph,
+ avfilter_get_by_name(c->graph_string), "filter");
+ if (!filter) {
+ MP_FATAL(c, "filter '%s' not found or failed to allocate\n",
+ c->graph_string);
+ goto error;
+ }
+
+ if (mp_set_avopts(c->log, filter->priv, c->direct_filter_opts) < 0)
+ goto error;
+
+ if (avfilter_init_str(filter, NULL) < 0) {
+ MP_FATAL(c, "filter failed to initialize\n");
+ goto error;
+ }
+
+ add_pads_direct(c, MP_PIN_IN, filter, filter->input_pads,
+ filter->nb_inputs, first_init);
+ add_pads_direct(c, MP_PIN_OUT, filter, filter->output_pads,
+ filter->nb_outputs, first_init);
+ } else {
+ AVFilterInOut *in = NULL, *out = NULL;
+ if (avfilter_graph_parse2(c->graph, c->graph_string, &in, &out) < 0) {
+ MP_FATAL(c, "parsing the filter graph failed\n");
+ goto error;
+ }
+ add_pads(c, MP_PIN_IN, in, first_init);
+ add_pads(c, MP_PIN_OUT, out, first_init);
+ avfilter_inout_free(&in);
+ avfilter_inout_free(&out);
+ }
+
+ for (int n = 0; n < c->num_all_pads; n++)
+ c->failed |= !c->all_pads[n]->filter;
+
+ if (c->failed)
+ goto error;
+
+ return;
+
+error:
+ free_graph(c);
+ c->failed = true;
+ mp_filter_internal_mark_failed(c->f);
+ return;
+}
+
+// Make sure EOF is sent to each input pad, so the graph can be drained properly.
+static void send_global_eof(struct lavfi *c)
+{
+ for (int n = 0; n < c->num_in_pads; n++) {
+ struct lavfi_pad *pad = c->in_pads[n];
+ if (!pad->buffer || pad->buffer_is_eof)
+ continue;
+
+ if (av_buffersrc_add_frame(pad->buffer, NULL) < 0)
+ MP_FATAL(c, "could not send EOF to filter\n");
+
+ pad->buffer_is_eof = true;
+ }
+}
+
+// libavfilter allows changing some parameters on the fly, but not
+// others.
+static bool is_aformat_ok(struct mp_aframe *a, struct mp_aframe *b)
+{
+ struct mp_chmap ca = {0}, cb = {0};
+ mp_aframe_get_chmap(a, &ca);
+ mp_aframe_get_chmap(b, &cb);
+ return mp_chmap_equals(&ca, &cb) &&
+ mp_aframe_get_rate(a) == mp_aframe_get_rate(b) &&
+ mp_aframe_get_format(a) == mp_aframe_get_format(b);
+}
+static bool is_vformat_ok(struct mp_image *a, struct mp_image *b)
+{
+ return a->imgfmt == b->imgfmt &&
+           a->w == b->w && a->h == b->h &&
+ a->params.p_w == b->params.p_w && a->params.p_h == b->params.p_h;
+}
+static bool is_format_ok(struct mp_frame a, struct mp_frame b)
+{
+ if (a.type == b.type && a.type == MP_FRAME_VIDEO)
+ return is_vformat_ok(a.data, b.data);
+ if (a.type == b.type && a.type == MP_FRAME_AUDIO)
+ return is_aformat_ok(a.data, b.data);
+ return false;
+}
+
+static void read_pad_input(struct lavfi *c, struct lavfi_pad *pad)
+{
+ assert(pad->dir == MP_PIN_IN);
+
+ if (pad->pending.type || c->draining_recover)
+ return;
+
+ pad->pending = mp_pin_out_read(pad->pin);
+
+ if (pad->pending.type && pad->pending.type != MP_FRAME_EOF &&
+ pad->pending.type != pad->type)
+ {
+ MP_FATAL(c, "unknown frame %s\n", mp_frame_type_str(pad->pending.type));
+ mp_frame_unref(&pad->pending);
+ }
+
+ if (mp_frame_is_data(pad->pending) && pad->in_fmt.type &&
+ !is_format_ok(pad->pending, pad->in_fmt))
+ {
+ if (!c->draining_recover)
+ MP_VERBOSE(c, "format change on %s\n", pad->name);
+ c->draining_recover = true;
+ if (c->initialized)
+ send_global_eof(c);
+ }
+}
+
+// Attempt to initialize all pads. Return true if all are initialized, or
+// false if more data is needed (or on error).
+static bool init_pads(struct lavfi *c)
+{
+ if (!c->graph)
+ goto error;
+
+ for (int n = 0; n < c->num_out_pads; n++) {
+ struct lavfi_pad *pad = c->out_pads[n];
+ if (pad->buffer)
+ continue;
+
+ const AVFilter *dst_filter = NULL;
+ if (pad->type == MP_FRAME_AUDIO) {
+ dst_filter = avfilter_get_by_name("abuffersink");
+ } else if (pad->type == MP_FRAME_VIDEO) {
+ dst_filter = avfilter_get_by_name("buffersink");
+ } else {
+ assert(0);
+ }
+
+ if (!dst_filter)
+ goto error;
+
+ char name[256];
+ snprintf(name, sizeof(name), "mpv_sink_%s", pad->name);
+
+ if (avfilter_graph_create_filter(&pad->buffer, dst_filter,
+ name, NULL, NULL, c->graph) < 0)
+ goto error;
+
+ if (avfilter_link(pad->filter, pad->filter_pad, pad->buffer, 0) < 0)
+ goto error;
+ }
+
+ for (int n = 0; n < c->num_in_pads; n++) {
+ struct lavfi_pad *pad = c->in_pads[n];
+ if (pad->buffer)
+ continue;
+
+ mp_frame_unref(&pad->in_fmt);
+
+ read_pad_input(c, pad);
+ // no input data, format unknown, can't init, wait longer.
+ if (!pad->pending.type)
+ return false;
+
+ if (mp_frame_is_data(pad->pending)) {
+ assert(pad->pending.type == pad->type);
+
+ pad->in_fmt = mp_frame_ref(pad->pending);
+ if (!pad->in_fmt.type)
+ goto error;
+
+ if (pad->in_fmt.type == MP_FRAME_VIDEO)
+ mp_image_unref_data(pad->in_fmt.data);
+ if (pad->in_fmt.type == MP_FRAME_AUDIO)
+ mp_aframe_unref_data(pad->in_fmt.data);
+ }
+
+ if (pad->pending.type == MP_FRAME_EOF && !pad->in_fmt.type) {
+ // libavfilter makes this painful. Init it with a dummy config,
+ // just so we can tell it the stream is EOF.
+ if (pad->type == MP_FRAME_AUDIO) {
+ struct mp_aframe *fmt = mp_aframe_create();
+ mp_aframe_set_format(fmt, AF_FORMAT_FLOAT);
+ mp_aframe_set_chmap(fmt, &(struct mp_chmap)MP_CHMAP_INIT_STEREO);
+ mp_aframe_set_rate(fmt, 48000);
+ pad->in_fmt = (struct mp_frame){MP_FRAME_AUDIO, fmt};
+ }
+ if (pad->type == MP_FRAME_VIDEO) {
+ struct mp_image *fmt = talloc_zero(NULL, struct mp_image);
+ mp_image_setfmt(fmt, IMGFMT_420P);
+ mp_image_set_size(fmt, 64, 64);
+ pad->in_fmt = (struct mp_frame){MP_FRAME_VIDEO, fmt};
+ }
+ }
+
+ if (pad->in_fmt.type != pad->type)
+ goto error;
+
+ AVBufferSrcParameters *params = av_buffersrc_parameters_alloc();
+ if (!params)
+ goto error;
+
+ pad->timebase = AV_TIME_BASE_Q;
+
+ char *filter_name = NULL;
+ if (pad->type == MP_FRAME_AUDIO) {
+ struct mp_aframe *fmt = pad->in_fmt.data;
+ params->format = af_to_avformat(mp_aframe_get_format(fmt));
+ params->sample_rate = mp_aframe_get_rate(fmt);
+ struct mp_chmap chmap = {0};
+ mp_aframe_get_chmap(fmt, &chmap);
+ params->channel_layout = mp_chmap_to_lavc(&chmap);
+ pad->timebase = (AVRational){1, mp_aframe_get_rate(fmt)};
+ filter_name = "abuffer";
+ } else if (pad->type == MP_FRAME_VIDEO) {
+ struct mp_image *fmt = pad->in_fmt.data;
+ params->format = imgfmt2pixfmt(fmt->imgfmt);
+ params->width = fmt->w;
+ params->height = fmt->h;
+ params->sample_aspect_ratio.num = fmt->params.p_w;
+ params->sample_aspect_ratio.den = fmt->params.p_h;
+ params->hw_frames_ctx = fmt->hwctx;
+ filter_name = "buffer";
+ } else {
+ assert(0);
+ }
+
+ params->time_base = pad->timebase;
+
+ const AVFilter *filter = avfilter_get_by_name(filter_name);
+ if (filter) {
+ char name[256];
+ snprintf(name, sizeof(name), "mpv_src_%s", pad->name);
+
+ pad->buffer = avfilter_graph_alloc_filter(c->graph, filter, name);
+ }
+ if (!pad->buffer) {
+ av_free(params);
+ goto error;
+ }
+
+ int ret = av_buffersrc_parameters_set(pad->buffer, params);
+ av_free(params);
+ if (ret < 0)
+ goto error;
+
+ if (avfilter_init_str(pad->buffer, NULL) < 0)
+ goto error;
+
+ if (avfilter_link(pad->buffer, 0, pad->filter, pad->filter_pad) < 0)
+ goto error;
+ }
+
+ return true;
+error:
+ MP_FATAL(c, "could not initialize filter pads\n");
+ c->failed = true;
+ mp_filter_internal_mark_failed(c->f);
+ return false;
+}
+
+static void dump_graph(struct lavfi *c)
+{
+#if LIBAVFILTER_VERSION_MICRO >= 100
+ MP_DBG(c, "Filter graph:\n");
+ char *s = avfilter_graph_dump(c->graph, NULL);
+ if (s)
+ MP_DBG(c, "%s\n", s);
+ av_free(s);
+#endif
+}
+
+// Initialize the graph if all inputs have formats set. If it's already
+// initialized, or can't be initialized yet, do nothing.
+static void init_graph(struct lavfi *c)
+{
+ assert(!c->initialized);
+
+ if (!c->graph)
+ precreate_graph(c, false);
+
+ if (init_pads(c)) {
+ struct mp_stream_info *info = mp_filter_find_stream_info(c->f);
+ if (info && info->hwdec_devs) {
+ struct mp_hwdec_ctx *hwdec = hwdec_devices_get_first(info->hwdec_devs);
+ for (int n = 0; n < c->graph->nb_filters; n++) {
+ AVFilterContext *filter = c->graph->filters[n];
+ if (hwdec && hwdec->av_device_ref)
+ filter->hw_device_ctx = av_buffer_ref(hwdec->av_device_ref);
+ }
+ }
+
+ // And here the actual libavfilter initialization happens.
+ if (avfilter_graph_config(c->graph, NULL) < 0) {
+ MP_FATAL(c, "failed to configure the filter graph\n");
+ free_graph(c);
+ c->failed = true;
+ mp_filter_internal_mark_failed(c->f);
+ return;
+ }
+
+ // The timebase is available after configuring.
+ for (int n = 0; n < c->num_out_pads; n++) {
+ struct lavfi_pad *pad = c->out_pads[n];
+
+ pad->timebase = pad->buffer->inputs[0]->time_base;
+ }
+
+ c->initialized = true;
+
+ if (!c->direct_filter) // (output uninteresting for direct filters)
+ dump_graph(c);
+ }
+}
+
+static bool feed_input_pads(struct lavfi *c)
+{
+ bool progress = false;
+ bool was_draining = c->draining_recover;
+
+ assert(c->initialized);
+
+ for (int n = 0; n < c->num_in_pads; n++) {
+ struct lavfi_pad *pad = c->in_pads[n];
+
+ bool requested = true;
+#if LIBAVFILTER_VERSION_MICRO >= 100
+ requested = av_buffersrc_get_nb_failed_requests(pad->buffer) > 0;
+#endif
+
+ // Always request a frame after EOF so that we can know if the EOF state
+ // changes (e.g. for sparse streams with midstream EOF).
+ requested |= pad->buffer_is_eof;
+
+ if (requested)
+ read_pad_input(c, pad);
+
+ if (!pad->pending.type || c->draining_recover)
+ continue;
+
+ if (pad->buffer_is_eof) {
+ MP_WARN(c, "eof state changed on %s\n", pad->name);
+ c->draining_recover = true;
+ send_global_eof(c);
+ continue;
+ }
+
+ AVFrame *frame = mp_frame_to_av(pad->pending, &pad->timebase);
+ bool eof = pad->pending.type == MP_FRAME_EOF;
+
+ mp_frame_unref(&pad->pending);
+
+ if (!frame && !eof) {
+ MP_FATAL(c, "out of memory or unsupported format\n");
+ continue;
+ }
+
+ pad->buffer_is_eof = !frame;
+
+ if (av_buffersrc_add_frame(pad->buffer, frame) < 0)
+ MP_FATAL(c, "could not pass frame to filter\n");
+ av_frame_free(&frame);
+
+ progress = true;
+ }
+
+ if (!was_draining && c->draining_recover)
+ progress = true;
+
+ return progress;
+}
+
+static bool read_output_pads(struct lavfi *c)
+{
+ bool progress = false;
+
+ assert(c->initialized);
+
+ for (int n = 0; n < c->num_out_pads; n++) {
+ struct lavfi_pad *pad = c->out_pads[n];
+
+ if (!mp_pin_in_needs_data(pad->pin))
+ continue;
+
+ assert(pad->buffer);
+
+ int r = AVERROR_EOF;
+ if (!pad->buffer_is_eof)
+ r = av_buffersink_get_frame_flags(pad->buffer, c->tmp_frame, 0);
+ if (r >= 0) {
+#if LIBAVUTIL_VERSION_MICRO >= 100
+ mp_tags_copy_from_av_dictionary(pad->metadata, c->tmp_frame->metadata);
+#endif
+ struct mp_frame frame =
+ mp_frame_from_av(pad->type, c->tmp_frame, &pad->timebase);
+ av_frame_unref(c->tmp_frame);
+ if (frame.type) {
+ mp_pin_in_write(pad->pin, frame);
+ } else {
+ MP_ERR(c, "could not use filter output\n");
+ mp_frame_unref(&frame);
+ }
+ progress = true;
+ } else if (r == AVERROR(EAGAIN)) {
+ // We expect that libavfilter will request input on one of the
+ // input pads (via av_buffersrc_get_nb_failed_requests()).
+ } else if (r == AVERROR_EOF) {
+ if (!c->draining_recover && !pad->buffer_is_eof)
+ mp_pin_in_write(pad->pin, MP_EOF_FRAME);
+ if (!pad->buffer_is_eof)
+ progress = true;
+ pad->buffer_is_eof = true;
+ } else {
+ // Real error - ignore it.
+ MP_ERR(c, "error on filtering (%d)\n", r);
+ }
+ }
+
+ return progress;
+}
+
+static void lavfi_process(struct mp_filter *f)
+{
+ struct lavfi *c = f->priv;
+
+ if (!c->initialized)
+ init_graph(c);
+
+ while (c->initialized) {
+ bool a = read_output_pads(c);
+ bool b = feed_input_pads(c);
+ if (!a && !b)
+ break;
+ }
+
+ // Start over on format changes or EOF draining.
+ if (c->draining_recover) {
+ // Wait until all outputs got EOF.
+ bool all_eof = true;
+ for (int n = 0; n < c->num_out_pads; n++)
+ all_eof &= c->out_pads[n]->buffer_is_eof;
+
+ if (all_eof) {
+ MP_VERBOSE(c, "recovering all eof\n");
+ free_graph(c);
+ mp_filter_internal_mark_progress(c->f);
+ }
+ }
+
+ if (c->failed)
+ mp_filter_internal_mark_failed(c->f);
+}
+
+static void lavfi_reset(struct mp_filter *f)
+{
+ struct lavfi *c = f->priv;
+
+ free_graph(c);
+
+ for (int n = 0; n < c->num_in_pads; n++)
+ mp_frame_unref(&c->in_pads[n]->pending);
+}
+
+static void lavfi_destroy(struct mp_filter *f)
+{
+ struct lavfi *c = f->priv;
+
+ lavfi_reset(f);
+ av_frame_free(&c->tmp_frame);
+}
+
+static bool lavfi_command(struct mp_filter *f, struct mp_filter_command *cmd)
+{
+ struct lavfi *c = f->priv;
+
+ if (!c->initialized)
+ return false;
+
+ switch (cmd->type) {
+#if LIBAVFILTER_VERSION_MICRO >= 100
+ case MP_FILTER_COMMAND_TEXT: {
+ return avfilter_graph_send_command(c->graph, "all", cmd->cmd, cmd->arg,
+ &(char){0}, 0, 0) >= 0;
+ }
+#endif
+ case MP_FILTER_COMMAND_GET_META: {
+ // We can worry later about what it should do to multi output filters.
+ if (c->num_out_pads < 1)
+ return false;
+ struct mp_tags **ptags = cmd->res;
+ *ptags = mp_tags_dup(NULL, c->out_pads[0]->metadata);
+ return true;
+ }
+ default:
+ return false;
+ }
+}
+
+static const struct mp_filter_info lavfi_filter = {
+ .name = "lavfi",
+ .priv_size = sizeof(struct lavfi),
+ .process = lavfi_process,
+ .reset = lavfi_reset,
+ .destroy = lavfi_destroy,
+ .command = lavfi_command,
+};
+
+static struct lavfi *lavfi_alloc(struct mp_filter *parent)
+{
+ struct mp_filter *f = mp_filter_create(parent, &lavfi_filter);
+ if (!f)
+ return NULL;
+
+ struct lavfi *c = f->priv;
+
+ c->f = f;
+ c->log = f->log;
+ c->public.f = f;
+ c->tmp_frame = av_frame_alloc();
+ if (!c->tmp_frame)
+ abort();
+
+ return c;
+}
+
+static struct mp_lavfi *do_init(struct lavfi *c)
+{
+ precreate_graph(c, true);
+
+ if (c->failed)
+ goto error;
+
+ for (int n = 0; n < c->num_in_pads + c->num_out_pads; n++) {
+ // First add input pins to satisfy order for bidir graph types.
+ struct lavfi_pad *pad =
+ n < c->num_in_pads ? c->in_pads[n] : c->out_pads[n - c->num_in_pads];
+
+ pad->pin_index = c->f->num_pins;
+ pad->pin = mp_filter_add_pin(c->f, pad->dir, pad->name);
+
+ if (c->force_type && c->force_type != pad->type) {
+ MP_FATAL(c, "mismatching media type\n");
+ goto error;
+ }
+ }
+
+ if (c->force_bidir) {
+ if (c->f->num_pins != 2) {
+ MP_FATAL(c, "exactly 2 pads required\n");
+ goto error;
+ }
+ if (mp_pin_get_dir(c->f->ppins[0]) != MP_PIN_OUT ||
+ mp_pin_get_dir(c->f->ppins[1]) != MP_PIN_IN)
+ {
+ MP_FATAL(c, "1 input and 1 output pad required\n");
+ goto error;
+ }
+ }
+
+ return &c->public;
+
+error:
+ talloc_free(c->f);
+ return NULL;
+}
+
+struct mp_lavfi *mp_lavfi_create_graph(struct mp_filter *parent,
+ enum mp_frame_type type, bool bidir,
+ char **graph_opts, const char *graph)
+{
+ struct lavfi *c = lavfi_alloc(parent);
+ if (!c)
+ return NULL;
+
+ c->force_type = type;
+ c->force_bidir = bidir;
+ c->graph_opts = mp_dup_str_array(c, graph_opts);
+ c->graph_string = talloc_strdup(c, graph);
+
+ return do_init(c);
+}
+
+struct mp_lavfi *mp_lavfi_create_filter(struct mp_filter *parent,
+ enum mp_frame_type type, bool bidir,
+ char **graph_opts,
+ const char *filter, char **filter_opts)
+{
+ struct lavfi *c = lavfi_alloc(parent);
+ if (!c)
+ return NULL;
+
+ c->force_type = type;
+ c->force_bidir = bidir;
+ c->graph_opts = mp_dup_str_array(c, graph_opts);
+ c->graph_string = talloc_strdup(c, filter);
+ c->direct_filter_opts = mp_dup_str_array(c, filter_opts);
+ c->direct_filter = true;
+
+ return do_init(c);
+}
+
+struct lavfi_user_opts {
+ bool is_bridge;
+
+ char *graph;
+ char **avopts;
+
+ char *filter_name;
+ char **filter_opts;
+};
+
+static struct mp_filter *vf_lavfi_create(struct mp_filter *parent, void *options)
+{
+ struct lavfi_user_opts *opts = options;
+ struct mp_lavfi *l;
+ if (opts->is_bridge) {
+ l = mp_lavfi_create_filter(parent, MP_FRAME_VIDEO, true,
+ opts->avopts, opts->filter_name,
+ opts->filter_opts);
+ } else {
+ l = mp_lavfi_create_graph(parent, MP_FRAME_VIDEO, true,
+ opts->avopts, opts->graph);
+ }
+ talloc_free(opts);
+ return l ? l->f : NULL;
+}
+
+static bool is_single_video_only(const AVFilterPad *pads)
+{
+ int count = avfilter_pad_count(pads);
+ if (count != 1)
+ return false;
+ return avfilter_pad_get_type(pads, 0) == AVMEDIA_TYPE_VIDEO;
+}
+
+// Does it have exactly one video input and one video output?
+static bool is_usable(const AVFilter *filter)
+{
+ return is_single_video_only(filter->inputs) &&
+ is_single_video_only(filter->outputs);
+}
+
+static void print_help(struct mp_log *log)
+{
+ mp_info(log, "List of libavfilter filters:\n");
+ for (const AVFilter *filter = avfilter_next(NULL); filter;
+ filter = avfilter_next(filter))
+ {
+ if (is_usable(filter))
+ mp_info(log, " %-16s %s\n", filter->name, filter->description);
+ }
+ mp_info(log, "\n"
+ "This lists video->video filters only. Refer to\n"
+ "\n"
+ " https://ffmpeg.org/ffmpeg-filters.html\n"
+ "\n"
+ "to see how to use each filter and what arguments each filter takes.\n"
+ "Also, be sure to quote the FFmpeg filter string properly, e.g.:\n"
+ "\n"
+ " \"--vf=lavfi=[gradfun=20:30]\"\n"
+ "\n"
+ "Otherwise, mpv and libavfilter syntax will conflict.\n"
+ "\n");
+}
+
+#define OPT_BASE_STRUCT struct lavfi_user_opts
+
+const struct mp_user_filter_entry vf_lavfi = {
+ .desc = {
+ .description = "libavfilter bridge",
+ .name = "lavfi",
+ .priv_size = sizeof(OPT_BASE_STRUCT),
+ .options = (const m_option_t[]){
+ OPT_STRING("graph", graph, M_OPT_MIN, .min = 1),
+ OPT_KEYVALUELIST("o", avopts, 0),
+ {0}
+ },
+ .print_help = print_help,
+ },
+ .create = vf_lavfi_create,
+};
+
+const struct mp_user_filter_entry vf_lavfi_bridge = {
+ .desc = {
+ .description = "libavfilter bridge (explicit options)",
+ .name = "lavfi-bridge",
+ .priv_size = sizeof(OPT_BASE_STRUCT),
+ .options = (const m_option_t[]){
+ OPT_STRING("name", filter_name, M_OPT_MIN, .min = 1),
+ OPT_KEYVALUELIST("opts", filter_opts, 0),
+ OPT_KEYVALUELIST("o", avopts, 0),
+ {0}
+ },
+ .priv_defaults = &(const OPT_BASE_STRUCT){
+ .is_bridge = true,
+ },
+ .print_help = print_help,
+ },
+ .create = vf_lavfi_create,
+};
diff --git a/filters/f_lavfi.h b/filters/f_lavfi.h
new file mode 100644
index 0000000000..7efc96c3de
--- /dev/null
+++ b/filters/f_lavfi.h
@@ -0,0 +1,30 @@
+#pragma once
+
+#include "frame.h"
+
+// A wrapped libavfilter filter or filter graph.
+// (to free this, free the filter itself, mp_lavfi.f)
+struct mp_lavfi {
+ // This mirrors the libavfilter pads according to the user specification.
+ struct mp_filter *f;
+};
+
+// Create a filter with the given libavfilter graph string. The graph must
+// have labels on all unconnected pads; these are exposed as pins.
+// type: if not 0, require all pads to have a compatible media type (or error)
+// bidir: if true, require exactly 2 pads, 1 input, 1 output (mp_lavfi.f will
+// have the input as pin 0, and the output as pin 1)
+// graph_opts: options for the filter graph, see mp_set_avopts() (NULL is OK)
+// graph: a libavfilter graph specification
+struct mp_lavfi *mp_lavfi_create_graph(struct mp_filter *parent,
+ enum mp_frame_type type, bool bidir,
+ char **graph_opts, const char *graph);
+
+// Unlike mp_lavfi_create_graph(), this creates a single filter, using explicit
+// options, and without involving the libavfilter graph parser. Instead of
+// a graph, it takes a filter name, and a key-value list of filter options
+// (which are applied with mp_set_avopts()).
+struct mp_lavfi *mp_lavfi_create_filter(struct mp_filter *parent,
+ enum mp_frame_type type, bool bidir,
+ char **graph_opts,
+ const char *filter, char **filter_opts);
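A brief usage sketch for the graph variant; the owner filter and the particular graph string are illustrative assumptions, not part of the change:

    #include "filters/f_lavfi.h"
    #include "filters/frame.h"

    // Hypothetical: wrap a single-input/single-output libavfilter graph.
    static struct mp_filter *make_gradfun(struct mp_filter *owner)
    {
        struct mp_lavfi *l =
            mp_lavfi_create_graph(owner, MP_FRAME_VIDEO,
                                  true,            // bidir: pin 0 in, pin 1 out
                                  NULL,            // no extra graph AVOptions
                                  "gradfun=20:30");
        return l ? l->f : NULL;
    }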
diff --git a/filters/f_output_chain.c b/filters/f_output_chain.c
new file mode 100644
index 0000000000..d98c7ca4b3
--- /dev/null
+++ b/filters/f_output_chain.c
@@ -0,0 +1,564 @@
+#include "common/global.h"
+#include "options/m_config.h"
+#include "options/m_option.h"
+#include "video/out/vo.h"
+
+#include "filter_internal.h"
+
+#include "f_autoconvert.h"
+#include "f_auto_filters.h"
+#include "f_lavfi.h"
+#include "f_output_chain.h"
+#include "f_utils.h"
+#include "user_filters.h"
+
+struct chain {
+ struct mp_filter *f;
+ struct mp_log *log;
+
+ enum mp_output_chain_type type;
+
+ // Expected media type.
+ enum mp_frame_type frame_type;
+
+ struct mp_stream_info stream_info;
+
+ struct mp_user_filter **pre_filters;
+ int num_pre_filters;
+ struct mp_user_filter **post_filters;
+ int num_post_filters;
+
+ struct mp_user_filter **user_filters;
+ int num_user_filters;
+
+    // Concatenated list of pre+user+post filters.
+ struct mp_user_filter **all_filters;
+ int num_all_filters;
+ // First input/last output of all_filters[].
+ struct mp_pin *filters_in, *filters_out;
+
+ struct mp_user_filter *input, *output;
+ struct mp_autoconvert *convert;
+
+ struct vo *vo;
+
+ struct mp_output_chain public;
+};
+
+// This wraps each individual "actual" filter for:
+// - isolating against its failure (logging it and disabling the filter)
+// - tracking its output format (mostly for logging)
+// - storing extra per-filter information like the filter label
+struct mp_user_filter {
+ struct chain *p;
+
+ struct mp_filter *wrapper; // parent filter for f
+ struct mp_filter *f; // the actual user filter
+ struct m_obj_settings *args; // NULL, or list of 1 item with creation args
+ char *label;
+ bool generated_label;
+ char *name;
+ bool is_output_converter;
+ bool is_input;
+
+ struct mp_image_params last_out_params;
+
+ bool failed;
+ bool error_eof_sent;
+};
+
+static void update_output_caps(struct chain *p)
+{
+ if (p->type != MP_OUTPUT_CHAIN_VIDEO)
+ return;
+
+ mp_autoconvert_clear(p->convert);
+
+ if (p->vo) {
+ uint8_t allowed_output_formats[IMGFMT_END - IMGFMT_START] = {0};
+ vo_query_formats(p->vo, allowed_output_formats);
+
+ for (int n = 0; n < MP_ARRAY_SIZE(allowed_output_formats); n++) {
+ if (allowed_output_formats[n])
+ mp_autoconvert_add_imgfmt(p->convert, IMGFMT_START + n, 0);
+ }
+
+ if (p->vo->hwdec_devs)
+ mp_autoconvert_add_vo_hwdec_subfmts(p->convert, p->vo->hwdec_devs);
+ }
+}
+
+static bool check_out_format_change(struct mp_user_filter *u,
+ struct mp_frame frame)
+{
+ struct chain *p = u->p;
+ bool changed = false;
+
+ if (frame.type == MP_FRAME_VIDEO) {
+ struct mp_image *img = frame.data;
+
+ if (!mp_image_params_equal(&img->params, &u->last_out_params)) {
+ MP_VERBOSE(p, "[%s] %s\n", u->name,
+ mp_image_params_to_str(&img->params));
+ u->last_out_params = img->params;
+
+ if (u->is_input) {
+ p->public.input_params = img->params;
+
+                // Unfortunately there's no good place to update these.
+                // But a common case is enabling HW decoding, which
+                // might init some hwdec support in the VO and update
+                // the VO's format list.
+ update_output_caps(p);
+ } else if (u->is_output_converter) {
+ p->public.output_params = img->params;
+ }
+
+ p->public.reconfig_happened = true;
+ changed = true;
+ }
+ }
+
+ return changed;
+}
+
+static void process_user(struct mp_filter *f)
+{
+ struct mp_user_filter *u = f->priv;
+ struct chain *p = u->p;
+
+ mp_filter_set_error_handler(u->f, f);
+ const char *name = u->label ? u->label : u->name;
+ assert(u->name);
+
+ if (!u->failed && mp_filter_has_failed(u->f)) {
+ if (u->is_output_converter) {
+ // This is a fuckup we can't ignore.
+ MP_FATAL(p, "Cannot convert decoder/filter output to any format "
+ "supported by the output.\n");
+ p->public.failed_output_conversion = true;
+ mp_filter_wakeup(p->f);
+ } else {
+ MP_ERR(p, "Disabling filter %s because it has failed.\n", name);
+            mp_filter_reset(u->f); // clear out stale buffered data
+ }
+ u->failed = true;
+ }
+
+ if (u->failed) {
+ if (u->is_output_converter) {
+ if (mp_pin_in_needs_data(f->ppins[1])) {
+ if (!u->error_eof_sent)
+ mp_pin_in_write(f->ppins[1], MP_EOF_FRAME);
+ u->error_eof_sent = true;
+ }
+ return;
+ }
+
+ mp_pin_transfer_data(f->ppins[1], f->ppins[0]);
+ return;
+ }
+
+ mp_pin_transfer_data(u->f->pins[0], f->ppins[0]);
+
+ if (mp_pin_can_transfer_data(f->ppins[1], u->f->pins[1])) {
+ struct mp_frame frame = mp_pin_out_read(u->f->pins[1]);
+
+ check_out_format_change(u, frame);
+
+ mp_pin_in_write(f->ppins[1], frame);
+ }
+}
+
+static void reset_user(struct mp_filter *f)
+{
+ struct mp_user_filter *u = f->priv;
+
+ u->error_eof_sent = false;
+}
+
+static void destroy_user(struct mp_filter *f)
+{
+ struct mp_user_filter *u = f->priv;
+
+ struct m_option dummy = {.type = &m_option_type_obj_settings_list};
+ m_option_free(&dummy, &u->args);
+
+ mp_filter_free_children(f);
+}
+
+static const struct mp_filter_info user_wrapper_filter = {
+ .name = "user_filter_wrapper",
+ .priv_size = sizeof(struct mp_user_filter),
+ .process = process_user,
+ .reset = reset_user,
+ .destroy = destroy_user,
+};
+
+static struct mp_user_filter *create_wrapper_filter(struct chain *p)
+{
+ struct mp_filter *f = mp_filter_create(p->f, &user_wrapper_filter);
+ if (!f)
+ abort();
+ struct mp_user_filter *wrapper = f->priv;
+ wrapper->wrapper = f;
+ wrapper->p = p;
+ mp_filter_add_pin(f, MP_PIN_IN, "in");
+ mp_filter_add_pin(f, MP_PIN_OUT, "out");
+ return wrapper;
+}
+
+// Rebuild p->all_filters and relink the filters. Non-destructive if no change.
+static void relink_filter_list(struct chain *p)
+{
+ struct mp_user_filter **all_filters[3] =
+ {p->pre_filters, p->user_filters, p->post_filters};
+ int all_filters_num[3] =
+ {p->num_pre_filters, p->num_user_filters, p->num_post_filters};
+ p->num_all_filters = 0;
+ for (int n = 0; n < 3; n++) {
+ struct mp_user_filter **filters = all_filters[n];
+ int filters_num = all_filters_num[n];
+ for (int i = 0; i < filters_num; i++)
+ MP_TARRAY_APPEND(p, p->all_filters, p->num_all_filters, filters[i]);
+ }
+
+ assert(p->num_all_filters > 0);
+
+ p->filters_in = NULL;
+ p->filters_out = NULL;
+ for (int n = 0; n < p->num_all_filters; n++) {
+ struct mp_filter *f = p->all_filters[n]->wrapper;
+ if (n == 0)
+ p->filters_in = f->pins[0];
+ if (p->filters_out)
+ mp_pin_connect(f->pins[0], p->filters_out);
+ p->filters_out = f->pins[1];
+ }
+}
+
+static void process(struct mp_filter *f)
+{
+ struct chain *p = f->priv;
+
+ if (mp_pin_can_transfer_data(p->filters_in, f->ppins[0])) {
+ struct mp_frame frame = mp_pin_out_read(f->ppins[0]);
+
+ if (frame.type == MP_FRAME_EOF) {
+ MP_VERBOSE(p, "filter input EOF\n");
+ p->public.got_input_eof = true;
+ }
+
+ mp_pin_in_write(p->filters_in, frame);
+ }
+
+ if (mp_pin_can_transfer_data(f->ppins[1], p->filters_out)) {
+ struct mp_frame frame = mp_pin_out_read(p->filters_out);
+
+ if (frame.type == MP_FRAME_EOF) {
+ MP_VERBOSE(p, "filter output EOF\n");
+ p->public.got_output_eof = true;
+ }
+
+ mp_pin_in_write(f->ppins[1], frame);
+ }
+}
+
+static void reset(struct mp_filter *f)
+{
+ struct chain *p = f->priv;
+
+ p->public.got_input_eof = false;
+ p->public.got_output_eof = false;
+}
+
+void mp_output_chain_reset_harder(struct mp_output_chain *c)
+{
+ struct chain *p = c->f->priv;
+
+ mp_filter_reset(p->f);
+
+ p->public.failed_output_conversion = false;
+ for (int n = 0; n < p->num_all_filters; n++)
+ p->all_filters[n]->failed = false;
+}
+
+static void destroy(struct mp_filter *f)
+{
+ reset(f);
+}
+
+static const struct mp_filter_info output_chain_filter = {
+ .name = "output_chain",
+ .priv_size = sizeof(struct chain),
+ .process = process,
+ .reset = reset,
+ .destroy = destroy,
+};
+
+static double get_display_fps(struct mp_stream_info *i)
+{
+ struct chain *p = i->priv;
+ double res = 0;
+ if (p->vo)
+ vo_control(p->vo, VOCTRL_GET_DISPLAY_FPS, &res);
+ return res;
+}
+
+static double get_container_fps(struct mp_stream_info *i)
+{
+ struct chain *p = i->priv;
+ return p->public.container_fps;
+}
+
+void mp_output_chain_set_vo(struct mp_output_chain *c, struct vo *vo)
+{
+ struct chain *p = c->f->priv;
+
+ p->stream_info.hwdec_devs = vo ? vo->hwdec_devs : NULL;
+ p->stream_info.osd = vo ? vo->osd : NULL;
+ p->stream_info.rotate90 = vo ? vo->driver->caps & VO_CAP_ROTATE90 : false;
+ p->vo = vo;
+ update_output_caps(p);
+}
+
+static struct mp_user_filter *find_by_label(struct chain *p, const char *label)
+{
+ for (int n = 0; n < p->num_user_filters; n++) {
+ struct mp_user_filter *u = p->user_filters[n];
+ if (label && u->label && strcmp(label, u->label) == 0)
+ return u;
+ }
+ return NULL;
+}
+
+bool mp_output_chain_command(struct mp_output_chain *c, const char *target,
+ struct mp_filter_command *cmd)
+{
+ struct chain *p = c->f->priv;
+
+ if (!target || !target[0])
+ return false;
+
+ if (strcmp(target, "all") == 0 && cmd->type == MP_FILTER_COMMAND_TEXT) {
+ // (Following old semantics.)
+ for (int n = 0; n < p->num_user_filters; n++)
+ mp_filter_command(p->user_filters[n]->f, cmd);
+ return true;
+ }
+
+ struct mp_user_filter *f = find_by_label(p, target);
+ if (!f)
+ return false;
+
+ return mp_filter_command(f->f, cmd);
+}
+
+static bool compare_filter(struct m_obj_settings *a, struct m_obj_settings *b)
+{
+ if (a == b || !a || !b)
+ return a == b;
+
+ if (!a->name || !b->name)
+ return a->name == b->name;
+
+ if (!!a->label != !!b->label || (a->label && strcmp(a->label, b->label) != 0))
+ return false;
+
+ if (a->enabled != b->enabled)
+ return false;
+
+ if (!a->attribs || !a->attribs[0])
+ return !b->attribs || !b->attribs[0];
+
+ for (int n = 0; a->attribs[n] || b->attribs[n]; n++) {
+ if (!a->attribs[n] || !b->attribs[n])
+ return false;
+ if (strcmp(a->attribs[n], b->attribs[n]) != 0)
+ return false;
+ }
+
+ return true;
+}
+
+bool mp_output_chain_update_filters(struct mp_output_chain *c,
+ struct m_obj_settings *list)
+{
+ struct chain *p = c->f->priv;
+
+ struct mp_user_filter **add = NULL; // new filters
+ int num_add = 0;
+ struct mp_user_filter **res = NULL; // new final list
+ int num_res = 0;
+ bool *used = talloc_zero_array(NULL, bool, p->num_user_filters);
+
+ for (int n = 0; list && list[n].name; n++) {
+ struct m_obj_settings *entry = &list[n];
+
+ if (!entry->enabled)
+ continue;
+
+ struct mp_user_filter *u = NULL;
+
+ for (int i = 0; i < p->num_user_filters; i++) {
+ if (!used[i] && compare_filter(entry, p->user_filters[i]->args)) {
+ u = p->user_filters[i];
+ used[i] = true;
+ break;
+ }
+ }
+
+ if (!u) {
+ u = create_wrapper_filter(p);
+ u->name = talloc_strdup(u, entry->name);
+ u->label = talloc_strdup(u, entry->label);
+ u->f = mp_create_user_filter(u->wrapper, p->type, entry->name,
+ entry->attribs);
+ if (!u->f) {
+ talloc_free(u->wrapper);
+ goto error;
+ }
+
+ struct m_obj_settings *args = (struct m_obj_settings[2]){*entry, {0}};
+
+ struct m_option dummy = {.type = &m_option_type_obj_settings_list};
+ m_option_copy(&dummy, &u->args, &args);
+
+ MP_TARRAY_APPEND(NULL, add, num_add, u);
+ }
+
+ MP_TARRAY_APPEND(p, res, num_res, u);
+ }
+
+ // At this point we definitely know we'll use the new list, so clean up.
+
+ for (int n = 0; n < p->num_user_filters; n++) {
+ if (!used[n])
+ talloc_free(p->user_filters[n]->wrapper);
+ }
+
+ talloc_free(p->user_filters);
+ p->user_filters = res;
+ p->num_user_filters = num_res;
+
+ relink_filter_list(p);
+
+ for (int n = 0; n < p->num_user_filters; n++) {
+ struct mp_user_filter *u = p->user_filters[n];
+ if (u->generated_label)
+ TA_FREEP(&u->label);
+ if (!u->label) {
+ for (int i = 0; i < 100; i++) {
+ char *label = mp_tprintf(80, "%s.%02d", u->name, i);
+ if (!find_by_label(p, label)) {
+ u->label = talloc_strdup(u, label);
+ u->generated_label = true;
+ break;
+ }
+ }
+ }
+ }
+
+ MP_VERBOSE(p, "User filter list:\n");
+ for (int n = 0; n < p->num_user_filters; n++) {
+ struct mp_user_filter *u = p->user_filters[n];
+ MP_VERBOSE(p, " %s (%s)\n", u->name, u->label ? u->label : "-");
+ }
+ if (!p->num_user_filters)
+ MP_VERBOSE(p, " (empty)\n");
+
+ // Filters can load hwdec interops, which might add new formats.
+ update_output_caps(p);
+
+ mp_filter_wakeup(p->f);
+
+ talloc_free(add);
+ talloc_free(used);
+ return true;
+
+error:
+ for (int n = 0; n < num_add; n++)
+ talloc_free(add[n]);
+ talloc_free(add);
+ talloc_free(used);
+ return false;
+}
+
+static void create_video_things(struct chain *p)
+{
+ p->frame_type = MP_FRAME_VIDEO;
+
+ p->stream_info.priv = p;
+ p->stream_info.get_display_fps = get_display_fps;
+ p->stream_info.get_container_fps = get_container_fps;
+
+ p->f->stream_info = &p->stream_info;
+
+ struct mp_user_filter *f = create_wrapper_filter(p);
+ f->name = "userdeint";
+ f->f = mp_deint_create(f->wrapper);
+ if (!f->f)
+ abort();
+ MP_TARRAY_APPEND(p, p->pre_filters, p->num_pre_filters, f);
+
+ f = create_wrapper_filter(p);
+ f->name = "autorotate";
+ f->f = mp_autorotate_create(f->wrapper);
+ if (!f->f)
+ abort();
+ MP_TARRAY_APPEND(p, p->post_filters, p->num_post_filters, f);
+}
+
+struct mp_output_chain *mp_output_chain_create(struct mp_filter *parent,
+ enum mp_output_chain_type type)
+{
+ struct mp_filter *f = mp_filter_create(parent, &output_chain_filter);
+ if (!f)
+ return NULL;
+
+ mp_filter_add_pin(f, MP_PIN_IN, "in");
+ mp_filter_add_pin(f, MP_PIN_OUT, "out");
+
+ const char *log_name = NULL;
+ switch (type) {
+ case MP_OUTPUT_CHAIN_VIDEO: log_name = "!vf"; break;
+ }
+ if (log_name)
+ f->log = mp_log_new(f, parent->global->log, log_name);
+
+ struct chain *p = f->priv;
+ p->f = f;
+ p->log = f->log;
+ p->type = type;
+
+ struct mp_output_chain *c = &p->public;
+ c->f = f;
+
+ // Dummy filter for reporting and logging the input format.
+ p->input = create_wrapper_filter(p);
+ p->input->f = mp_bidir_nop_filter_create(p->input->wrapper);
+ if (!p->input->f)
+ abort();
+ p->input->name = "in";
+ p->input->is_input = true;
+ MP_TARRAY_APPEND(p, p->pre_filters, p->num_pre_filters, p->input);
+
+ switch (type) {
+ case MP_OUTPUT_CHAIN_VIDEO: create_video_things(p); break;
+ }
+
+ p->output = create_wrapper_filter(p);
+ p->convert = mp_autoconvert_create(p->output->wrapper);
+ if (!p->convert)
+ abort();
+ p->output->name = "convert";
+ p->output->is_output_converter = true;
+ p->output->f = p->convert->f;
+ MP_TARRAY_APPEND(p, p->post_filters, p->num_post_filters, p->output);
+
+ relink_filter_list(p);
+
+ reset(f);
+
+ return c;
+}
diff --git a/filters/f_output_chain.h b/filters/f_output_chain.h
new file mode 100644
index 0000000000..64667ed1bd
--- /dev/null
+++ b/filters/f_output_chain.h
@@ -0,0 +1,59 @@
+#pragma once
+
+#include "options/m_option.h"
+#include "video/mp_image.h"
+
+#include "filter.h"
+
+enum mp_output_chain_type {
+ MP_OUTPUT_CHAIN_VIDEO = 1, // --vf
+};
+
+// A classic single-media filter chain, which reflects --vf and --af.
+// It manages the user-specified filter chain, and VO/AO output conversions.
+// Also handles some automatic filtering (auto rotation and such).
+struct mp_output_chain {
+ // This filter will have 1 input (from decoder) and 1 output (to VO/AO).
+ struct mp_filter *f;
+
+ bool got_input_eof;
+ bool got_output_eof;
+
+ // The filter chain output could not be converted to any format the output
+ // supports.
+ bool failed_output_conversion;
+
+ // Set if any formats in the chain changed. The user can reset the flag.
+    // For implementing change notifications on input/output_params.
+ bool reconfig_happened;
+
+ // --- for type==MP_OUTPUT_CHAIN_VIDEO
+ struct mp_image_params input_params;
+ struct mp_image_params output_params;
+ double container_fps;
+};
+
+// (free by freeing mp_output_chain.f)
+struct mp_output_chain *mp_output_chain_create(struct mp_filter *parent,
+ enum mp_output_chain_type type);
+
+// Set the VO, which will be used to determine basic capabilities like format
+// and rotation support, and to init hardware filtering things.
+// For type==MP_OUTPUT_CHAIN_VIDEO only.
+struct vo;
+void mp_output_chain_set_vo(struct mp_output_chain *p, struct vo *vo);
+
+// Send a command to the filter with the target label.
+bool mp_output_chain_command(struct mp_output_chain *p, const char *target,
+ struct mp_filter_command *cmd);
+
+// Perform a seek reset _and_ reset all filter failure states, so that future
+// filtering continues normally.
+void mp_output_chain_reset_harder(struct mp_output_chain *p);
+
+// Try to exchange the filter list. If creation of any filter fails, roll
+// back the changes, and return false.
+struct m_obj_settings;
+bool mp_output_chain_update_filters(struct mp_output_chain *p,
+ struct m_obj_settings *list);
+
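A minimal usage sketch (not code from this patch); "parent", "vo" and "vf_settings" are hypothetical names for the owning filter, the active VO, and an m_obj_settings list such as the parsed --vf option:

    struct mp_output_chain *chain =
        mp_output_chain_create(parent, MP_OUTPUT_CHAIN_VIDEO);
    mp_output_chain_set_vo(chain, vo); // VO caps drive the output conversion
    if (!mp_output_chain_update_filters(chain, vf_settings)) {
        // Creation of some filter failed; the previous list stays active.
    }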
diff --git a/filters/f_swscale.c b/filters/f_swscale.c
new file mode 100644
index 0000000000..953b5ec77e
--- /dev/null
+++ b/filters/f_swscale.c
@@ -0,0 +1,148 @@
+/*
+ * This file is part of mpv.
+ *
+ * mpv is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * mpv is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with mpv. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+#include <inttypes.h>
+#include <stdarg.h>
+#include <assert.h>
+
+#include <libswscale/swscale.h>
+
+#include "common/av_common.h"
+#include "common/msg.h"
+
+#include "options/options.h"
+
+#include "video/img_format.h"
+#include "video/mp_image.h"
+#include "video/mp_image_pool.h"
+#include "video/sws_utils.h"
+#include "video/fmt-conversion.h"
+
+#include "f_swscale.h"
+#include "filter.h"
+#include "filter_internal.h"
+
+int mp_sws_find_best_out_format(int in_format, int *out_formats,
+ int num_out_formats)
+{
+ if (sws_isSupportedInput(imgfmt2pixfmt(in_format)) < 1)
+ return 0;
+
+ int best = 0;
+ for (int n = 0; n < num_out_formats; n++) {
+ int out_format = out_formats[n];
+
+ if (sws_isSupportedOutput(imgfmt2pixfmt(out_format)) < 1)
+ continue;
+
+ if (best) {
+ int candidate = mp_imgfmt_select_best(best, out_format, in_format);
+ if (candidate)
+ best = candidate;
+ } else {
+ best = out_format;
+ }
+ }
+ return best;
+}
+
+bool mp_sws_supports_input(int imgfmt)
+{
+ return sws_isSupportedInput(imgfmt2pixfmt(imgfmt));
+}
+
+static void process(struct mp_filter *f)
+{
+ struct mp_sws_filter *s = f->priv;
+
+ if (!mp_pin_can_transfer_data(f->ppins[1], f->ppins[0]))
+ return;
+
+ struct mp_frame frame = mp_pin_out_read(f->ppins[0]);
+ if (mp_frame_is_signaling(frame)) {
+ mp_pin_in_write(f->ppins[1], frame);
+ return;
+ }
+
+ if (frame.type != MP_FRAME_VIDEO) {
+ MP_ERR(f, "video frame expected\n");
+ goto error;
+ }
+
+ struct mp_image *src = frame.data;
+ int dstfmt = s->out_format ? s->out_format : src->imgfmt;
+
+ struct mp_image *dst = mp_image_pool_get(s->pool, dstfmt, src->w, src->h);
+ if (!dst)
+ goto error;
+
+ mp_image_copy_attributes(dst, src);
+
+ // If we convert from RGB to YUV, default to limited range.
+ if (mp_imgfmt_get_forced_csp(src->imgfmt) == MP_CSP_RGB &&
+ mp_imgfmt_get_forced_csp(dst->imgfmt) == MP_CSP_AUTO)
+ {
+ dst->params.color.levels = MP_CSP_LEVELS_TV;
+ }
+ mp_image_params_guess_csp(&dst->params);
+
+ bool ok = mp_sws_scale(s->sws, dst, src) >= 0;
+
+ mp_frame_unref(&frame);
+ frame = (struct mp_frame){MP_FRAME_VIDEO, dst};
+
+ if (!ok)
+ goto error;
+
+ mp_pin_in_write(f->ppins[1], frame);
+ return;
+
+error:
+ mp_frame_unref(&frame);
+ mp_filter_internal_mark_failed(f);
+ return;
+}
+
+static const struct mp_filter_info sws_filter = {
+ .name = "swscale",
+ .priv_size = sizeof(struct mp_sws_filter),
+ .process = process,
+};
+
+struct mp_sws_filter *mp_sws_filter_create(struct mp_filter *parent)
+{
+ struct mp_filter *f = mp_filter_create(parent, &sws_filter);
+ if (!f)
+ return NULL;
+
+ mp_filter_add_pin(f, MP_PIN_IN, "in");
+ mp_filter_add_pin(f, MP_PIN_OUT, "out");
+
+ struct mp_sws_filter *s = f->priv;
+ s->f = f;
+ s->sws = mp_sws_alloc(s);
+ s->sws->log = f->log;
+ s->pool = mp_image_pool_new(s);
+
+ mp_sws_set_from_cmdline(s->sws, f->global);
+
+ return s;
+}
diff --git a/filters/f_swscale.h b/filters/f_swscale.h
new file mode 100644
index 0000000000..0c434c1fd3
--- /dev/null
+++ b/filters/f_swscale.h
@@ -0,0 +1,25 @@
+#pragma once
+
+#include <stdbool.h>
+
+struct mp_sws_filter {
+ struct mp_filter *f;
+ // Desired output imgfmt. If 0, uses the input format.
+ int out_format;
+ // private state
+ struct mp_sws_context *sws;
+ struct mp_image_pool *pool;
+};
+
+// Create the filter. Free it with talloc_free(mp_sws_filter.f).
+struct mp_sws_filter *mp_sws_filter_create(struct mp_filter *parent);
+
+// Return the best format based on the input format and a list of allowed output
+// formats. This tries to pick the output format that will result in the least
+// loss. Returns a format from out_formats[], or 0 if no format could be chosen
+// (or the input is not supported by libswscale).
+int mp_sws_find_best_out_format(int in_format, int *out_formats,
+ int num_out_formats);
+
+// Whether the given format is supported as an input format.
+bool mp_sws_supports_input(int imgfmt);
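A small sketch of how these are meant to be combined (an assumption, not code from this patch); "parent", "in_fmt", "out_list" and "num_out" are hypothetical:

    struct mp_sws_filter *sws = mp_sws_filter_create(parent);
    if (sws) {
        int best = mp_sws_find_best_out_format(in_fmt, out_list, num_out);
        sws->out_format = best; // 0 keeps the input format unchanged
    }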
diff --git a/filters/f_utils.c b/filters/f_utils.c
new file mode 100644
index 0000000000..f984a3b33a
--- /dev/null
+++ b/filters/f_utils.c
@@ -0,0 +1,175 @@
+#include "video/mp_image.h"
+
+#include "f_utils.h"
+#include "filter_internal.h"
+
+struct frame_duration_priv {
+ struct mp_image *buffered;
+};
+
+static void frame_duration_process(struct mp_filter *f)
+{
+ struct frame_duration_priv *p = f->priv;
+
+ if (!mp_pin_can_transfer_data(f->ppins[1], f->ppins[0]))
+ return;
+
+ struct mp_frame frame = mp_pin_out_read(f->ppins[0]);
+
+ if (frame.type == MP_FRAME_EOF && p->buffered) {
+ mp_pin_in_write(f->ppins[1], MAKE_FRAME(MP_FRAME_VIDEO, p->buffered));
+ p->buffered = NULL;
+ // Pass through the actual EOF in the next iteration.
+ mp_pin_out_repeat_eof(f->ppins[0]);
+ } else if (frame.type == MP_FRAME_VIDEO) {
+ struct mp_image *next = frame.data;
+ if (p->buffered) {
+ if (p->buffered->pts != MP_NOPTS_VALUE &&
+ next->pts != MP_NOPTS_VALUE &&
+ next->pts >= p->buffered->pts)
+ p->buffered->pkt_duration = next->pts - p->buffered->pts;
+ mp_pin_in_write(f->ppins[1], MAKE_FRAME(MP_FRAME_VIDEO, p->buffered));
+ } else {
+ mp_pin_out_request_data(f->ppins[0]);
+ }
+ p->buffered = next;
+ } else {
+ mp_pin_in_write(f->ppins[1], frame);
+ }
+}
+
+static void frame_duration_reset(struct mp_filter *f)
+{
+ struct frame_duration_priv *p = f->priv;
+
+ mp_image_unrefp(&p->buffered);
+}
+
+static const struct mp_filter_info frame_duration_filter = {
+ .name = "frame_duration",
+ .priv_size = sizeof(struct frame_duration_priv),
+ .process = frame_duration_process,
+ .reset = frame_duration_reset,
+};
+
+struct mp_filter *mp_compute_frame_duration_create(struct mp_filter *parent)
+{
+ struct mp_filter *f = mp_filter_create(parent, &frame_duration_filter);
+ if (!f)
+ return NULL;
+
+ mp_filter_add_pin(f, MP_PIN_IN, "in");
+ mp_filter_add_pin(f, MP_PIN_OUT, "out");
+
+ return f;
+}
+
+void mp_chain_filters(struct mp_pin *in, struct mp_pin *out,
+ struct mp_filter **filters, int num_filters)
+{
+ for (int n = 0; n < num_filters; n++) {
+ if (!filters[n])
+ continue;
+ assert(filters[n]->num_pins == 2);
+ mp_pin_connect(filters[n]->pins[0], in);
+ in = filters[n]->pins[1];
+ }
+ mp_pin_connect(out, in);
+}
+
+// Make it repeat process().
+static void mark_progress(struct mp_subfilter *sub)
+{
+ // f == NULL is not really allowed, but at least don't crash.
+ struct mp_filter *f = mp_pin_get_manual_connection(sub->in);
+ if (f)
+ mp_filter_internal_mark_progress(f);
+}
+
+bool mp_subfilter_read(struct mp_subfilter *sub)
+{
+ if (sub->filter) {
+ if (mp_pin_can_transfer_data(sub->out, sub->filter->pins[1])) {
+ struct mp_frame frame = mp_pin_out_read(sub->filter->pins[1]);
+ if (sub->draining && frame.type == MP_FRAME_EOF) {
+ sub->draining = false;
+ TA_FREEP(&sub->filter);
+ mark_progress(sub);
+ return false;
+ }
+ mp_pin_in_write(sub->out, frame);
+ return false;
+ }
+ if (sub->draining)
+ return false;
+ }
+
+ struct mp_pin *out = sub->filter ? sub->filter->pins[0] : sub->out;
+
+ if (sub->frame.type)
+ return mp_pin_in_needs_data(out);
+
+ if (!mp_pin_can_transfer_data(out, sub->in))
+ return false;
+
+ sub->frame = mp_pin_out_read(sub->in);
+ return true;
+}
+
+void mp_subfilter_reset(struct mp_subfilter *sub)
+{
+ if (sub->filter && sub->draining)
+ TA_FREEP(&sub->filter);
+ sub->draining = false;
+ mp_frame_unref(&sub->frame);
+}
+
+void mp_subfilter_continue(struct mp_subfilter *sub)
+{
+ struct mp_pin *out = sub->filter ? sub->filter->pins[0] : sub->out;
+ // It was made sure earlier that the pin is writable, unless the filter
+ // was newly created, or a previously existing filter (which was going to
+ // accept input) was destroyed. In those cases, essentially restart
+ // data flow.
+ if (!mp_pin_in_needs_data(out)) {
+ mark_progress(sub);
+ return;
+ }
+ mp_pin_in_write(out, sub->frame);
+ sub->frame = MP_NO_FRAME;
+}
+
+void mp_subfilter_destroy(struct mp_subfilter *sub)
+{
+ TA_FREEP(&sub->filter);
+ sub->draining = false;
+}
+
+bool mp_subfilter_drain_destroy(struct mp_subfilter *sub)
+{
+ if (!sub->draining && sub->filter) {
+ // We know the filter is writable (unless the user created a new filter
+ // and immediately called this function, which is invalid).
+ mp_pin_in_write(sub->filter->pins[0], MP_EOF_FRAME);
+ sub->draining = true;
+ }
+ return !sub->filter;
+}
+
+static const struct mp_filter_info bidir_nop_filter = {
+ .name = "nop",
+};
+
+struct mp_filter *mp_bidir_nop_filter_create(struct mp_filter *parent)
+{
+ struct mp_filter *f = mp_filter_create(parent, &bidir_nop_filter);
+ if (!f)
+ return NULL;
+
+ mp_filter_add_pin(f, MP_PIN_IN, "in");
+ mp_filter_add_pin(f, MP_PIN_OUT, "out");
+
+ mp_pin_connect(f->ppins[1], f->ppins[0]);
+
+ return f;
+}
diff --git a/filters/f_utils.h b/filters/f_utils.h
new file mode 100644
index 0000000000..a59ac1601d
--- /dev/null
+++ b/filters/f_utils.h
@@ -0,0 +1,72 @@
+#pragma once
+
+#include "filter.h"
+
+// Filter that computes the exact duration of video frames by buffering 1 frame,
+// and taking the PTS difference. This supports video frames only, and stores
+// the duration in mp_image.pkt_duration. All other frame types are passed
+// through.
+struct mp_filter *mp_compute_frame_duration_create(struct mp_filter *parent);
+
+// Given the filters[] array with num_filters entries, connect in to the input
+// of the first filter, the output of the first filter to the input of the
+// second filter, and so on, until out. All filters are assumed to be
+// bidirectional, with input on pin 0 and output on pin 1. NULL entries are
+// skipped.
+void mp_chain_filters(struct mp_pin *in, struct mp_pin *out,
+ struct mp_filter **filters, int num_filters);
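For illustration (a sketch, not from this patch), a wrapper filter could wire two optional sub-filters between its private pins like this; "filter_a" and "filter_b" are hypothetical bidirectional sub-filters:

    struct mp_filter *subs[] = {filter_a, filter_b}; // NULL entries are skipped
    mp_chain_filters(f->ppins[0], f->ppins[1], subs, 2);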
+
+// Helper for maintaining a sub-filter that is created or destroyed on demand,
+// because it might depend on frame input formats or is otherwise dynamically
+// changing. (This is overkill for more static sub filters, or entirely manual
+// filtering.)
+// To initialize this, zero-init all fields, and set the in/out fields.
+struct mp_subfilter {
+ // These two fields must be set on init. The pins must have a manual
+ // connection to the filter whose process() function calls the
+ // mp_subfilter_*() functions.
+ struct mp_pin *in, *out;
+    // Temporary buffered frame, as filled by mp_subfilter_read(). You may
+    // mutate this only if you have not created or destroyed sub->filter in
+    // the meantime.
+ struct mp_frame frame;
+ // The sub-filter, set by the user. Can be NULL if disabled. If set, this
+ // must be a bidirectional filter, with manual connections same as
+    // mp_subfilter.in/out (to get the correct process() function called).
+ // Set this only if it's NULL. You should not overwrite this if it's set.
+ // Use either mp_subfilter_drain_destroy(), mp_subfilter_destroy(), or
+ // mp_subfilter_reset() to unset and destroy the filter gracefully.
+ struct mp_filter *filter;
+ // Internal state.
+ bool draining;
+};
+
+// Make requests for a new frame.
+// Returns whether sub->frame is set to anything. If true is returned, you
+// must either call mp_subfilter_continue() or mp_subfilter_drain_destroy()
+// once to continue data flow normally (otherwise it will stall). If you call
+// mp_subfilter_drain_destroy(), and it returns true, or you call
+// mp_subfilter_destroy(), you can call mp_subfilter_continue() once after it.
+// If this returns true, sub->frame is never unset (MP_FRAME_NONE).
+bool mp_subfilter_read(struct mp_subfilter *sub);
+
+// Clear internal state (usually to be called by parent filter's reset(), or
+// destroy()). This usually does not free sub->filter.
+void mp_subfilter_reset(struct mp_subfilter *sub);
+
+// Continue filtering sub->frame. This can happen after setting a new filter
+// too.
+void mp_subfilter_continue(struct mp_subfilter *sub);
+
+// Destroy the filter immediately (if it's set). You must call
+// mp_subfilter_continue() after this to propagate sub->frame.
+void mp_subfilter_destroy(struct mp_subfilter *sub);
+
+// Make sure the filter is destroyed. Returns true if the filter was destroyed.
+// If this returns false, exit your process() function, so dataflow can
+// continue normally. (process() is repeated until this function returns true,
+// which can take a while if sub->filter has many frames buffered).
+// If this returns true, call mp_subfilter_continue() to propagate sub->frame.
+// The filter is destroyed with talloc_free(sub->filter).
+bool mp_subfilter_drain_destroy(struct mp_subfilter *sub);
+
+// A bidirectional filter which passes through all data.
+struct mp_filter *mp_bidir_nop_filter_create(struct mp_filter *parent);
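A minimal sketch of the intended mp_subfilter call pattern (assumptions: a hypothetical "struct priv" holding the mp_subfilter, and a hypothetical want_new_filter() check; this is not code from the patch):

    static void example_process(struct mp_filter *f)
    {
        struct priv *p = f->priv;

        if (!mp_subfilter_read(&p->sub))
            return; // nothing buffered yet; requests were already made

        if (p->sub.filter && want_new_filter(p->sub.frame)) {
            // Drain the old filter first; process() is re-run until done.
            if (!mp_subfilter_drain_destroy(&p->sub))
                return;
        }

        // (Re)create p->sub.filter here if needed, then pass the frame on.
        mp_subfilter_continue(&p->sub);
    }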
diff --git a/filters/filter.c b/filters/filter.c
new file mode 100644
index 0000000000..bbd8a4ff5b
--- /dev/null
+++ b/filters/filter.c
@@ -0,0 +1,790 @@
+#include <pthread.h>
+
+#include "common/common.h"
+#include "common/global.h"
+#include "common/msg.h"
+#include "video/hwdec.h"
+
+#include "filter.h"
+#include "filter_internal.h"
+
+// Note about connections:
+// They can be confusing, because pins come in pairs, and multiple pins can be
+// transitively connected via mp_pin_connect(). To avoid dealing with this,
+// mp_pin.conn is used to skip redundant connected pins.
+// Consider <1a|1b> a symbol for mp_pin pair #1 and f1 as filter #1. Then:
+// f1 <-> <1a|1b> <-> <2a|2b> <-> <3a|3b> <-> f2
+// would be a connection from 1a to 3b. 1a could be a private pin of f1 (e.g.
+// mp_filter.ppin[0]), and 1b would be the public pin (e.g. mp_filter.pin[0]).
+// A user could have called mp_pin_connect(2a, 1b) and mp_pin_connect(3a, 2b)
+// (assuming 1b has dir==MP_PIN_OUT). The end result are the following values:
+// pin user_conn conn manual_connection within_conn (uses mp_pin.data)
+// 1a NULL 3b f1 false no
+// 1b 2a NULL NULL true no
+// 2a 1b NULL NULL true no
+// 2b 3a NULL NULL true no
+// 3a 2b NULL NULL true no
+// 3b NULL 1a f2 false yes
+// The minimal case of f1 <-> <1a|1b> <-> f2 (1b dir=out) would be:
+// 1a NULL 1b f1 false no
+// 1b NULL 1a f2 false yes
+// In both cases, only the final output pin uses mp_pin.data/data_requested.
+struct mp_pin {
+ const char *name;
+ enum mp_pin_dir dir;
+ struct mp_pin *other; // paired mp_pin representing other end
+ struct mp_filter *owner;
+
+ struct mp_pin *user_conn; // as set by mp_pin_connect()
+ struct mp_pin *conn; // transitive, actual end of the connection
+
+ // Set if the pin is considered connected, but has no user_conn. pin
+ // state changes are handled by the given filter. (Defaults to the root
+ // filter if the pin is for the user of a filter graph.)
+ // As an invariant, conn and manual_connection are both either set or unset.
+ struct mp_filter *manual_connection;
+
+    // Set if the pin is an indirect part of a connection chain, but not one of
+ // the end pins. Basically it's a redundant in-between pin. You never access
+ // these with the pin data flow functions, because only the end pins matter.
+ // This flag is for checking and enforcing this.
+ bool within_conn;
+
+ // This is used for the final output mp_pin in connections only.
+ bool data_requested; // true if out wants new data
+ struct mp_frame data; // possibly buffered frame (MP_FRAME_NONE if
+ // empty, usually only temporary)
+};
+
+// Root filters create this, all other filters reference it.
+struct filter_runner {
+ struct mpv_global *global;
+
+ void (*wakeup_cb)(void *ctx);
+ void *wakeup_ctx;
+
+ struct mp_filter *root_filter;
+
+ // If we're currently running the filter graph (for avoiding recursion).
+ bool filtering;
+
+ // Set of filters which need process() to be called. A filter is in this
+ // array iff mp_filter_internal.pending==true.
+ struct mp_filter **pending;
+ int num_pending;
+
+ // Any outside pins have changed state.
+ bool external_pending;
+
+ // For async notifications only. We don't bother making this fine grained
+ // across filters.
+ pthread_mutex_t async_lock;
+
+ // Wakeup is pending. Protected by async_lock.
+ bool async_wakeup_sent;
+
+ // Similar to pending[]. Uses mp_filter_internal.async_pending. Protected
+ // by async_lock.
+ struct mp_filter **async_pending;
+ int num_async_pending;
+};
+
+struct mp_filter_internal {
+ const struct mp_filter_info *info;
+
+ struct mp_filter *parent;
+ struct filter_runner *runner;
+
+ struct mp_filter **children;
+ int num_children;
+
+ struct mp_filter *error_handler;
+
+ char *name;
+
+ bool pending;
+ bool async_pending;
+ bool failed;
+};
+
+static void add_pending(struct mp_filter *f)
+{
+ struct filter_runner *r = f->in->runner;
+
+ if (f->in->pending)
+ return;
+
+ // This should probably really be some sort of priority queue, but for now
+ // something naive and dumb does the job too.
+ f->in->pending = true;
+ MP_TARRAY_APPEND(r, r->pending, r->num_pending, f);
+}
+
+// Called when new work needs to be done on a pin belonging to the filter:
+// - new data was requested
+// - new data has been queued
+// - or just a connect/disconnect/async notification happened
+// This means the process function for this filter has to be called next.
+static void update_filter(struct mp_filter *src, struct mp_filter *f)
+{
+ assert(f);
+ struct filter_runner *r = f->in->runner;
+
+ // Make sure the filter knows it has to make progress.
+ if (src->in->runner != r) {
+ // Connected to a different graph. The user has to drive those manually,
+        // and we simply tell the user via the mp_filter_run() return value.
+ r->external_pending = true;
+ } else if (!f->in->pending) {
+ add_pending(f);
+
+ if (!r->filtering) {
+ // Likely the "outer" API user used an external manually connected
+ // pin, so do recursive filtering (as a not strictly necessary
+ // feature which makes outside I/O with filters easier).
+ // Also don't lose the pending state, which the user may or may not
+ // care about.
+ // Note that we must avoid calling this from within filtering,
+ // because that would make the process() functions recursive and
+ // reentrant (and hard to reason about).
+ r->external_pending |= mp_filter_run(r->root_filter);
+ }
+
+ // Need to tell user that something changed.
+ if (f == r->root_filter)
+ r->external_pending = true;
+ }
+}
+
+void mp_filter_internal_mark_progress(struct mp_filter *f)
+{
+ struct filter_runner *r = f->in->runner;
+ assert(r->filtering); // only call from f's process()
+ add_pending(f);
+}
+
+// Basically copy the async notifications to the sync ones. Done so that the
+// sync notifications don't need any locking.
+static void flush_async_notifications(struct filter_runner *r, bool queue)
+{
+ pthread_mutex_lock(&r->async_lock);
+ for (int n = 0; n < r->num_async_pending; n++) {
+ struct mp_filter *f = r->async_pending[n];
+ if (queue)
+ add_pending(f);
+ f->in->async_pending = false;
+ }
+ r->num_async_pending = 0;
+ r->async_wakeup_sent = false;
+ pthread_mutex_unlock(&r->async_lock);
+}
+
+bool mp_filter_run(struct mp_filter *filter)
+{
+ struct filter_runner *r = filter->in->runner;
+
+ r->filtering = true;
+
+ flush_async_notifications(r, true);
+
+ while (r->num_pending) {
+ struct mp_filter *next = r->pending[r->num_pending - 1];
+ r->num_pending -= 1;
+ next->in->pending = false;
+
+ if (next->in->info->process)
+ next->in->info->process(next);
+ }
+
+ r->filtering = false;
+
+ bool externals = r->external_pending;
+ r->external_pending = false;
+ return externals;
+}
+
+bool mp_pin_can_transfer_data(struct mp_pin *dst, struct mp_pin *src)
+{
+ return mp_pin_in_needs_data(dst) && mp_pin_out_request_data(src);
+}
+
+bool mp_pin_transfer_data(struct mp_pin *dst, struct mp_pin *src)
+{
+ if (!mp_pin_can_transfer_data(dst, src))
+ return false;
+ mp_pin_in_write(dst, mp_pin_out_read(src));
+ return true;
+}
+
+bool mp_pin_in_needs_data(struct mp_pin *p)
+{
+ assert(p->dir == MP_PIN_IN);
+ assert(!p->within_conn);
+ return p->conn && p->conn->manual_connection && p->conn->data_requested;
+}
+
+bool mp_pin_in_write(struct mp_pin *p, struct mp_frame frame)
+{
+ if (!mp_pin_in_needs_data(p) || frame.type == MP_FRAME_NONE) {
+ if (frame.type)
+ MP_ERR(p->owner, "losing frame on %s\n", p->name);
+ mp_frame_unref(&frame);
+ return false;
+ }
+ assert(p->conn->data.type == MP_FRAME_NONE);
+ p->conn->data = frame;
+ p->conn->data_requested = false;
+ update_filter(p->owner, p->conn->manual_connection);
+ return true;
+}
+
+bool mp_pin_out_has_data(struct mp_pin *p)
+{
+ assert(p->dir == MP_PIN_OUT);
+ assert(!p->within_conn);
+ return p->conn && p->conn->manual_connection && p->data.type != MP_FRAME_NONE;
+}
+
+bool mp_pin_out_request_data(struct mp_pin *p)
+{
+ if (mp_pin_out_has_data(p))
+ return true;
+ if (p->conn && p->conn->manual_connection && !p->data_requested) {
+ p->data_requested = true;
+ update_filter(p->owner, p->conn->manual_connection);
+ }
+ return mp_pin_out_has_data(p);
+}
+
+struct mp_frame mp_pin_out_read(struct mp_pin *p)
+{
+ if (!mp_pin_out_request_data(p))
+ return MP_NO_FRAME;
+ struct mp_frame res = p->data;
+ p->data = MP_NO_FRAME;
+ return res;
+}
+
+void mp_pin_out_unread(struct mp_pin *p, struct mp_frame frame)
+{
+ assert(p->dir == MP_PIN_OUT);
+ assert(!p->within_conn);
+ assert(p->conn && p->conn->manual_connection);
+ // Unread is allowed strictly only if you didn't do anything else with
+ // the pin since the time you read it.
+ assert(!mp_pin_out_has_data(p));
+ assert(!p->data_requested);
+ p->data = frame;
+}
+
+void mp_pin_out_repeat_eof(struct mp_pin *p)
+{
+ mp_pin_out_unread(p, MP_EOF_FRAME);
+}
+
+// Follow mp_pin pairs/connection into the "other" direction of the pin, until
+// the last pin is found. (In the simplest case, this is just p->other.) E.g.:
+// <1a|1b> <-> <2a|2b> <-> <3a|3b>
+// find_connected_end(2b)==1a
+// find_connected_end(1b)==1a
+// find_connected_end(1a)==3b
+static struct mp_pin *find_connected_end(struct mp_pin *p)
+{
+ while (1) {
+ struct mp_pin *other = p->other;
+ if (!other->user_conn)
+ return other;
+ p = other->user_conn;
+ }
+ assert(0);
+}
+
+// With p being part of a connection, create the pin_connection and set all
+// state flags.
+static void init_connection(struct mp_pin *p)
+{
+ if (p->dir == MP_PIN_IN)
+ p = p->other;
+
+ struct mp_pin *in = find_connected_end(p);
+ struct mp_pin *out = find_connected_end(p->other);
+
+ // These are the "outer" pins by definition, they have no user connections.
+ assert(!in->user_conn);
+ assert(!out->user_conn);
+
+    // Logically, the ends are always manual connections. A pin chain without
+ // manual connections at the ends is still disconnected (or if this
+ // attempted to extend an existing connection, becomes dangling and gets
+ // disconnected).
+ if (!in->manual_connection && !out->manual_connection)
+ return;
+
+ assert(in->dir == MP_PIN_IN);
+ assert(out->dir == MP_PIN_OUT);
+
+ struct mp_pin *cur = in;
+ while (cur) {
+ assert(!cur->within_conn && !cur->other->within_conn);
+ assert(!cur->conn && !cur->other->conn);
+ assert(!cur->data_requested); // unused for in pins
+ assert(!cur->data.type); // unused for in pins
+ assert(!cur->other->data_requested); // unset for unconnected out pins
+ assert(!cur->other->data.type); // unset for unconnected out pins
+ cur->within_conn = cur->other->within_conn = true;
+ cur = cur->other->user_conn;
+ }
+
+ in->conn = out;
+ in->within_conn = false;
+ out->conn = in;
+ out->within_conn = false;
+
+ // Scheduling so far will be messed up.
+ add_pending(in->manual_connection);
+ add_pending(out->manual_connection);
+}
+
+void mp_pin_connect(struct mp_pin *dst, struct mp_pin *src)
+{
+ assert(src->dir == MP_PIN_OUT);
+ assert(dst->dir == MP_PIN_IN);
+
+ if (dst->user_conn == src) {
+ assert(src->user_conn == dst);
+ return;
+ }
+
+ mp_pin_disconnect(src);
+ mp_pin_disconnect(dst);
+
+ src->user_conn = dst;
+ dst->user_conn = src;
+
+ init_connection(src);
+}
+
+void mp_pin_set_manual_connection(struct mp_pin *p, bool connected)
+{
+ mp_pin_set_manual_connection_for(p, connected ? p->owner->in->parent : NULL);
+}
+
+void mp_pin_set_manual_connection_for(struct mp_pin *p, struct mp_filter *f)
+{
+ if (p->manual_connection == f)
+ return;
+ if (p->within_conn)
+ mp_pin_disconnect(p);
+ p->manual_connection = f;
+ init_connection(p);
+}
+
+struct mp_filter *mp_pin_get_manual_connection(struct mp_pin *p)
+{
+ return p->manual_connection;
+}
+
+static void deinit_connection(struct mp_pin *p)
+{
+ if (p->dir == MP_PIN_OUT)
+ p = p->other;
+
+ p = find_connected_end(p);
+
+ while (p) {
+ p->conn = p->other->conn = NULL;
+ p->within_conn = p->other->within_conn = false;
+ assert(!p->other->data_requested); // unused for in pins
+ assert(!p->other->data.type); // unused for in pins
+        if (p->data.type)
+            MP_WARN(p->owner, "dropping frame due to pin disconnect\n");
+        if (p->data_requested)
+            MP_WARN(p->owner, "dropping request due to pin disconnect\n");
+        p->data_requested = false;
+        mp_frame_unref(&p->data);
+ p = p->other->user_conn;
+ }
+}
+
+void mp_pin_disconnect(struct mp_pin *p)
+{
+ if (!mp_pin_is_connected(p))
+ return;
+
+ p->manual_connection = NULL;
+
+ struct mp_pin *conn = p->user_conn;
+ if (conn) {
+ p->user_conn = NULL;
+ conn->user_conn = NULL;
+ deinit_connection(conn);
+ }
+
+ deinit_connection(p);
+}
+
+bool mp_pin_is_connected(struct mp_pin *p)
+{
+ return p->user_conn || p->manual_connection;
+}
+
+const char *mp_pin_get_name(struct mp_pin *p)
+{
+ return p->name;
+}
+
+enum mp_pin_dir mp_pin_get_dir(struct mp_pin *p)
+{
+ return p->dir;
+}
+
+const char *mp_filter_get_name(struct mp_filter *f)
+{
+ return f->in->name;
+}
+
+void mp_filter_set_name(struct mp_filter *f, const char *name)
+{
+ talloc_free(f->in->name);
+ f->in->name = talloc_strdup(f, name);
+}
+
+struct mp_pin *mp_filter_get_named_pin(struct mp_filter *f, const char *name)
+{
+ for (int n = 0; n < f->num_pins; n++) {
+ if (name && strcmp(f->pins[n]->name, name) == 0)
+ return f->pins[n];
+ }
+ return NULL;
+}
+
+void mp_filter_set_error_handler(struct mp_filter *f, struct mp_filter *handler)
+{
+ f->in->error_handler = handler;
+}
+
+void mp_filter_internal_mark_failed(struct mp_filter *f)
+{
+ while (f) {
+ f->in->failed = true;
+ if (f->in->error_handler) {
+ add_pending(f->in->error_handler);
+ break;
+ }
+ f = f->in->parent;
+ }
+}
+
+bool mp_filter_has_failed(struct mp_filter *filter)
+{
+ bool failed = filter->in->failed;
+ filter->in->failed = false;
+ return failed;
+}
+
+static void reset_pin(struct mp_pin *p)
+{
+ if (!p->conn || p->dir != MP_PIN_OUT) {
+ assert(!p->data.type);
+ assert(!p->data_requested);
+ }
+ mp_frame_unref(&p->data);
+ p->data_requested = false;
+}
+
+void mp_filter_reset(struct mp_filter *filter)
+{
+ for (int n = 0; n < filter->in->num_children; n++)
+ mp_filter_reset(filter->in->children[n]);
+
+ for (int n = 0; n < filter->num_pins; n++) {
+ struct mp_pin *p = filter->ppins[n];
+ reset_pin(p);
+ reset_pin(p->other);
+ }
+
+ if (filter->in->info->reset)
+ filter->in->info->reset(filter);
+}
+
+struct mp_pin *mp_filter_add_pin(struct mp_filter *f, enum mp_pin_dir dir,
+ const char *name)
+{
+ assert(dir == MP_PIN_IN || dir == MP_PIN_OUT);
+ assert(name && name[0]);
+ assert(!mp_filter_get_named_pin(f, name));
+
+ // "Public" pin
+ struct mp_pin *p = talloc_ptrtype(NULL, p);
+ *p = (struct mp_pin){
+ .name = talloc_strdup(p, name),
+ .dir = dir,
+ .owner = f,
+ .manual_connection = f->in->parent,
+ };
+
+ // "Private" paired pin
+ p->other = talloc_ptrtype(NULL, p);
+ *p->other = (struct mp_pin){
+ .name = p->name,
+ .dir = p->dir == MP_PIN_IN ? MP_PIN_OUT : MP_PIN_IN,
+ .owner = f,
+ .other = p,
+ .manual_connection = f,
+ };
+
+ MP_TARRAY_GROW(f, f->pins, f->num_pins);
+ MP_TARRAY_GROW(f, f->ppins, f->num_pins);
+ f->pins[f->num_pins] = p;
+ f->ppins[f->num_pins] = p->other;
+ f->num_pins += 1;
+
+ init_connection(p);
+
+ return p->other;
+}
+
+void mp_filter_remove_pin(struct mp_filter *f, struct mp_pin *p)
+{
+ if (!p)
+ return;
+
+ assert(p->owner == f);
+ mp_pin_disconnect(p);
+ mp_pin_disconnect(p->other);
+
+ int index = -1;
+ for (int n = 0; n < f->num_pins; n++) {
+ if (f->ppins[n] == p) {
+ index = n;
+ break;
+ }
+ }
+ assert(index >= 0);
+
+ talloc_free(f->pins[index]);
+ talloc_free(f->ppins[index]);
+
+ int count = f->num_pins;
+ MP_TARRAY_REMOVE_AT(f->pins, count, index);
+ count = f->num_pins;
+ MP_TARRAY_REMOVE_AT(f->ppins, count, index);
+ f->num_pins -= 1;
+}
+
+bool mp_filter_command(struct mp_filter *f, struct mp_filter_command *cmd)
+{
+ return f->in->info->command ? f->in->info->command(f, cmd) : false;
+}
+
+struct mp_stream_info *mp_filter_find_stream_info(struct mp_filter *f)
+{
+ while (f) {
+ if (f->stream_info)
+ return f->stream_info;
+ f = f->in->parent;
+ }
+ return NULL;
+}
+
+struct AVBufferRef *mp_filter_load_hwdec_device(struct mp_filter *f, int avtype)
+{
+ struct mp_stream_info *info = mp_filter_find_stream_info(f);
+ if (!info || !info->hwdec_devs)
+ return NULL;
+
+ hwdec_devices_request_all(info->hwdec_devs);
+
+ return hwdec_devices_get_lavc(info->hwdec_devs, avtype);
+}
+
+static void filter_wakeup(struct mp_filter *f, bool mark_only)
+{
+ struct filter_runner *r = f->in->runner;
+ pthread_mutex_lock(&r->async_lock);
+ if (!f->in->async_pending) {
+ f->in->async_pending = true;
+ // (not using a talloc parent for thread safety reasons)
+ MP_TARRAY_APPEND(NULL, r->async_pending, r->num_async_pending, f);
+ if (!mark_only && !r->async_wakeup_sent) {
+ if (r->wakeup_cb)
+ r->wakeup_cb(r->wakeup_ctx);
+ r->async_wakeup_sent = true;
+ }
+ }
+ pthread_mutex_unlock(&r->async_lock);
+}
+
+void mp_filter_wakeup(struct mp_filter *f)
+{
+ filter_wakeup(f, false);
+}
+
+void mp_filter_mark_async_progress(struct mp_filter *f)
+{
+ filter_wakeup(f, true);
+}
+
+void mp_filter_free_children(struct mp_filter *f)
+{
+ while(f->in->num_children)
+ talloc_free(f->in->children[0]);
+}
+
+static void filter_destructor(void *p)
+{
+ struct mp_filter *f = p;
+ struct filter_runner *r = f->in->runner;
+
+ if (f->in->info->destroy)
+ f->in->info->destroy(f);
+
+ // For convenience, free child filters.
+ mp_filter_free_children(f);
+
+ while (f->num_pins)
+ mp_filter_remove_pin(f, f->ppins[0]);
+
+ // Just make sure the filter is not still in the async notifications set.
+ // There will be no more new notifications at this point (due to destroy()).
+ flush_async_notifications(r, false);
+
+ for (int n = 0; n < r->num_pending; n++) {
+ if (r->pending[n] == f) {
+ MP_TARRAY_REMOVE_AT(r->pending, r->num_pending, n);
+ break;
+ }
+ }
+
+ if (f->in->parent) {
+ struct mp_filter_internal *p_in = f->in->parent->in;
+ for (int n = 0; n < p_in->num_children; n++) {
+ if (p_in->children[n] == f) {
+ MP_TARRAY_REMOVE_AT(p_in->children, p_in->num_children, n);
+ break;
+ }
+ }
+ }
+
+ if (r->root_filter == f) {
+ assert(!f->in->parent);
+ pthread_mutex_destroy(&r->async_lock);
+ talloc_free(r->async_pending);
+ talloc_free(r);
+ }
+}
+
+
+struct mp_filter *mp_filter_create_with_params(struct mp_filter_params *params)
+{
+ struct mp_filter *f = talloc(NULL, struct mp_filter);
+ talloc_set_destructor(f, filter_destructor);
+ *f = (struct mp_filter){
+ .priv = params->info->priv_size ?
+ talloc_zero_size(f, params->info->priv_size) : NULL,
+ .global = params->global,
+ .in = talloc(f, struct mp_filter_internal),
+ };
+ *f->in = (struct mp_filter_internal){
+ .info = params->info,
+ .parent = params->parent,
+ .runner = params->parent ? params->parent->in->runner : NULL,
+ };
+
+ if (!f->in->runner) {
+ assert(params->global);
+
+ f->in->runner = talloc(NULL, struct filter_runner);
+ *f->in->runner = (struct filter_runner){
+ .global = params->global,
+ .root_filter = f,
+ };
+ pthread_mutex_init(&f->in->runner->async_lock, NULL);
+ }
+
+ if (!f->global)
+ f->global = f->in->runner->global;
+
+ if (f->in->parent) {
+ struct mp_filter_internal *parent = f->in->parent->in;
+ MP_TARRAY_APPEND(parent, parent->children, parent->num_children, f);
+ }
+
+ f->log = mp_log_new(f, f->global->log, params->info->name);
+
+ if (f->in->info->init) {
+ if (!f->in->info->init(f, params)) {
+ talloc_free(f);
+ return NULL;
+ }
+ }
+
+ return f;
+}
+
+struct mp_filter *mp_filter_create(struct mp_filter *parent,
+ const struct mp_filter_info *info)
+{
+ assert(parent);
+ assert(info);
+ struct mp_filter_params params = {
+ .info = info,
+ .parent = parent,
+ };
+ return mp_filter_create_with_params(&params);
+}
+
+// (the root filter is just a dummy filter - nothing special about it, except
+// that it has no parent, and serves as manual connection for "external" pins)
+static const struct mp_filter_info filter_root = {
+ .name = "root",
+};
+
+struct mp_filter *mp_filter_create_root(struct mpv_global *global)
+{
+ struct mp_filter_params params = {
+ .info = &filter_root,
+ .global = global,
+ };
+ return mp_filter_create_with_params(&params);
+}
+
+void mp_filter_root_set_wakeup_cb(struct mp_filter *root,
+ void (*wakeup_cb)(void *ctx), void *ctx)
+{
+ struct filter_runner *r = root->in->runner;
+ pthread_mutex_lock(&r->async_lock);
+ r->wakeup_cb = wakeup_cb;
+ r->wakeup_ctx = ctx;
+ pthread_mutex_unlock(&r->async_lock);
+}
+
+static const char *filt_name(struct mp_filter *f)
+{
+ return f ? f->in->info->name : "-";
+}
+
+static void dump_pin_state(struct mp_filter *f, struct mp_pin *pin)
+{
+ MP_WARN(f, " [%p] %s %s c=%s[%p] f=%s[%p] m=%s[%p] %s %s %s\n",
+ pin, pin->name, pin->dir == MP_PIN_IN ? "->" : "<-",
+ pin->user_conn ? filt_name(pin->user_conn->owner) : "-", pin->user_conn,
+ pin->conn ? filt_name(pin->conn->owner) : "-", pin->conn,
+ filt_name(pin->manual_connection), pin->manual_connection,
+ pin->within_conn ? "(within)" : "",
+ pin->data_requested ? "(request)" : "",
+ mp_frame_type_str(pin->data.type));
+}
+
+void mp_filter_dump_states(struct mp_filter *f)
+{
+ MP_WARN(f, "%s[%p] (%s[%p])\n", filt_name(f), f,
+ filt_name(f->in->parent), f->in->parent);
+ for (int n = 0; n < f->num_pins; n++) {
+ dump_pin_state(f, f->pins[n]);
+ dump_pin_state(f, f->ppins[n]);
+ }
+
+ for (int n = 0; n < f->in->num_children; n++)
+ mp_filter_dump_states(f->in->children[n]);
+}
diff --git a/filters/filter.h b/filters/filter.h
new file mode 100644
index 0000000000..3fd8af9195
--- /dev/null
+++ b/filters/filter.h
@@ -0,0 +1,379 @@
+#pragma once
+
+#include <stdbool.h>
+
+#include "frame.h"
+
+struct mpv_global;
+struct mp_filter;
+
+// A filter input or output. These always come in pairs: one mp_pin is for
+// input, the other is for output. (The separation is mostly for checking
+// their API use, and for the connection functions.)
+// Effectively, this is a 1-frame queue. The data flow rules have the goal to
+// reduce the number of buffered frames and the amount of time they are
+// buffered.
+// A mp_pin must be connected to be usable. The default state of a mp_pin is
+// a manual connection, which means you use the mp_pin_*() functions to
+// manually read or write data.
+struct mp_pin;
+
+enum mp_pin_dir {
+ MP_PIN_INVALID = 0, // used as a placeholder value
+ MP_PIN_IN, // you write data to the pin
+ MP_PIN_OUT, // you read data from the pin
+};
+
+// The established direction for this pin. The direction of a pin is immutable.
+// You must use the mp_pin_in_*() and mp_pin_out_*() functions on the correct
+// pin type - mismatching it is an API violation.
+enum mp_pin_dir mp_pin_get_dir(struct mp_pin *p);
+
+// True if a new frame should be written to the pin.
+bool mp_pin_in_needs_data(struct mp_pin *p);
+
+// Write a frame to the pin. If the input was not accepted, false is returned
+// (does not normally happen, as long as mp_pin_in_needs_data() returned true).
+// The callee owns the reference to the frame data, even on failure.
+// Writing a MP_FRAME_NONE has no effect (and returns false).
+bool mp_pin_in_write(struct mp_pin *p, struct mp_frame frame);
+
+// True if a frame is actually available for reading right now, and
+// mp_pin_out_read() will return success. If this returns false, the pin is
+// flagged for needing data (the filter might either produce output the next
+// time it's run, or request new input).
+// You should call this only if you can immediately consume the data. The goal
+// is to have no redundant buffering in the filter graph, and leaving frames
+// buffered in mp_pins goes against this.
+bool mp_pin_out_request_data(struct mp_pin *p);
+
+// Same as mp_pin_out_request_data(), but does not attempt to procure new frames
+// if the return value is false.
+bool mp_pin_out_has_data(struct mp_pin *p);
+
+// Read a frame. Returns MP_FRAME_NONE if currently no frame is available.
+// You need to call mp_pin_out_request_data() and wait until the frame is ready
+// to be sure this returns a frame. (This call implicitly calls _request if no
+// frame is available, but to get proper data flow in filters, you should
+// probably follow the preferred conventions.)
+// If no frame is returned, a frame is automatically requested via
+// mp_pin_out_request_data() (so it might be returned in the future).
+// If a frame is returned, no new frame is automatically requested (this is
+// usually not wanted, because it could lead to additional buffering).
+// This is guaranteed to return a non-NONE frame if mp_pin_out_has_data()
+// returned true and no other filter functions were called.
+// The caller owns the reference to the returned data.
+struct mp_frame mp_pin_out_read(struct mp_pin *p);
+
+// Undo mp_pin_out_read(). This should be only used in special cases. Normally,
+// you should make an effort to reduce buffering, which means you signal that
+// you need a frame only once you know that you can use it (meaning you'll
+// really use it and have no need to "undo" the read). But in special cases,
+// especially if the behavior depends on the exact frame data, using this might
+// be justified.
+// If this is called, the next mp_pin_out_read() call will return the same frame
+// again. You must not have called mp_pin_out_request_data() on this pin and
+// you must not have disconnected or changed the pin in any way.
+// This does not mark the filter for progress, i.e. the filter's process()
+// function won't be repeated (unless other pins change). If you really need
+// that, call mp_filter_internal_mark_progress() manually in addition.
+void mp_pin_out_unread(struct mp_pin *p, struct mp_frame frame);
+
+// A helper to make draining on MP_FRAME_EOF frames easier. For filters which
+// buffer data, but have no easy way to buffer MP_FRAME_EOF frames natively.
+// This is to be used as follows:
+// 1. caller receives MP_FRAME_EOF
+// 2. initiates draining (or continues, see step 4.)
+// 2b. if there are no more buffered frames, just propagates the EOF frame and
+// exits
+// 3. calls mp_pin_out_repeat_eof(pin)
+// 4. returns a buffered frame normally, and continues normally
+// 4b. pin returns "repeated" MP_FRAME_EOF, jump to 1.
+// 5. if there's nothing more to do, stop
+// 5b. there might be a sporadic wakeup, and an unwanted wait for output (in
+// a typical filter implementation)
+// You must not have requested data before calling this. (Usually you'd call
+// this after mp_pin_out_read(). Requesting data after queuing the repeat EOF
+// is OK and idempotent.)
+// This is equivalent to mp_pin_out_unread(p, MP_EOF_FRAME). See that function
+// for further remarks.
+void mp_pin_out_repeat_eof(struct mp_pin *p);
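As a sketch of the steps above (mirroring what the frame_duration filter in f_utils.c does), for a filter that buffers one video frame in a hypothetical p->buffered:

    struct mp_frame frame = mp_pin_out_read(f->ppins[0]);
    if (frame.type == MP_FRAME_EOF && p->buffered) {
        // Flush the buffered frame now; the EOF is re-delivered next run.
        mp_pin_in_write(f->ppins[1], MAKE_FRAME(MP_FRAME_VIDEO, p->buffered));
        p->buffered = NULL;
        mp_pin_out_repeat_eof(f->ppins[0]);
    }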
+
+// Trivial helper to determine whether src is readable and dst is writable right
+// now. Defers or requests new data if not ready. This means it has the side
+// effect of telling the filters that you want to transfer data.
+// You use this in a filter process() function. If the result is false, it will
+// have requested new output from src, and your process() function will be
+// called again once src has output and dst accepts input (at the latest).
+bool mp_pin_can_transfer_data(struct mp_pin *dst, struct mp_pin *src);
+
+// Trivial helper to copy data between two manual pins. This uses filter data
+// flow - so if data can't be copied, it requests the pins to make it possible
+// on the next filter run. This implies you call this either from a filter
+// process() function, or call it manually when needed. Also see
+// mp_pin_can_transfer_data(). Returns whether a transfer happened.
+bool mp_pin_transfer_data(struct mp_pin *dst, struct mp_pin *src);
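A minimal sketch of a process() function built on these helpers (a pure passthrough with room for inspection; the swscale and frame_duration filters in this patch follow the same shape):

    static void example_process(struct mp_filter *f)
    {
        if (!mp_pin_can_transfer_data(f->ppins[1], f->ppins[0]))
            return; // requests made; process() is called again when ready

        struct mp_frame frame = mp_pin_out_read(f->ppins[0]);
        // ... inspect or modify the frame here ...
        mp_pin_in_write(f->ppins[1], frame);
    }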
+
+// Connect src and dst, for automatic data flow: requests on the dst side are
+// forwarded to src, and frames output by src are passed down to dst when
+// appropriate.
+// src must be MP_PIN_OUT, dst must be MP_PIN_IN.
+// Previous connections are always removed. If the pins were already connected,
+// no action is taken.
+// Creating circular connections will just cause infinite recursion or such.
+// Both API user and filter implementations can use this, but always only on
+// the pins they're allowed to access.
+void mp_pin_connect(struct mp_pin *dst, struct mp_pin *src);
+
+// Enable manual filter access. This means you want to directly use the
+// mp_pin_in*() and mp_pin_out_*() functions for data flow.
+// Always severs previous connections.
+void mp_pin_set_manual_connection(struct mp_pin *p, bool connected);
+
+// Enable manual filter access, like mp_pin_set_manual_connection(). In
+// addition, this specifies which filter's process function should be invoked
+// on pin state changes. Using mp_pin_set_manual_connection() will default to
+// the parent filter for this.
+// Passing f=NULL disconnects.
+void mp_pin_set_manual_connection_for(struct mp_pin *p, struct mp_filter *f);
+
+// Return the manual connection for this pin, or NULL if none.
+struct mp_filter *mp_pin_get_manual_connection(struct mp_pin *p);
+
+// If not connected, this will produce EOF for MP_PIN_IN, and never request
+// data for MP_PIN_OUT.
+void mp_pin_disconnect(struct mp_pin *p);
+
+// Return whether a connection was set on this pin. Note that this is not
+// transitive (if the pin is connected to a pin with no further connections,
+// there is no active connection, but this still returns true).
+bool mp_pin_is_connected(struct mp_pin *p);
+
+// Return a symbolic name of the pin. Usually it will be something redundant
+// (like "in" or "out"), or something the user set.
+// The returned pointer is valid as long as the mp_pin is allocated.
+const char *mp_pin_get_name(struct mp_pin *p);
+
+/**
+ * A filter converts input frames to output frames (mp_frame, usually audio or
+ * video data). It can support multiple inputs and outputs. Data always flows
+ * through mp_pin instances.
+ *
+ * --- General rules for data flow:
+ *
+ * All data goes through mp_pin (present in the mp_filter inputs/outputs list).
+ * Actual work is done in the filter's process() function. This function
+ * queries whether input mp_pins have data and output mp_pins require data. If
+ * both are the case, a frame is read, filtered, and written to the output.
+ * Depending on the filter type, the filter might internally buffer data (e.g.
+ * things that require readahead). But in general, a filter should not request
+ * input before output is needed.
+ *
+ * The general goal is to reduce the amount of data buffered. If buffering is
+ * actually desired, explicit filters for buffering have to be introduced into
+ * the filter chain.
+ *
+ * Multiple filters are driven by letting mp_pin flag filters which need
+ * process() to be called. The process starts by requesting output from the
+ * last filter. The requests will "bubble up" by iteratively calling process()
+ * on each filter, which will request further input, until input on the first
+ * filter's input pin is requested. The API user feeds it a frame, which will
+ * call the first filter's process() function, which will filter and output
+ * the frame, and the frame is iteratively filtered until it reaches the output.
+ * (Depending on the implementation, some of this will be recursive rather
+ * than iterative.)
+ *
+ * --- General rules for thread safety:
+ *
+ * Filters are by default not thread safe. However, some filters can be
+ * partially thread safe and allow certain functions to be accessed from
+ * foreign threads. The common filter code itself is not thread safe, except
+ * for some utility functions explicitly marked as such, and which are meant
+ * to make implementing threaded filters easier.
+ *
+ * --- Rules for manual connections:
+ *
+ * A pin can be marked for manual connection via mp_pin_set_manual_connection().
+ * It's also the default. These have two uses:
+ *
+ * 1. filter internal (the filter actually does something with a frame)
+ * 2. filter user manually feeding/retrieving frames
+ *
+ * Basically, a manual connection means someone uses the mp_pin_in_*() or
+ * mp_pin_out_*() functions on a pin. The alternative is an automatic connection
+ * made via mp_pin_connect(). Manual connections need special considerations
+ * for wakeups:
+ *
+ * Internal manual pins (within a filter) will invoke the filter's process()
+ * function, and the filter polls the state of all pins to see if anything
+ * needs to be filtered or requested.
+ *
+ * External manual pins (filter user) require the user to poll all manual pins
+ * that are part of the graph. In addition, the filter's wakeup callback must be
+ * set, and trigger repolling all pins. This is needed in case any filters do
+ * async filtering internally.
+ *
+ * --- Rules for filters with multiple inputs or outputs:
+ *
+ * The generic filter code does not do any kind of scheduling. It's the filter's
+ * responsibility to request frames from input when needed, and to avoid
+ * excessive internal buffering if outputs aren't read.
+ *
+ * --- Rules for async filters:
+ *
+ * Async filters will have a synchronous interface with asynchronous waiting.
+ * They change mp_pin data flow to being poll based, with a wakeup mechanism to
+ * avoid active waiting. Once polling results in no change, the API user can go
+ * to sleep, and wait until the wakeup callback set via mp_filter_create_root()
+ * is invoked. Then it can poll the filters again. Internally, filters use
+ * mp_filter_wakeup() to get their process() function invoked on the user
+ * thread, and update the mp_pin states.
+ *
+ * For running parts of a filter graph on a different thread, f_async_queue.h
+ * can be used.
+ *
+ * --- Format conversions and mid-stream format changes:
+ *
+ * Generally, all filters must support all formats, as well as mid-stream
+ * format changes. If they don't, they will have to error out. There are some
+ * helpers for dealing with these two things.
+ *
+ * mp_pin_out_unread() can temporarily put back an input frame. If the input
+ * format changed, and you have to drain buffered data, you can put back the
+ * frame every time you output a buffered frame. Once all buffered data is
+ * drained this way, you can switch the internal filter state to the new
+ * format, and actually consume the input frame.
+ *
+ * There is an f_autoconvert filter, which lets you transparently convert to
+ * a set of target formats (and which passes through the data if no conversion
+ * is needed).
+ *
+ * --- Rules for format negotiation:
+ *
+ * Since libavfilter does not provide _any_ kind of format negotiation to the
+ * user, and most filters use the libavfilter wrapper anyway, this is pretty
+ * broken and rudimentary. (The only thing libavfilter provides is that you
+ * can try to create a filter with a specific input format. Then you get
+ * either failure, or an output format. It involves actually initializing all
+ * filters, so a try run is not cheap or even side effect free.)
+ */
+struct mp_filter {
+ // Private state for the filter implementation. API users must not access
+ // this.
+ void *priv;
+
+ struct mpv_global *global;
+ struct mp_log *log;
+
+ // Array of public pins. API users can read this, but are not allowed to
+ // modify the array. Filter implementations use mp_filter_add_pin() to add
+ // pins to the array. The array is in order of the add calls.
+ // Most filters will use pins[0] for input (MP_PIN_IN), and pins[1] for
+ // output (MP_PIN_OUT). This is the default convention for filters. Some
+ // filters may have more complex usage, and assign pin entries with
+ // different meanings.
+ // The filter implementation must not use this. It must access ppins[]
+ // instead.
+ struct mp_pin **pins;
+ int num_pins;
+
+ // Internal pins, for access by the filter implementation. The meaning of
+ // in/out is swapped from the public interface: inputs use MP_PIN_OUT,
+ // because the filter reads from the inputs, and outputs use MP_PIN_IN,
+ // because the filter writes to them. ppins[n] always corresponds to pins[n],
+ // with swapped direction, and implicit data flow between the two.
+ // Outside API users must not access this.
+ struct mp_pin **ppins;
+
+ // Dumb garbage.
+ struct mp_stream_info *stream_info;
+
+ // Private state for the generic filter code.
+ struct mp_filter_internal *in;
+};
+
+// Return a symbolic name, which is set at init time. NULL if no name.
+// Valid until filter is destroyed or next mp_filter_set_name() call.
+const char *mp_filter_get_name(struct mp_filter *f);
+
+// Change mp_filter_get_name() return value.
+void mp_filter_set_name(struct mp_filter *f, const char *name);
+
+// Get a pin from f->pins[] for which mp_pin_get_name() returns the same name.
+// If name is NULL, always return NULL.
+struct mp_pin *mp_filter_get_named_pin(struct mp_filter *f, const char *name);
+
+// Return true if the filter has failed in some fatal way that does not allow
+// it to continue. This resets the error state (but does not reset the child
+// failed status on any parent filter).
+bool mp_filter_has_failed(struct mp_filter *filter);
+
+// Invoke mp_filter_info.reset on this filter and all children (but not
+// other filters connected via pins).
+void mp_filter_reset(struct mp_filter *filter);
+
+enum mp_filter_command_type {
+ MP_FILTER_COMMAND_TEXT = 1,
+ MP_FILTER_COMMAND_GET_META,
+};
+
+struct mp_filter_command {
+ enum mp_filter_command_type type;
+
+ // For MP_FILTER_COMMAND_TEXT
+ const char *cmd;
+ const char *arg;
+
+ // For MP_FILTER_COMMAND_GET_META
+ void *res; // must point to struct mp_tags*, will be set to new instance
+};
+
+// Run a command on the filter. Returns success. For libavfilter.
+bool mp_filter_command(struct mp_filter *f, struct mp_filter_command *cmd);
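+
+// For illustration (hypothetical sketch; the command and argument strings are
+// made up and depend entirely on the concrete filter):
+//
+//   struct mp_filter_command cmd = {
+//       .type = MP_FILTER_COMMAND_TEXT,
+//       .cmd = "some-command",
+//       .arg = "some-argument",
+//   };
+//   if (!mp_filter_command(f, &cmd))
+//       ; // filter did not handle the command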
+
+// Specific information about a sub-tree in a filter graph. Currently, this is
+// mostly used to give filters access to VO mechanisms and capabilities.
+struct mp_stream_info {
+ void *priv; // for use by whoever implements the callbacks
+
+ double (*get_display_fps)(struct mp_stream_info *i);
+ double (*get_container_fps)(struct mp_stream_info *i);
+
+ struct mp_hwdec_devices *hwdec_devs;
+ struct osd_state *osd;
+ bool rotate90;
+};
+
+// Search for a parent filter (including f) that has this set, and return it.
+struct mp_stream_info *mp_filter_find_stream_info(struct mp_filter *f);
+
+struct AVBufferRef;
+struct AVBufferRef *mp_filter_load_hwdec_device(struct mp_filter *f, int avtype);
+
+// Perform filtering. This runs until the filter graph is blocked (due to
+// missing external input or unread output). It returns whether any outside
+// pins have changed state.
+// Does not perform recursive filtering to connected filters with different
+// root filter, though it notifies them.
+bool mp_filter_run(struct mp_filter *f);
+
+// Create a root dummy filter with no inputs or outputs. This fulfills the
+// following functions:
+// - passing it as parent filter to top-level filters
+// - driving the filter loop between the shared filters
+// - setting the wakeup callback for async filtering
+// - implicitly passing down global data like mpv_global and keeping filter
+// constructor functions simple
+// Note that you can still connect pins of filters with different parents or
+// root filters, but then you may have to manually invoke mp_filter_run() on
+// the root filters of the connected filters to drive data flow.
+struct mp_filter *mp_filter_create_root(struct mpv_global *global);
+
+// Asynchronous filters may need to wake up the user thread if the status of any
+// mp_pin has changed. If this is called, the callback provider should get the
+// user's thread to call mp_filter_run() again.
+// The wakeup callback must not recursively call into any filter APIs, or do
+// blocking waits on the filter API (deadlocks will happen).
+void mp_filter_root_set_wakeup_cb(struct mp_filter *root,
+ void (*wakeup_cb)(void *ctx), void *ctx);
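+
+// For illustration, a rough sketch of how an API user might drive a graph
+// (hypothetical; my_wakeup_cb(), my_ctx, wait_for_wakeup() and keep_running
+// are placeholders, and real code also polls its external manual pins):
+//
+//   struct mp_filter *root = mp_filter_create_root(global);
+//   mp_filter_root_set_wakeup_cb(root, my_wakeup_cb, my_ctx);
+//   // ... create filters with root as parent and connect their pins ...
+//   while (keep_running) {
+//       if (mp_filter_run(root))
+//           continue;          // outside pins changed state; poll them again
+//       wait_for_wakeup();     // sleep until my_wakeup_cb() fires
+//   }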
+
+// Debugging internal stuff.
+void mp_filter_dump_states(struct mp_filter *f);
diff --git a/filters/filter_internal.h b/filters/filter_internal.h
new file mode 100644
index 0000000000..bd901db7d8
--- /dev/null
+++ b/filters/filter_internal.h
@@ -0,0 +1,144 @@
+#pragma once
+
+#include <stddef.h>
+
+#include "filter.h"
+
+// Flag the filter as needing its process() function to be called. Useful for
+// (some) async filters only. Idempotent.
+// Explicitly thread-safe.
+void mp_filter_wakeup(struct mp_filter *f);
+
+// Same as mp_filter_wakeup(), but skips the wakeup, and only marks the filter
+// as requiring processing, so that pin states changed by async processing can
+// be updated.
+// Explicitly thread-safe.
+void mp_filter_mark_async_progress(struct mp_filter *f);
+
+// Flag the filter as needing its process() function to be called. Unlike
+// mp_filter_wakeup(), not thread-safe, and must be called from the process()
+// function of f (in exchange this is very light-weight).
+// In practice, this means process() is repeated.
+void mp_filter_internal_mark_progress(struct mp_filter *f);
+
+// Flag the filter as having failed, and propagate the error to the parent
+// filter. The error propagation stops either at the root filter, or if a filter
+// has an error handler set.
+// Must be called from f's process function.
+void mp_filter_internal_mark_failed(struct mp_filter *f);
+
+// If handler is not NULL, then errors in filter f are not propagated to the
+// parent; instead, the handler's process() function is invoked. The handler is
+// supposed to use mp_filter_has_failed(f) to check any filters for which it has
+// set itself as error handler.
+// A filter must manually unset itself as error handler if it gets destroyed
+// before the filter f, otherwise dangling pointers will occur.
+void mp_filter_set_error_handler(struct mp_filter *f, struct mp_filter *handler);
+
+// Add a pin. Returns the private handle (same as f->ppins[f->num_pins-1]).
+// The name must be unique across all filter pins (you must verify this
+// yourself if filter names are from user input). name=="" is not allowed.
+// Never returns NULL. dir should be the external filter direction (a filter
+// input will use dir==MP_PIN_IN, and the returned pin will use MP_PIN_OUT,
+// because the internal pin is the opposite end of the external one).
+struct mp_pin *mp_filter_add_pin(struct mp_filter *f, enum mp_pin_dir dir,
+ const char *name);
+
+// Remove and deallocate a pin. The caller must be sure that nothing else
+// references the pin anymore. You must pass the private pin (from
+// mp_filter.ppins). This removes/deallocates the public paired pin as well.
+void mp_filter_remove_pin(struct mp_filter *f, struct mp_pin *p);
+
+// Free all filters which have f as parent. (This has nothing to do with
+// talloc.)
+void mp_filter_free_children(struct mp_filter *f);
+
+struct mp_filter_params;
+
+struct mp_filter_info {
+ // Informational name, in some cases might be used for filter discovery.
+ const char *name;
+
+ // mp_filter.priv is set to memory allocated with this size (if > 0)
+ size_t priv_size;
+
+ // Called during mp_filter_create(). Optional; can be NULL, in which case a
+ // constructor function is required to set up the real filter after
+ // creation. (As it turns out, nothing uses this.)
+ bool (*init)(struct mp_filter *f, struct mp_filter_params *params);
+
+ // Free internal resources. Optional.
+ void (*destroy)(struct mp_filter *f);
+
+ // Called if any mp_pin was signalled (i.e. likely new data to process), or
+ // an async wakeup was received some time earlier.
+ // Generally, the implementation would consist of 2 stages:
+ // 1. check for the pin states, possibly request/probe for input/output
+ // 2. if data flow can happen, read a frame, perform actual work, write
+ // result
+ // The process function will usually run very often, when pin states are
+ // updated, so the generic code can determine where data flow can happen.
+ // In the common case, process() is called and runs only stage 1 a number of
+ // times, until it finally can run stage 2 too.
+ // Optional.
+ void (*process)(struct mp_filter *f);
+
+ // Clear internal state and buffers (e.g. on seeks). Filtering can restart
+ // after this, and all settings are preserved. It makes sense to preserve
+ // internal resources for further filtering as well if you can.
+ // Input/output pins are always cleared by the common code before invoking
+ // this callback.
+ // Optional, can be NULL for filters without state.
+ // Don't create or destroy filters in this function, don't reconnect pins,
+ // don't access pins.
+ void (*reset)(struct mp_filter *f);
+
+ // Send a command to the filter. Highly implementation specific, usually
+ // user-initiated. Optional.
+ bool (*command)(struct mp_filter *f, struct mp_filter_command *cmd);
+};
+
+// Create a filter instance. Returns NULL on failure.
+// Destroy/free with talloc_free().
+// This is for filter implementers only. Filters are created with their own
+// constructor functions (instead of a generic one), which call this function
+// to create the filter itself.
+// parent is never NULL; use mp_filter_create_root() to create a topmost
+// filter.
+// The parent does not imply anything about the position of the filter in
+// the dataflow (only the mp_pin connections matter). The parent exists for
+// convenience, which includes:
+// - passing down implicit and explicit parameters (such as the filter driver
+// loop)
+// - auto freeing child filters if the parent is free'd
+// - propagating errors
+// - setting the parent as default manual connection for new external filter
+// pins
+// The parent filter stays valid for the lifetime of any filters having it
+// directly or indirectly as parent. If the parent is free'd, all children are
+// automatically free'd.
+// All filters in the same parent tree must be driven in the same thread (or be
+// explicitly synchronized otherwise).
+// Driving the parent (or root) filter with mp_filter_run() will make sure this
+// filter is driven too, without having to resort to recursion.
+struct mp_filter *mp_filter_create(struct mp_filter *parent,
+ const struct mp_filter_info *info);
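+
+// For illustration, a typical constructor of a filter implementation might
+// look roughly like this (hypothetical sketch; "example", struct priv and
+// example_process are placeholders):
+//
+//   static const struct mp_filter_info example_filter = {
+//       .name = "example",
+//       .priv_size = sizeof(struct priv),
+//       .process = example_process,
+//   };
+//
+//   struct mp_filter *mp_example_create(struct mp_filter *parent)
+//   {
+//       struct mp_filter *f = mp_filter_create(parent, &example_filter);
+//       if (!f)
+//           return NULL;
+//       mp_filter_add_pin(f, MP_PIN_IN, "in");
+//       mp_filter_add_pin(f, MP_PIN_OUT, "out");
+//       return f;
+//   }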
+
+struct mp_filter_params {
+ // Identifies the filter and its implementation. The pointer must stay
+ // valid for the life time of the created filter instance.
+ const struct mp_filter_info *info;
+
+ // Must be set if global==NULL. See mp_filter_create() for remarks.
+ struct mp_filter *parent;
+
+ // Must be set if parent==NULL, can otherwise be NULL.
+ struct mpv_global *global;
+
+ // Filter specific parameters. Most filters will have a constructor
+ // function, and pass in something internal.
+ void *params;
+};
+
+// Same as mp_filter_create(), but technically more flexible.
+struct mp_filter *mp_filter_create_with_params(struct mp_filter_params *params);
diff --git a/filters/frame.c b/filters/frame.c
new file mode 100644
index 0000000000..6c5b28e77a
--- /dev/null
+++ b/filters/frame.c
@@ -0,0 +1,179 @@
+#include <libavutil/frame.h>
+
+#include "audio/aframe.h"
+#include "common/av_common.h"
+#include "video/mp_image.h"
+
+#include "frame.h"
+
+struct frame_handler {
+ const char *name;
+ bool is_data;
+ bool is_signaling;
+ void *(*new_ref)(void *data);
+ // The following must be non-NULL if new_ref is non-NULL.
+ double (*get_pts)(void *data);
+ void (*set_pts)(void *data, double pts);
+ AVFrame *(*new_av_ref)(void *data);
+ void *(*from_av_ref)(AVFrame *data);
+ void (*free)(void *data);
+};
+
+static void *video_ref(void *data)
+{
+ return mp_image_new_ref(data);
+}
+
+static double video_get_pts(void *data)
+{
+ return ((struct mp_image *)data)->pts;
+}
+
+static void video_set_pts(void *data, double pts)
+{
+ ((struct mp_image *)data)->pts = pts;
+}
+
+static AVFrame *video_new_av_ref(void *data)
+{
+ return mp_image_to_av_frame(data);
+}
+
+static void *video_from_av_ref(AVFrame *data)
+{
+ return mp_image_from_av_frame(data);
+}
+
+static void *audio_ref(void *data)
+{
+ return mp_aframe_new_ref(data);
+}
+
+static double audio_get_pts(void *data)
+{
+ return mp_aframe_get_pts(data);
+}
+
+static void audio_set_pts(void *data, double pts)
+{
+ mp_aframe_set_pts(data, pts);
+}
+
+static AVFrame *audio_new_av_ref(void *data)
+{
+ return mp_aframe_to_avframe(data);
+}
+
+static void *audio_from_av_ref(AVFrame *data)
+{
+ return mp_aframe_from_avframe(data);
+}
+
+static const struct frame_handler frame_handlers[] = {
+ [MP_FRAME_NONE] = {
+ .name = "none",
+ },
+ [MP_FRAME_EOF] = {
+ .name = "eof",
+ .is_signaling = true,
+ },
+ [MP_FRAME_VIDEO] = {
+ .name = "video",
+ .is_data = true,
+ .new_ref = video_ref,
+ .get_pts = video_get_pts,
+ .set_pts = video_set_pts,
+ .new_av_ref = video_new_av_ref,
+ .from_av_ref = video_from_av_ref,
+ .free = talloc_free,
+ },
+ [MP_FRAME_AUDIO] = {
+ .name = "audio",
+ .is_data = true,
+ .new_ref = audio_ref,
+ .get_pts = audio_get_pts,
+ .set_pts = audio_set_pts,
+ .new_av_ref = audio_new_av_ref,
+ .from_av_ref = audio_from_av_ref,
+ .free = talloc_free,
+ },
+};
+
+const char *mp_frame_type_str(enum mp_frame_type t)
+{
+ return frame_handlers[t].name;
+}
+
+bool mp_frame_is_data(struct mp_frame frame)
+{
+ return frame_handlers[frame.type].is_data;
+}
+
+bool mp_frame_is_signaling(struct mp_frame frame)
+{
+ return frame_handlers[frame.type].is_signaling;
+}
+
+void mp_frame_unref(struct mp_frame *frame)
+{
+ if (!frame)
+ return;
+
+ if (frame_handlers[frame->type].free)
+ frame_handlers[frame->type].free(frame->data);
+
+ *frame = (struct mp_frame){0};
+}
+
+struct mp_frame mp_frame_ref(struct mp_frame frame)
+{
+ if (frame_handlers[frame.type].new_ref) {
+ assert(frame.data);
+ frame.data = frame_handlers[frame.type].new_ref(frame.data);
+ if (!frame.data)
+ frame.type = MP_FRAME_NONE;
+ }
+ return frame;
+}
+
+double mp_frame_get_pts(struct mp_frame frame)
+{
+ if (frame_handlers[frame.type].get_pts)
+ return frame_handlers[frame.type].get_pts(frame.data);
+ return MP_NOPTS_VALUE;
+}
+
+void mp_frame_set_pts(struct mp_frame frame, double pts)
+{
+ if (frame_handlers[frame.type].set_pts)
+ frame_handlers[frame.type].set_pts(frame.data, pts);
+}
+
+AVFrame *mp_frame_to_av(struct mp_frame frame, struct AVRational *tb)
+{
+ if (!frame_handlers[frame.type].new_av_ref)
+ return NULL;
+
+ AVFrame *res = frame_handlers[frame.type].new_av_ref(frame.data);
+ if (!res)
+ return NULL;
+
+ res->pts = mp_pts_to_av(mp_frame_get_pts(frame), tb);
+ return res;
+}
+
+struct mp_frame mp_frame_from_av(enum mp_frame_type type, struct AVFrame *frame,
+ struct AVRational *tb)
+{
+ struct mp_frame res = {type};
+
+ if (!frame_handlers[res.type].from_av_ref)
+ return MP_NO_FRAME;
+
+ res.data = frame_handlers[res.type].from_av_ref(frame);
+ if (!res.data)
+ return MP_NO_FRAME;
+
+ mp_frame_set_pts(res, mp_pts_from_av(frame->pts, tb));
+ return res;
+}
diff --git a/filters/frame.h b/filters/frame.h
new file mode 100644
index 0000000000..135920dea7
--- /dev/null
+++ b/filters/frame.h
@@ -0,0 +1,55 @@
+#pragma once
+
+#include <stdbool.h>
+
+enum mp_frame_type {
+ MP_FRAME_NONE = 0, // NULL, placeholder, no frame available (_not_ EOF)
+ MP_FRAME_VIDEO, // struct mp_image*
+ MP_FRAME_AUDIO, // struct mp_aframe*
+ MP_FRAME_EOF, // NULL, signals end of stream (but frames after it can
+ // resume filtering!)
+};
+
+const char *mp_frame_type_str(enum mp_frame_type t);
+
+// Generic container for a piece of data, such as a video frame, or a collection
+// of audio samples. Wraps an actual media-specific frame data type in a
+// generic way. Also can be an empty frame for signaling (MP_FRAME_EOF and
+// possibly others).
+// This struct is usually allocated on the stack and can be copied by value.
+// You need to consider that the underlying pointer is ref-counted, and that
+// the _unref/_ref functions must be used accordingly.
+struct mp_frame {
+ enum mp_frame_type type;
+ void *data;
+};
+
+// Return whether the frame contains actual data (audio, video, ...). If false,
+// it's either signaling, or MP_FRAME_NONE.
+bool mp_frame_is_data(struct mp_frame frame);
+
+// Return whether the frame is for signaling (data flow commands like
+// MP_FRAME_EOF). If false, it's either data (mp_frame_is_data()), or
+// MP_FRAME_NONE.
+bool mp_frame_is_signaling(struct mp_frame frame);
+
+// Unreferences any frame data, and sets *frame to MP_FRAME_NONE. (It does
+// _not_ deallocate the memory block the parameter points to, only frame->data.)
+void mp_frame_unref(struct mp_frame *frame);
+
+// Return a new reference to the given frame. The caller owns the returned
+// frame. On failure, the returned frame is MP_FRAME_NONE.
+struct mp_frame mp_frame_ref(struct mp_frame frame);
+
+double mp_frame_get_pts(struct mp_frame frame);
+void mp_frame_set_pts(struct mp_frame frame, double pts);
+
+struct AVFrame;
+struct AVRational;
+struct AVFrame *mp_frame_to_av(struct mp_frame frame, struct AVRational *tb);
+struct mp_frame mp_frame_from_av(enum mp_frame_type type, struct AVFrame *frame,
+ struct AVRational *tb);
+
+#define MAKE_FRAME(type, frame) ((struct mp_frame){(type), (frame)})
+#define MP_NO_FRAME MAKE_FRAME(0, 0)
+#define MP_EOF_FRAME MAKE_FRAME(MP_FRAME_EOF, 0)
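+
+// For illustration (hypothetical sketch; mpi is some struct mp_image*):
+//
+//   // Wraps the pointer; no new reference is created.
+//   struct mp_frame frame = MAKE_FRAME(MP_FRAME_VIDEO, mpi);
+//   // Create and then release an additional reference.
+//   struct mp_frame copy = mp_frame_ref(frame);
+//   if (copy.type == MP_FRAME_NONE)
+//       ; // allocation failure
+//   mp_frame_unref(&copy);
+//   mp_frame_unref(&frame); // releases the mpi reference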
diff --git a/filters/user_filters.c b/filters/user_filters.c
new file mode 100644
index 0000000000..f59dd22d76
--- /dev/null
+++ b/filters/user_filters.c
@@ -0,0 +1,119 @@
+#include "config.h"
+
+#include "common/common.h"
+#include "common/msg.h"
+#include "options/m_config.h"
+
+#include "f_lavfi.h"
+#include "user_filters.h"
+
+static bool get_desc_from(const struct mp_user_filter_entry **list, int num,
+ struct m_obj_desc *dst, int index)
+{
+ if (index >= num)
+ return false;
+ const struct mp_user_filter_entry *entry = list[index];
+ *dst = entry->desc;
+ dst->p = entry;
+ return true;
+}
+
+// --vf option
+
+const struct mp_user_filter_entry *vf_list[] = {
+ &vf_format,
+ &vf_lavfi,
+ &vf_lavfi_bridge,
+ &vf_sub,
+#if HAVE_VAPOURSYNTH_CORE && HAVE_VAPOURSYNTH
+ &vf_vapoursynth,
+#endif
+#if HAVE_VAPOURSYNTH_CORE && HAVE_VAPOURSYNTH_LAZY
+ &vf_vapoursynth_lazy,
+#endif
+#if HAVE_VDPAU
+ &vf_vdpaupp,
+#endif
+#if HAVE_VAAPI
+ &vf_vavpp,
+#endif
+#if HAVE_D3D_HWACCEL
+ &vf_d3d11vpp,
+#endif
+};
+
+static bool get_vf_desc(struct m_obj_desc *dst, int index)
+{
+ return get_desc_from(vf_list, MP_ARRAY_SIZE(vf_list), dst, index);
+}
+
+const struct m_obj_list vf_obj_list = {
+ .get_desc = get_vf_desc,
+ .description = "video filters",
+ .allow_disable_entries = true,
+ .allow_unknown_entries = true,
+};
+
+// Create a bidir, single-media filter from command line arguments.
+struct mp_filter *mp_create_user_filter(struct mp_filter *parent,
+ enum mp_output_chain_type type,
+ const char *name, char **args)
+{
+ const struct m_obj_list *obj_list = NULL;
+ const char *defs_name = NULL;
+ enum mp_frame_type frame_type = 0;
+ if (type == MP_OUTPUT_CHAIN_VIDEO) {
+ frame_type = MP_FRAME_VIDEO;
+ obj_list = &vf_obj_list;
+ defs_name = "vf-defaults";
+ }
+ assert(frame_type && obj_list);
+
+ struct mp_filter *f = NULL;
+
+ struct m_obj_desc desc;
+ if (!m_obj_list_find(&desc, obj_list, bstr0(name))) {
+ // Generic lavfi bridge.
+ if (strncmp(name, "lavfi-", 6) == 0)
+ name += 6;
+ struct mp_lavfi *l =
+ mp_lavfi_create_filter(parent, frame_type, true, NULL, name, args);
+ if (l)
+ f = l->f;
+ goto done;
+ }
+
+ void *options = NULL;
+ if (desc.options) {
+ struct m_obj_settings *defs = NULL;
+ if (defs_name) {
+ mp_read_option_raw(parent->global, defs_name,
+ &m_option_type_obj_settings_list, &defs);
+ }
+
+ struct m_config *config =
+ m_config_from_obj_desc_and_args(NULL, parent->log, parent->global,
+ &desc, name, defs, args);
+
+ struct m_option dummy = {.type = &m_option_type_obj_settings_list};
+ m_option_free(&dummy, &defs);
+
+ if (!config)
+ goto done;
+
+ options = config->optstruct;
+ // Free config when options is freed.
+ ta_set_parent(options, NULL);
+ ta_set_parent(config, options);
+ }
+
+ const struct mp_user_filter_entry *entry = desc.p;
+ f = entry->create(parent, options);
+
+done:
+ if (!f) {
+ MP_ERR(parent, "Creating filter '%s' failed.\n", name);
+ return NULL;
+ }
+ return f;
+}
diff --git a/filters/user_filters.h b/filters/user_filters.h
new file mode 100644
index 0000000000..8e1e3e380d
--- /dev/null
+++ b/filters/user_filters.h
@@ -0,0 +1,29 @@
+#pragma once
+
+#include "options/m_option.h"
+
+#include "f_output_chain.h"
+
+// For creating filters from command line. Strictly for --vf/--af.
+struct mp_user_filter_entry {
+ // Name and sub-option description.
+ struct m_obj_desc desc;
+ // Create a filter. The option pointer is non-NULL if desc implies a priv
+ // struct to be allocated; then options are parsed into it. The callee
+ // must always free options (but can reparent it with talloc to keep it).
+ struct mp_filter *(*create)(struct mp_filter *parent, void *options);
+};
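+
+// For illustration only (hypothetical sketch; vf_example, example_create,
+// struct example_opts and example_opts_list are placeholders):
+//
+//   const struct mp_user_filter_entry vf_example = {
+//       .desc = {
+//           .name = "example",
+//           .priv_size = sizeof(struct example_opts),
+//           .options = example_opts_list,
+//       },
+//       .create = example_create,
+//   };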
+
+struct mp_filter *mp_create_user_filter(struct mp_filter *parent,
+ enum mp_output_chain_type type,
+ const char *name, char **args);
+
+extern const struct mp_user_filter_entry vf_lavfi;
+extern const struct mp_user_filter_entry vf_lavfi_bridge;
+extern const struct mp_user_filter_entry vf_sub;
+extern const struct mp_user_filter_entry vf_vapoursynth;
+extern const struct mp_user_filter_entry vf_vapoursynth_lazy;
+extern const struct mp_user_filter_entry vf_format;
+extern const struct mp_user_filter_entry vf_vdpaupp;
+extern const struct mp_user_filter_entry vf_vavpp;
+extern const struct mp_user_filter_entry vf_d3d11vpp;
diff --git a/options/m_option.h b/options/m_option.h
index 8c8ae855d1..f47dac294e 100644
--- a/options/m_option.h
+++ b/options/m_option.h
@@ -405,7 +405,6 @@ char *format_file_size(int64_t size);
// certain groups of options.
#define UPDATE_OPT_FIRST (1 << 7)
#define UPDATE_TERM (1 << 7) // terminal options
-#define UPDATE_DEINT (1 << 8) // --deinterlace
#define UPDATE_OSD (1 << 10) // related to OSD rendering
#define UPDATE_BUILTIN_SCRIPTS (1 << 11) // osc/ytdl/stats
#define UPDATE_IMGPAR (1 << 12) // video image params overrides
diff --git a/options/options.c b/options/options.c
index 44a9571598..ce602f2683 100644
--- a/options/options.c
+++ b/options/options.c
@@ -297,6 +297,18 @@ const struct m_sub_options dvd_conf = {
};
#undef OPT_BASE_STRUCT
+#define OPT_BASE_STRUCT struct filter_opts
+
+const struct m_sub_options filter_conf = {
+ .opts = (const struct m_option[]){
+ OPT_FLAG("deinterlace", deinterlace, 0),
+ {0}
+ },
+ .size = sizeof(OPT_BASE_STRUCT),
+ .change_flags = UPDATE_IMGPAR,
+};
+
+#undef OPT_BASE_STRUCT
#define OPT_BASE_STRUCT struct MPOpts
const m_option_t mp_opts[] = {
@@ -494,7 +506,7 @@ const m_option_t mp_opts[] = {
.deprecation_message = "use --vf + enable/disable flags"),
OPT_SETTINGSLIST("vf", vf_settings, 0, &vf_obj_list, ),
- OPT_FLAG("deinterlace", deinterlace, UPDATE_DEINT),
+ OPT_SUBSTRUCT("", filter_opts, filter_conf, 0),
OPT_STRING("ad", audio_decoders, 0),
OPT_STRING("vd", video_decoders, 0),
diff --git a/options/options.h b/options/options.h
index 97f01e21f9..a3444dbd4d 100644
--- a/options/options.h
+++ b/options/options.h
@@ -281,7 +281,7 @@ typedef struct MPOpts {
int pitch_correction;
struct m_obj_settings *vf_settings, *vf_defs;
struct m_obj_settings *af_settings, *af_defs;
- int deinterlace;
+ struct filter_opts *filter_opts;
float movie_aspect;
int aspect_method;
char **sub_name;
@@ -353,6 +353,10 @@ struct dvd_opts {
char *device;
};
+struct filter_opts {
+ int deinterlace;
+};
+
extern const m_option_t mp_opts[];
extern const struct MPOpts mp_default_opts;
extern const struct m_sub_options vo_sub_opts;
@@ -360,6 +364,7 @@ extern const struct m_sub_options stream_cache_conf;
extern const struct m_sub_options dvd_conf;
extern const struct m_sub_options mp_subtitle_sub_opts;
extern const struct m_sub_options mp_osd_render_sub_opts;
+extern const struct m_sub_options filter_conf;
int hwdec_validate_opt(struct mp_log *log, const m_option_t *opt,
struct bstr name, struct bstr param);
diff --git a/player/command.c b/player/command.c
index 4c03c7579e..9020ffe158 100644
--- a/player/command.c
+++ b/player/command.c
@@ -50,7 +50,6 @@
#include "options/m_option.h"
#include "options/m_property.h"
#include "options/m_config.h"
-#include "video/filter/vf.h"
#include "video/decode/vd.h"
#include "video/out/vo.h"
#include "video/csputils.h"
@@ -1432,12 +1431,25 @@ static int mp_property_filter_metadata(void *ctx, struct m_property *prop,
char *rem;
m_property_split_path(ka->key, &key, &rem);
struct mp_tags metadata = {0};
+ void *metadata_mem = NULL;
int res = CONTROL_UNKNOWN;
if (strcmp(type, "vf") == 0) {
if (!mpctx->vo_chain)
return M_PROPERTY_UNAVAILABLE;
- struct vf_chain *vf = mpctx->vo_chain->vf;
- res = vf_control_by_label(vf, VFCTRL_GET_METADATA, &metadata, key);
+
+ struct mp_tags *metadata_ptr = NULL;
+ struct mp_filter_command cmd = {
+ .type = MP_FILTER_COMMAND_GET_META,
+ .res = &metadata_ptr,
+ };
+ char *key0 = mp_tprintf(80, "%.*s", BSTR_P(key));
+ mp_output_chain_command(mpctx->vo_chain->filter, key0, &cmd);
+
+ if (metadata_ptr) {
+ metadata = *metadata_ptr;
+ metadata_mem = metadata_ptr;
+ res = CONTROL_OK;
+ }
} else if (strcmp(type, "af") == 0) {
#if HAVE_LIBAF
if (!(mpctx->ao_chain && mpctx->ao_chain->af))
@@ -1454,11 +1466,12 @@ static int mp_property_filter_metadata(void *ctx, struct m_property *prop,
if (strlen(rem)) {
struct m_property_action_arg next_ka = *ka;
next_ka.key = rem;
- return tag_property(M_PROPERTY_KEY_ACTION, &next_ka, &metadata);
+ res = tag_property(M_PROPERTY_KEY_ACTION, &next_ka, &metadata);
} else {
- return tag_property(ka->action, ka->arg, &metadata);
+ res = tag_property(ka->action, ka->arg, &metadata);
}
- return M_PROPERTY_OK;
+ talloc_free(metadata_mem);
+ return res;
default:
return M_PROPERTY_ERROR;
}
@@ -2601,10 +2614,10 @@ static int property_imgparams(struct mp_image_params p, int action, void *arg)
static struct mp_image_params get_video_out_params(struct MPContext *mpctx)
{
- if (!mpctx->vo_chain || mpctx->vo_chain->vf->initialized < 1)
+ if (!mpctx->vo_chain)
return (struct mp_image_params){0};
- return mpctx->vo_chain->vf->output_params;
+ return mpctx->vo_chain->filter->output_params;
}
static int mp_property_vo_imgparams(void *ctx, struct m_property *prop,
@@ -2636,8 +2649,8 @@ static int mp_property_vd_imgparams(void *ctx, struct m_property *prop,
struct track *track = mpctx->current_track[0][STREAM_VIDEO];
struct mp_codec_params *c =
track && track->stream ? track->stream->codec : NULL;
- if (vo_c->vf->input_params.imgfmt) {
- return property_imgparams(vo_c->vf->input_params, action, arg);
+ if (vo_c->filter->input_params.imgfmt) {
+ return property_imgparams(vo_c->filter->input_params, action, arg);
} else if (c && c->disp_w && c->disp_h) {
// Simplistic fallback for stupid scripts querying "width"/"height"
// before the first frame is decoded.
@@ -2975,7 +2988,7 @@ static int mp_property_aspect(void *ctx, struct m_property *prop,
float aspect = mpctx->opts->movie_aspect;
if (mpctx->vo_chain && aspect <= 0) {
- struct mp_image_params *params = &mpctx->vo_chain->vf->input_params;
+ struct mp_image_params *params = &mpctx->vo_chain->filter->input_params;
if (params && params->p_w > 0 && params->p_h > 0) {
int d_w, d_h;
mp_image_params_get_dsize(params, &d_w, &d_h);
@@ -5470,11 +5483,17 @@ int run_command(struct MPContext *mpctx, struct mp_cmd *cmd, struct mpv_node *re
return edit_filters_osd(mpctx, STREAM_VIDEO, cmd->args[0].v.s,
cmd->args[1].v.s, msg_osd);
- case MP_CMD_VF_COMMAND:
+ case MP_CMD_VF_COMMAND: {
if (!mpctx->vo_chain)
return -1;
- return vf_send_command(mpctx->vo_chain->vf, cmd->args[0].v.s,
- cmd->args[1].v.s, cmd->args[2].v.s);
+ struct mp_filter_command filter_cmd = {
+ .type = MP_FILTER_COMMAND_TEXT,
+ .cmd = cmd->args[1].v.s,
+ .arg = cmd->args[2].v.s,
+ };
+ return mp_output_chain_command(mpctx->vo_chain->filter, cmd->args[0].v.s,
+ &filter_cmd) ? 0 : -1;
+ }
#if HAVE_LIBAF
case MP_CMD_AF_COMMAND:
@@ -5785,9 +5804,6 @@ void mp_option_change_callback(void *ctx, struct m_config_option *co, int flags)
if (flags & UPDATE_TERM)
mp_update_logging(mpctx, false);
- if (flags & UPDATE_DEINT)
- recreate_auto_filters(mpctx);
-
if (flags & UPDATE_OSD) {
for (int n = 0; n < NUM_PTRACKS; n++) {
struct track *track = mpctx->current_track[n][STREAM_SUB];
diff --git a/player/core.h b/player/core.h
index f2fed55366..c980e068fe 100644
--- a/player/core.h
+++ b/player/core.h
@@ -26,6 +26,8 @@
#include "libmpv/client.h"
#include "common/common.h"
+#include "filters/filter.h"
+#include "filters/f_output_chain.h"
#include "options/options.h"
#include "sub/osd.h"
#include "audio/aframe.h"
@@ -172,15 +174,14 @@ struct vo_chain {
struct mp_hwdec_devices *hwdec_devs;
double container_fps;
- struct vf_chain *vf;
+ struct mp_output_chain *filter;
+
+ //struct vf_chain *vf;
struct vo *vo;
// 1-element input frame queue.
struct mp_image *input_mpi;
- // Last known input_mpi format (so vf can be reinitialized any time).
- struct mp_image_params input_format;
-
struct track *track;
struct lavfi_pad *filter_src;
struct dec_video *video_src;
@@ -319,6 +320,8 @@ typedef struct MPContext {
struct lavfi *lavfi;
+ struct mp_filter *filter_root;
+
struct ao *ao;
struct mp_aframe *ao_decoder_fmt; // for weak gapless audio check
struct ao_chain *ao_chain;
@@ -631,6 +634,5 @@ void uninit_video_out(struct MPContext *mpctx);
void uninit_video_chain(struct MPContext *mpctx);
double calc_average_frame_duration(struct MPContext *mpctx);
int init_video_decoder(struct MPContext *mpctx, struct track *track);
-void recreate_auto_filters(struct MPContext *mpctx);
#endif /* MPLAYER_MP_CORE_H */
diff --git a/player/loadfile.c b/player/loadfile.c
index 4a886ff156..aa35d38ddf 100644
--- a/player/loadfile.c
+++ b/player/loadfile.c
@@ -1175,6 +1175,8 @@ static void play_current_file(struct MPContext *mpctx)
mpctx->display_sync_error = 0.0;
mpctx->display_sync_active = false;
mpctx->seek = (struct seek_params){ 0 };
+ mpctx->filter_root = mp_filter_create_root(mpctx->global);
+ mp_filter_root_set_wakeup_cb(mpctx->filter_root, mp_wakeup_core_cb, mpctx);
reset_playback_state(mpctx);
@@ -1405,6 +1407,7 @@ terminate_playback:
uninit_demuxer(mpctx);
if (!opts->gapless_audio && !mpctx->encode_lavc_ctx)
uninit_audio_out(mpctx);
+ TA_FREEP(&mpctx->filter_root);
mpctx->playback_initialized = false;
diff --git a/player/playloop.c b/player/playloop.c
index 535bff883f..610bbcdcbf 100644
--- a/player/playloop.c
+++ b/player/playloop.c
@@ -43,7 +43,6 @@
#include "demux/demux.h"
#include "stream/stream.h"
#include "sub/osd.h"
-#include "video/filter/vf.h"
#include "video/decode/dec_video.h"
#include "video/out/vo.h"
@@ -223,6 +222,8 @@ void reset_playback_state(struct MPContext *mpctx)
audio_reset_decoding(mpctx->tracks[n]->d_audio);
}
+ mp_filter_reset(mpctx->filter_root);
+
reset_video_state(mpctx);
reset_audio_state(mpctx);
reset_subtitle_state(mpctx);
@@ -1159,6 +1160,8 @@ void run_playloop(struct MPContext *mpctx)
handle_osd_redraw(mpctx);
+ if (mp_filter_run(mpctx->filter_root))
+ mp_wakeup_core(mpctx);
mp_wait_events(mpctx);
handle_pause_on_low_cache(mpctx);
diff --git a/player/sub.c b/player/sub.c
index 0de02ea61b..2d644e3e00 100644
--- a/player/sub.c
+++ b/player/sub.c
@@ -34,7 +34,6 @@
#include "demux/demux.h"
#include "video/mp_image.h"
#include "video/decode/dec_video.h"
-#include "video/filter/vf.h"
#include "core.h"
@@ -90,7 +89,7 @@ static bool update_subtitle(struct MPContext *mpctx, double video_pts,
return true;
if (mpctx->vo_chain) {
- struct mp_image_params params = mpctx->vo_chain->vf->input_params;
+ struct mp_image_params params = mpctx->vo_chain->filter->input_params;
if (params.imgfmt)
sub_control(dec_sub, SD_CTRL_SET_VIDEO_PARAMS, &params);
}
diff --git a/player/video.c b/player/video.c
index ef1423c8a5..8cf193368c 100644
--- a/player/video.c
+++ b/player/video.c
@@ -39,7 +39,6 @@
#include "stream/stream.h"
#include "sub/osd.h"
#include "video/hwdec.h"
-#include "video/filter/vf.h"
#include "video/decode/dec_video.h"
#include "video/decode/vd.h"
#include "video/out/vo.h"
@@ -49,8 +48,6 @@
#include "command.h"
#include "screenshot.h"
-#define VF_DEINTERLACE_LABEL "deinterlace"
-
enum {
// update_video() - code also uses: <0 error, 0 eof, >0 progress
VD_ERROR = -1,
@@ -68,129 +65,13 @@ static const char av_desync_help_text[] =
"position will not match to the video (see A-V status field).\n"
"\n";
-static void set_allowed_vo_formats(struct vo_chain *vo_c)
-{
- vo_query_formats(vo_c->vo, vo_c->vf->allowed_output_formats);
-}
-
-static int try_filter(struct vo_chain *vo_c, char *name, char *label, char **args)
-{
- struct vf_instance *vf = vf_append_filter(vo_c->vf, name, args);
- if (!vf)
- return -1;
-
- vf->label = talloc_strdup(vf, label);
-
- if (vf_reconfig(vo_c->vf, &vo_c->input_format) < 0) {
- vf_remove_filter(vo_c->vf, vf);
- // restore
- vf_reconfig(vo_c->vf, &vo_c->input_format);
- return -1;
- }
- return 0;
-}
-
-static bool check_output_format(struct vo_chain *vo_c, int imgfmt)
-{
- return vo_c->vf->output_params.imgfmt == imgfmt;
-}
-
-static int probe_deint_filters(struct vo_chain *vo_c)
-{
- if (check_output_format(vo_c, IMGFMT_VDPAU)) {
- char *args[5] = {"deint", "yes"};
- int pref = 0;
- vo_control(vo_c->vo, VOCTRL_GET_PREF_DEINT, &pref);
- pref = pref < 0 ? -pref : pref;
- if (pref > 0 && pref <= 4) {
- const char *types[] =
- {"", "first-field", "bob", "temporal", "temporal-spatial"};
- args[2] = "deint-mode";
- args[3] = (char *)types[pref];
- }
-
- return try_filter(vo_c, "vdpaupp", VF_DEINTERLACE_LABEL, args);
- }
- if (check_output_format(vo_c, IMGFMT_VAAPI))
- return try_filter(vo_c, "vavpp", VF_DEINTERLACE_LABEL, NULL);
- if (check_output_format(vo_c, IMGFMT_D3D11VA) ||
- check_output_format(vo_c, IMGFMT_D3D11NV12))
- return try_filter(vo_c, "d3d11vpp", VF_DEINTERLACE_LABEL, NULL);
- char *args[] = {"mode", "send_field", "deint", "interlaced", NULL};
- return try_filter(vo_c, "yadif", VF_DEINTERLACE_LABEL, args);
-}
-
-// Reconfigure the filter chain according to the new input format.
-static void filter_reconfig(struct MPContext *mpctx, struct vo_chain *vo_c)
-{
- struct mp_image_params params = vo_c->input_format;
- if (!params.imgfmt)
- return;
-
- set_allowed_vo_formats(vo_c);
-
- char *filters[] = {"autorotate", "deinterlace", NULL};
- for (int n = 0; filters[n]; n++) {
- struct vf_instance *vf = vf_find_by_label(vo_c->vf, filters[n]);
- if (vf)
- vf_remove_filter(vo_c->vf, vf);
- }
-
- if (vo_c->vf->initialized < 1) {
- if (vf_reconfig(vo_c->vf, &params) < 0)
- return;
- }
-
- if (params.rotate) {
- if (!(vo_c->vo->driver->caps & VO_CAP_ROTATE90) || params.rotate % 90) {
- // Try to insert a rotation filter.
- double angle = params.rotate / 360.0 * M_PI * 2;
- char *args[] = {"angle", mp_tprintf(30, "%f", angle),
- "ow", mp_tprintf(30, "rotw(%f)", angle),
- "oh", mp_tprintf(30, "roth(%f)", angle),
- NULL};
- if (try_filter(vo_c, "rotate", "autorotate", args) < 0)
- MP_ERR(vo_c, "Can't insert rotation filter.\n");
- }
- }
-
- if (mpctx->opts->deinterlace)
- probe_deint_filters(vo_c);
-}
-
-void recreate_auto_filters(struct MPContext *mpctx)
-{
- if (!mpctx->vo_chain)
- return;
-
- filter_reconfig(mpctx, mpctx->vo_chain);
-
- mp_force_video_refresh(mpctx);
-
- mp_notify(mpctx, MPV_EVENT_VIDEO_RECONFIG, NULL);
-}
-
-static void recreate_video_filters(struct MPContext *mpctx)
+static bool recreate_video_filters(struct MPContext *mpctx)
{
struct MPOpts *opts = mpctx->opts;
struct vo_chain *vo_c = mpctx->vo_chain;
assert(vo_c);
- vf_destroy(vo_c->vf);
- vo_c->vf = vf_new(mpctx->global);
- vo_c->vf->hwdec_devs = vo_c->hwdec_devs;
- vo_c->vf->wakeup_callback = mp_wakeup_core_cb;
- vo_c->vf->wakeup_callback_ctx = mpctx;
- vo_c->vf->container_fps = vo_c->container_fps;
- vo_control(vo_c->vo, VOCTRL_GET_DISPLAY_FPS, &vo_c->vf->display_fps);
-
- vf_append_filter_list(vo_c->vf, opts->vf_settings);
-
- // for vf_sub
- osd_set_render_subs_in_filter(mpctx->osd,
- vf_control_any(vo_c->vf, VFCTRL_INIT_OSD, mpctx->osd) > 0);
-
- set_allowed_vo_formats(vo_c);
+ return mp_output_chain_update_filters(vo_c->filter, opts->vf_settings);
}
int reinit_video_filters(struct MPContext *mpctx)
@@ -199,25 +80,20 @@ int reinit_video_filters(struct MPContext *mpctx)
if (!vo_c)
return 0;
- bool need_reconfig = vo_c->vf->initialized != 0;
-
- recreate_video_filters(mpctx);
- if (need_reconfig)
- filter_reconfig(mpctx, vo_c);
+ if (!recreate_video_filters(mpctx))
+ return -1;
mp_force_video_refresh(mpctx);
mp_notify(mpctx, MPV_EVENT_VIDEO_RECONFIG, NULL);
- return vo_c->vf->initialized;
+ return 0;
}
static void vo_chain_reset_state(struct vo_chain *vo_c)
{
mp_image_unrefp(&vo_c->input_mpi);
- if (vo_c->vf->initialized == 1)
- vf_seek_reset(vo_c->vf);
vo_seek_reset(vo_c->vo);
if (vo_c->video_src)
@@ -280,7 +156,7 @@ static void vo_chain_uninit(struct vo_chain *vo_c)
mp_image_unrefp(&vo_c->input_mpi);
mp_image_unrefp(&vo_c->cached_coverart);
- vf_destroy(vo_c->vf);
+ talloc_free(vo_c->filter->f);
talloc_free(vo_c);
// this does not free the VO
}
@@ -384,7 +260,10 @@ void reinit_video_chain_src(struct MPContext *mpctx, struct track *track)
mpctx->vo_chain = vo_c;
vo_c->log = mpctx->log;
vo_c->vo = mpctx->video_out;
- vo_c->vf = vf_new(mpctx->global);
+ vo_c->filter =
+ mp_output_chain_create(mpctx->filter_root, MP_OUTPUT_CHAIN_VIDEO);
+ vo_c->filter->container_fps = vo_c->container_fps;
+ mp_output_chain_set_vo(vo_c->filter, vo_c->vo);
vo_c->hwdec_devs = vo_c->vo->hwdec_devs;
@@ -407,7 +286,8 @@ void reinit_video_chain_src(struct MPContext *mpctx, struct track *track)
encode_lavc_set_video_fps(mpctx->encode_lavc_ctx, vo_c->container_fps);
#endif
- recreate_video_filters(mpctx);
+ if (!recreate_video_filters(mpctx))
+ goto err_out;
update_screensaver_state(mpctx);
@@ -435,7 +315,7 @@ void mp_force_video_refresh(struct MPContext *mpctx)
struct MPOpts *opts = mpctx->opts;
struct vo_chain *vo_c = mpctx->vo_chain;
- if (!vo_c || !vo_c->input_format.imgfmt)
+ if (!vo_c)
return;
// If not paused, the next frame should come soon enough.
@@ -499,68 +379,27 @@ static int decode_image(struct MPContext *mpctx)
}
}
-// Feed newly decoded frames to the filter, take care of format changes.
-// If eof=true, drain the filter chain, and return VD_EOF if empty.
static int video_filter(struct MPContext *mpctx, bool eof)
{
struct vo_chain *vo_c = mpctx->vo_chain;
- struct vf_chain *vf = vo_c->vf;
-
- if (vf->initialized < 0)
- return VD_ERROR;
-
- // There is already a filtered frame available.
- // If vf_needs_input() returns > 0, the filter wants input anyway.
- if (vf_output_frame(vf, eof) > 0 && vf_needs_input(vf) < 1)
- return VD_PROGRESS;
-
- // Decoder output is different from filter input?
- bool need_vf_reconfig = !vf->input_params.imgfmt || vf->initialized < 1 ||
- !mp_image_params_equal(&vo_c->input_format, &vf->input_params);
-
- // (If imgfmt==0, nothing was decoded yet, and the format is unknown.)
- if (need_vf_reconfig && vo_c->input_format.imgfmt) {
- // Drain the filter chain.
- if (vf_output_frame(vf, true) > 0)
- return VD_PROGRESS;
- // The filter chain is drained; execute the filter format change.
- vf->initialized = 0;
- filter_reconfig(mpctx, mpctx->vo_chain);
-
- mp_notify(mpctx, MPV_EVENT_VIDEO_RECONFIG, NULL);
-
- // Most video filters don't work with hardware decoding, so this
- // might be the reason why filter reconfig failed.
- if (vf->initialized < 0 && vo_c->video_src &&
- video_vd_control(vo_c->video_src, VDCTRL_FORCE_HWDEC_FALLBACK, NULL)
- == CONTROL_OK)
- {
- // Fallback active; decoder will return software format next
- // time. Don't abort video decoding.
- vf->initialized = 0;
- mp_image_unrefp(&vo_c->input_mpi);
- vo_c->input_format = (struct mp_image_params){0};
- MP_VERBOSE(mpctx, "hwdec fallback due to filters.\n");
- return VD_PROGRESS; // try again
+ if (vo_c->input_mpi || eof) {
+ struct mp_frame frame = {MP_FRAME_VIDEO, vo_c->input_mpi};
+ if (!vo_c->input_mpi) {
+ frame = MP_EOF_FRAME;
+ if (vo_c->filter->got_input_eof)
+ return vo_c->filter->got_output_eof ? VD_EOF : VD_WAIT;
}
- if (vf->initialized < 1) {
- MP_FATAL(mpctx, "Cannot initialize video filters.\n");
- return VD_ERROR;
+ if (mp_pin_in_needs_data(vo_c->filter->f->pins[0])) {
+ if (vo_c->input_mpi && osd_get_render_subs_in_filter(mpctx->osd))
+ update_subtitles(mpctx, vo_c->input_mpi->pts);
+ mp_pin_in_write(vo_c->filter->f->pins[0], frame);
+ vo_c->input_mpi = NULL;
+ return VD_PROGRESS;
}
- return VD_RECONFIG;
}
- // If something was decoded, and the filter chain is ready, filter it.
- if (!need_vf_reconfig && vo_c->input_mpi) {
- if (osd_get_render_subs_in_filter(mpctx->osd))
- update_subtitles(mpctx, vo_c->input_mpi->pts);
- vf_filter_frame(vf, vo_c->input_mpi);
- vo_c->input_mpi = NULL;
- return VD_PROGRESS;
- }
-
- return eof ? VD_EOF : VD_PROGRESS;
+ return VD_WAIT;
}
// Make sure at least 1 filtered image is available, decode new video if needed.
@@ -589,33 +428,15 @@ static int video_decode_and_filter(struct MPContext *mpctx)
}
if (vo_c->input_mpi) {
- vo_c->input_format = vo_c->input_mpi->params;
- vf_set_proto_frame(vo_c->vf, vo_c->input_mpi);
-
if (vo_c->is_coverart && !vo_c->cached_coverart)
vo_c->cached_coverart = mp_image_new_ref(vo_c->input_mpi);
+ } else if (r == VD_EOF) {
+ r = video_filter(mpctx, true);
}
- bool eof = !vo_c->input_mpi && (r == VD_EOF || r < 0);
- r = video_filter(mpctx, eof);
- if (r == VD_RECONFIG) // retry feeding decoded image
- r = video_filter(mpctx, eof);
return r;
}
-static int video_feed_async_filter(struct MPContext *mpctx)
-{
- struct vf_chain *vf = mpctx->vo_chain->vf;
-
- if (vf->initialized < 0)
- return VD_ERROR;
-
- if (vf_needs_input(vf) < 1)
- return 0;
- mp_wakeup_core(mpctx); // retry until done
- return video_decode_and_filter(mpctx);
-}
-
/* Modify video timing to match the audio timeline. There are two main
* reasons this is needed. First, video and audio can start from different
* positions at beginning of file or after a seek (MPlayer starts both
@@ -749,16 +570,33 @@ static int video_output_image(struct MPContext *mpctx)
int r = VD_PROGRESS;
if (needs_new_frame(mpctx)) {
// Filter a new frame.
- r = video_decode_and_filter(mpctx);
- if (r < 0)
- return r; // error
- struct mp_image *img = vf_read_output_frame(vo_c->vf);
+ if (!mp_pin_out_request_data(vo_c->filter->f->pins[1])) {
+ r = video_decode_and_filter(mpctx);
+ if (r < 0)
+ return r; // error
+ }
+ struct mp_image *img = NULL;
+ struct mp_frame frame = mp_pin_out_read(vo_c->filter->f->pins[1]);
+ if (frame.type == MP_FRAME_NONE && vo_c->filter->got_output_eof)
+ frame = MP_EOF_FRAME;
+ if (frame.type == MP_FRAME_NONE)
+ return video_decode_and_filter(mpctx);
+ if (frame.type == MP_FRAME_EOF) {
+ r = VD_EOF;
+ } else if (frame.type == MP_FRAME_VIDEO) {
+ img = frame.data;
+ } else {
+ MP_ERR(mpctx, "unexpected frame type %s\n",
+ mp_frame_type_str(frame.type));
+ mp_frame_unref(&frame);
+ return VD_ERROR;
+ }
if (img) {
double endpts = get_play_end_pts(mpctx);
if ((endpts != MP_NOPTS_VALUE && img->pts >= endpts) ||
mpctx->max_frames == 0)
{
- vf_unread_output_frame(vo_c->vf, img);
+ mp_pin_out_unread(vo_c->filter->f->pins[1], frame);
img = NULL;
r = VD_EOF;
} else if (hrseek && mpctx->hrseek_lastframe) {
@@ -799,6 +637,21 @@ static int video_output_image(struct MPContext *mpctx)
return have_new_frame(mpctx, r <= 0) ? VD_NEW_FRAME : r;
}
+static bool check_for_hwdec_fallback(struct MPContext *mpctx)
+{
+ struct vo_chain *vo_c = mpctx->vo_chain;
+
+ if (!vo_c->filter->failed_output_conversion || !vo_c->video_src)
+ return false;
+
+ if (video_vd_control(vo_c->video_src, VDCTRL_FORCE_HWDEC_FALLBACK, NULL)
+ != CONTROL_OK)
+ return false;
+
+ mp_output_chain_reset_harder(vo_c->filter);
+ return true;
+}
+
/* Update avsync before a new video frame is displayed. Actually, this can be
* called arbitrarily often before the actual display.
* This adjusts the time of the next video frame */
@@ -1228,7 +1081,13 @@ void write_video(struct MPContext *mpctx)
if (!mpctx->vo_chain)
return;
struct track *track = mpctx->vo_chain->track;
- struct vo *vo = mpctx->vo_chain->vo;
+ struct vo_chain *vo_c = mpctx->vo_chain;
+ struct vo *vo = vo_c->vo;
+
+ if (vo_c->filter->reconfig_happened) {
+ mp_notify(mpctx, MPV_EVENT_VIDEO_RECONFIG, NULL);
+ vo_c->filter->reconfig_happened = false;
+ }
// Actual playback starts when both audio and video are ready.
if (mpctx->video_status == STATUS_READY)
@@ -1247,6 +1106,11 @@ void write_video(struct MPContext *mpctx)
return;
if (r == VD_EOF) {
+ if (check_for_hwdec_fallback(mpctx))
+ return;
+ if (vo_c->filter->failed_output_conversion)
+ goto error;
+
mpctx->delay = 0;
mpctx->last_av_difference = 0;
@@ -1334,7 +1198,7 @@ void write_video(struct MPContext *mpctx)
// (NB: in theory, the 1st frame after display sync mode change uses the
// wrong waiting mode)
if (!vo_is_ready_for_frame(vo, mpctx->display_sync_active ? -1 : pts)) {
- if (video_feed_async_filter(mpctx) < 0)
+ if (video_decode_and_filter(mpctx) < 0)
goto error;
return;
}
diff --git a/video/d3d.c b/video/d3d.c
index 8f04dcd0d6..b7a644dcfd 100644
--- a/video/d3d.c
+++ b/video/d3d.c
@@ -119,9 +119,6 @@ static void d3d11_complete_image_params(struct mp_image *img)
// According to hwcontex_d3d11va.h, this means DXGI_FORMAT_420_OPAQUE.
img->params.hw_flags = hw_frames->sw_format == AV_PIX_FMT_YUV420P
? MP_IMAGE_HW_FLAG_OPAQUE : 0;
-
- if (img->params.hw_subfmt == IMGFMT_NV12)
- mp_image_setfmt(img, IMGFMT_D3D11NV12);
}
static struct AVBufferRef *d3d11_create_standalone(struct mpv_global *global,
diff --git a/video/filter/refqueue.c b/video/filter/refqueue.c
index 6b2e5a2110..964fa29c08 100644
--- a/video/filter/refqueue.c
+++ b/video/filter/refqueue.c
@@ -17,12 +17,25 @@
#include <assert.h>
+#include <libavutil/buffer.h>
+
#include "common/common.h"
+#include "filters/f_autoconvert.h"
+#include "filters/filter_internal.h"
#include "video/mp_image.h"
#include "refqueue.h"
struct mp_refqueue {
+ struct mp_filter *filter;
+ struct mp_autoconvert *conv;
+ struct mp_pin *in, *out;
+
+ struct mp_image *in_format;
+
+ // Buffered frame in case of format changes.
+ struct mp_image *next;
+
int needed_past_frames;
int needed_future_frames;
int flags;
@@ -38,17 +51,37 @@ struct mp_refqueue {
int pos;
};
-struct mp_refqueue *mp_refqueue_alloc(void)
+static bool mp_refqueue_has_output(struct mp_refqueue *q);
+
+static void refqueue_dtor(void *p)
{
- struct mp_refqueue *q = talloc_zero(NULL, struct mp_refqueue);
+ struct mp_refqueue *q = p;
mp_refqueue_flush(q);
- return q;
+ mp_image_unrefp(&q->in_format);
+ talloc_free(q->conv->f);
}
-void mp_refqueue_free(struct mp_refqueue *q)
+struct mp_refqueue *mp_refqueue_alloc(struct mp_filter *f)
{
+ struct mp_refqueue *q = talloc_zero(f, struct mp_refqueue);
+ talloc_set_destructor(q, refqueue_dtor);
+ q->filter = f;
+
+ q->conv = mp_autoconvert_create(f);
+ if (!q->conv)
+ abort();
+
+ q->in = q->conv->f->pins[1];
+ mp_pin_connect(q->conv->f->pins[0], f->ppins[0]);
+ q->out = f->ppins[1];
+
mp_refqueue_flush(q);
- talloc_free(q);
+ return q;
+}
+
+void mp_refqueue_add_in_format(struct mp_refqueue *q, int fmt, int subfmt)
+{
+ mp_autoconvert_add_imgfmt(q->conv, fmt, subfmt);
}
// The minimum number of frames required before and after the current frame.
@@ -103,18 +136,12 @@ void mp_refqueue_flush(struct mp_refqueue *q)
q->pos = -1;
q->second_field = false;
q->eof = false;
+ mp_image_unrefp(&q->next);
}
-// Add a new frame to the queue. (Call mp_refqueue_next() to advance the
-// current frame and to discard unneeded past frames.)
-// Ownership goes to the mp_refqueue.
-// Passing NULL means EOF, in which case mp_refqueue_need_input() will return
-// false even if not enough future frames are available.
-void mp_refqueue_add_input(struct mp_refqueue *q, struct mp_image *img)
+static void mp_refqueue_add_input(struct mp_refqueue *q, struct mp_image *img)
{
- q->eof = !img;
- if (!img)
- return;
+ assert(img);
MP_TARRAY_INSERT_AT(q, q->queue, q->num_queue, 0, img);
q->pos++;
@@ -122,12 +149,12 @@ void mp_refqueue_add_input(struct mp_refqueue *q, struct mp_image *img)
assert(q->pos >= 0 && q->pos < q->num_queue);
}
-bool mp_refqueue_need_input(struct mp_refqueue *q)
+static bool mp_refqueue_need_input(struct mp_refqueue *q)
{
return q->pos < q->needed_future_frames && !q->eof;
}
-bool mp_refqueue_has_output(struct mp_refqueue *q)
+static bool mp_refqueue_has_output(struct mp_refqueue *q)
{
return q->pos >= 0 && !mp_refqueue_need_input(q);
}
@@ -161,18 +188,8 @@ static bool output_next_field(struct mp_refqueue *q)
return true;
}
-// Advance current field, depending on interlace flags.
-void mp_refqueue_next_field(struct mp_refqueue *q)
-{
- if (!mp_refqueue_has_output(q))
- return;
-
- if (!output_next_field(q))
- mp_refqueue_next(q);
-}
-
// Advance to next input frame (skips fields even in field output mode).
-void mp_refqueue_next(struct mp_refqueue *q)
+static void mp_refqueue_next(struct mp_refqueue *q)
{
if (!mp_refqueue_has_output(q))
return;
@@ -192,6 +209,16 @@ void mp_refqueue_next(struct mp_refqueue *q)
assert(q->pos >= -1 && q->pos < q->num_queue);
}
+// Advance current field, depending on interlace flags.
+static void mp_refqueue_next_field(struct mp_refqueue *q)
+{
+ if (!mp_refqueue_has_output(q))
+ return;
+
+ if (!output_next_field(q))
+ mp_refqueue_next(q);
+}
+
// Return a frame by relative position:
// -1: first past frame
// 0: current frame
@@ -219,3 +246,114 @@ bool mp_refqueue_is_second_field(struct mp_refqueue *q)
{
return mp_refqueue_has_output(q) && q->second_field;
}
+
+// Return non-NULL if a format change happened. A format change is defined by
+// a change in image parameters, using broad enough checks that happen to be
+// sufficient for all users of refqueue.
+// On format change, the refqueue transparently drains remaining frames, and
+// once that is done, this function returns an mp_image reference to the new
+// frame. Reinit the low-level video processor based on it, and then leave the
+// reference alone and continue normally.
+// All frames returned in the future will have a compatible format.
+struct mp_image *mp_refqueue_execute_reinit(struct mp_refqueue *q)
+{
+ if (mp_refqueue_has_output(q) || !q->next)
+ return NULL;
+
+ struct mp_image *cur = q->next;
+ q->next = NULL;
+
+ mp_image_unrefp(&q->in_format);
+ mp_refqueue_flush(q);
+
+ q->in_format = mp_image_new_ref(cur);
+ if (!q->in_format)
+ abort();
+ mp_image_unref_data(q->in_format);
+
+ mp_refqueue_add_input(q, cur);
+ return cur;
+}
+
+// Main processing function. Call this in the filter process function.
+// Returns whether enough input frames are available for filtering and the
+// output pin needs data; in other words, if this returns true, render a
+// frame and output it.
+// If it returns true, you must call mp_refqueue_write_out_pin() to make
+// progress.
+bool mp_refqueue_can_output(struct mp_refqueue *q)
+{
+ if (!mp_pin_in_needs_data(q->out))
+ return false;
+
+ // Strictly return any output first to reduce latency.
+ if (mp_refqueue_has_output(q))
+ return true;
+
+ if (q->next) {
+        // Force another process() call so mp_refqueue_execute_reinit() runs.
+ mp_filter_internal_mark_progress(q->filter);
+ return false;
+ }
+
+ struct mp_frame frame = mp_pin_out_read(q->in);
+ if (frame.type == MP_FRAME_NONE)
+ return false;
+
+ if (frame.type == MP_FRAME_EOF) {
+ q->eof = true;
+ if (mp_refqueue_has_output(q)) {
+ mp_pin_out_unread(q->in, frame);
+ return true;
+ }
+ mp_pin_in_write(q->out, frame);
+ mp_refqueue_flush(q);
+ return false;
+ }
+
+ if (frame.type != MP_FRAME_VIDEO) {
+ MP_ERR(q->filter, "unsupported frame type\n");
+ mp_frame_unref(&frame);
+ mp_filter_internal_mark_failed(q->filter);
+ return false;
+ }
+
+ struct mp_image *img = frame.data;
+
+ if (!q->in_format || !!q->in_format->hwctx != !!img->hwctx ||
+ (img->hwctx && img->hwctx->data != q->in_format->hwctx->data) ||
+ !mp_image_params_equal(&q->in_format->params, &img->params))
+ {
+ q->next = img;
+ q->eof = true;
+ mp_filter_internal_mark_progress(q->filter);
+ return false;
+ }
+
+ mp_refqueue_add_input(q, img);
+
+ if (mp_refqueue_has_output(q))
+ return true;
+
+ mp_pin_out_request_data(q->in);
+ return false;
+}
+
+// Output a filtered frame. (Accepts NULL to signal a generic error.)
+void mp_refqueue_write_out_pin(struct mp_refqueue *q, struct mp_image *mpi)
+{
+ if (mpi) {
+ mp_pin_in_write(q->out, MAKE_FRAME(MP_FRAME_VIDEO, mpi));
+ } else {
+ MP_WARN(q->filter, "failed to output frame\n");
+ mp_filter_internal_mark_failed(q->filter);
+ }
+ mp_refqueue_next_field(q);
+}
+
+// Return frame for current format (without data). Reference is owned by q,
+// might go away on further queue accesses. NULL if none yet.
+struct mp_image *mp_refqueue_get_format(struct mp_refqueue *q)
+{
+ return q->in_format;
+}
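
For orientation, the calling convention of the reworked refqueue boils down to the following sketch. It is illustrative only and not part of the patch: example_priv, example_process, and the passthrough "rendering" are made up, but the execute_reinit / can_output / write_out_pin sequence mirrors the pattern used by vf_d3d11vpp further down.

    #include "filters/filter.h"
    #include "video/filter/refqueue.h"
    #include "video/mp_image.h"

    struct example_priv {
        struct mp_refqueue *queue;  // talloc child of the filter
    };

    // Hypothetical mp_filter process callback built on the new refqueue API.
    static void example_process(struct mp_filter *f)
    {
        struct example_priv *p = f->priv;

        // Reinitialize the underlying processor whenever the refqueue reports
        // a new input format. The returned reference stays owned by the queue.
        struct mp_image *new_fmt = mp_refqueue_execute_reinit(p->queue);
        if (new_fmt) {
            // (re)create the video processor for new_fmt->params here
        }

        // Produce output only if enough reference frames are queued and the
        // output pin actually wants data.
        if (!mp_refqueue_can_output(p->queue))
            return;

        // "Render" the current frame; a real filter would also use its
        // past/future references via mp_refqueue_get(q, -1/+1). Passing NULL
        // to the write function signals an error.
        struct mp_image *out = mp_image_new_ref(mp_refqueue_get(p->queue, 0));
        mp_refqueue_write_out_pin(p->queue, out);
    }
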
diff --git a/video/filter/refqueue.h b/video/filter/refqueue.h
index bb23506ac2..0a8ace0031 100644
--- a/video/filter/refqueue.h
+++ b/video/filter/refqueue.h
@@ -3,22 +3,26 @@
#include <stdbool.h>
+#include "filters/filter.h"
+
// A helper for deinterlacers which require past/future reference frames.
struct mp_refqueue;
-struct mp_refqueue *mp_refqueue_alloc(void);
-void mp_refqueue_free(struct mp_refqueue *q);
+struct mp_refqueue *mp_refqueue_alloc(struct mp_filter *f);
+
+void mp_refqueue_add_in_format(struct mp_refqueue *q, int fmt, int subfmt);
void mp_refqueue_set_refs(struct mp_refqueue *q, int past, int future);
void mp_refqueue_flush(struct mp_refqueue *q);
-void mp_refqueue_add_input(struct mp_refqueue *q, struct mp_image *img);
-bool mp_refqueue_need_input(struct mp_refqueue *q);
-bool mp_refqueue_has_output(struct mp_refqueue *q);
-void mp_refqueue_next(struct mp_refqueue *q);
-void mp_refqueue_next_field(struct mp_refqueue *q);
struct mp_image *mp_refqueue_get(struct mp_refqueue *q, int pos);
+struct mp_image *mp_refqueue_execute_reinit(struct mp_refqueue *q);
+bool mp_refqueue_can_output(struct mp_refqueue *q);
+void mp_refqueue_write_out_pin(struct mp_refqueue *q, struct mp_image *mpi);
+
+struct mp_image *mp_refqueue_get_format(struct mp_refqueue *q);
+
enum {
MP_MODE_DEINT = (1 << 0), // deinterlacing enabled
MP_MODE_OUTPUT_FIELDS = (1 << 1), // output fields separately
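
Setup and teardown change accordingly: the queue is now allocated as a talloc child of the filter (there is no mp_refqueue_free() anymore), and input format constraints are declared up front with mp_refqueue_add_in_format(), which feeds the embedded autoconvert instance. Below is a sketch of a matching create function, reusing the hypothetical names from the sketch above; the chosen format and mode flags are arbitrary.

    static void example_process(struct mp_filter *f);  // see the sketch above

    static const struct mp_filter_info example_filter = {
        .name = "example",
        .process = example_process,
        .priv_size = sizeof(struct example_priv),
    };

    static struct mp_filter *example_create(struct mp_filter *parent, void *options)
    {
        struct mp_filter *f = mp_filter_create(parent, &example_filter);
        if (!f)
            return NULL;
        talloc_free(options);  // this sketch has no options

        mp_filter_add_pin(f, MP_PIN_IN, "in");
        mp_filter_add_pin(f, MP_PIN_OUT, "out");

        struct example_priv *p = f->priv;
        p->queue = mp_refqueue_alloc(f);  // freed implicitly with the filter

        // Accept only one hw format; anything else goes through the embedded
        // autoconvert instance (or fails there).
        mp_refqueue_add_in_format(p->queue, IMGFMT_D3D11, 0);
        mp_refqueue_set_refs(p->queue, 0, 0);
        mp_refqueue_set_mode(p->queue, MP_MODE_DEINT | MP_MODE_OUTPUT_FIELDS);

        return f;
    }
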
diff --git a/video/filter/vf.c b/video/filter/vf.c
deleted file mode 100644
index d5df466ba8..0000000000
--- a/video/filter/vf.c
+++ /dev/null
@@ -1,797 +0,0 @@
-/*
- * This file is part of mpv.
- *
- * mpv is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * mpv is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with mpv. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <assert.h>
-#include <sys/types.h>
-#include <libavutil/buffer.h>
-#include <libavutil/common.h>
-#include <libavutil/mem.h>
-
-#include "config.h"
-
-#include "common/common.h"
-#include "common/global.h"
-#include "common/msg.h"
-#include "options/m_option.h"
-#include "options/m_config.h"
-
-#include "options/options.h"
-
-#include "video/img_format.h"
-#include "video/mp_image.h"
-#include "video/mp_image_pool.h"
-#include "vf.h"
-
-extern const vf_info_t vf_info_format;
-extern const vf_info_t vf_info_sub;
-extern const vf_info_t vf_info_convert;
-extern const vf_info_t vf_info_lavfi;
-extern const vf_info_t vf_info_lavfi_bridge;
-extern const vf_info_t vf_info_vaapi;
-extern const vf_info_t vf_info_vapoursynth;
-extern const vf_info_t vf_info_vapoursynth_lazy;
-extern const vf_info_t vf_info_vdpaupp;
-extern const vf_info_t vf_info_d3d11vpp;
-
-// list of available filters:
-static const vf_info_t *const filter_list[] = {
- &vf_info_format,
- &vf_info_sub,
- &vf_info_convert,
- &vf_info_lavfi,
- &vf_info_lavfi_bridge,
-#if HAVE_VAPOURSYNTH_CORE && HAVE_VAPOURSYNTH
- &vf_info_vapoursynth,
-#endif
-#if HAVE_VAPOURSYNTH_CORE && HAVE_VAPOURSYNTH_LAZY
- &vf_info_vapoursynth_lazy,
-#endif
-#if HAVE_VAAPI
- &vf_info_vaapi,
-#endif
-#if HAVE_VDPAU
- &vf_info_vdpaupp,
-#endif
-#if HAVE_D3D_HWACCEL
- &vf_info_d3d11vpp,
-#endif
- NULL
-};
-
-static void vf_uninit_filter(vf_instance_t *vf);
-
-static bool get_desc(struct m_obj_desc *dst, int index)
-{
- if (index >= MP_ARRAY_SIZE(filter_list) - 1)
- return false;
- const vf_info_t *vf = filter_list[index];
- *dst = (struct m_obj_desc) {
- .name = vf->name,
- .description = vf->description,
- .priv_size = vf->priv_size,
- .priv_defaults = vf->priv_defaults,
- .options = vf->options,
- .p = vf,
- .print_help = vf->print_help,
- };
- return true;
-}
-
-// For the vf option
-const struct m_obj_list vf_obj_list = {
- .get_desc = get_desc,
- .description = "video filters",
- .allow_disable_entries = true,
- .allow_unknown_entries = true,
-};
-
-// Try the cmd on each filter (starting with the first), and stop at the first
-// filter which does not return CONTROL_UNKNOWN for it.
-int vf_control_any(struct vf_chain *c, int cmd, void *arg)
-{
- for (struct vf_instance *cur = c->first; cur; cur = cur->next) {
- if (cur->control) {
- int r = cur->control(cur, cmd, arg);
- if (r != CONTROL_UNKNOWN)
- return r;
- }
- }
- return CONTROL_UNKNOWN;
-}
-
-int vf_control_by_label(struct vf_chain *c,int cmd, void *arg, bstr label)
-{
- char *label_str = bstrdup0(NULL, label);
- struct vf_instance *cur = vf_find_by_label(c, label_str);
- talloc_free(label_str);
- if (cur) {
- return cur->control ? cur->control(cur, cmd, arg) : CONTROL_NA;
- } else {
- return CONTROL_UNKNOWN;
- }
-}
-
-static void vf_control_all(struct vf_chain *c, int cmd, void *arg)
-{
- for (struct vf_instance *cur = c->first; cur; cur = cur->next) {
- if (cur->control)
- cur->control(cur, cmd, arg);
- }
-}
-
-int vf_send_command(struct vf_chain *c, char *label, char *cmd, char *arg)
-{
- char *args[2] = {cmd, arg};
- if (strcmp(label, "all") == 0) {
- vf_control_all(c, VFCTRL_COMMAND, args);
- return 0;
- } else {
- return vf_control_by_label(c, VFCTRL_COMMAND, args, bstr0(label));
- }
-}
-
-static void vf_fix_img_params(struct mp_image *img, struct mp_image_params *p)
-{
- // Filters must absolutely set these correctly.
- assert(img->w == p->w && img->h == p->h);
- assert(img->imgfmt == p->imgfmt);
- // Too many things don't set this correctly.
- // If --colormatrix is used, decoder and filter chain disagree too.
- // In general, it's probably more convenient to force these here,
- // instead of requiring filters to set these correctly.
- img->params = *p;
-}
-
-// Get a new image for filter output, with size and pixel format according to
-// the last vf_config call.
-struct mp_image *vf_alloc_out_image(struct vf_instance *vf)
-{
- struct mp_image_params *p = &vf->fmt_out;
- assert(p->imgfmt);
- struct mp_image *img = mp_image_pool_get(vf->out_pool, p->imgfmt, p->w, p->h);
- if (img)
- vf_fix_img_params(img, p);
- return img;
-}
-
-// Returns false on failure; then the image can't be written to.
-bool vf_make_out_image_writeable(struct vf_instance *vf, struct mp_image *img)
-{
- struct mp_image_params *p = &vf->fmt_out;
- assert(p->imgfmt);
- assert(p->imgfmt == img->imgfmt);
- assert(p->w == img->w && p->h == img->h);
- return mp_image_pool_make_writeable(vf->out_pool, img);
-}
-
-//============================================================================
-
-// The default callback assumes all formats are passed through.
-static int vf_default_query_format(struct vf_instance *vf, unsigned int fmt)
-{
- return vf_next_query_format(vf, fmt);
-}
-
-void vf_print_filter_chain(struct vf_chain *c, int msglevel,
- struct vf_instance *vf)
-{
- if (!mp_msg_test(c->log, msglevel))
- return;
-
- for (vf_instance_t *f = c->first; f; f = f->next) {
- char b[256] = {0};
- mp_snprintf_cat(b, sizeof(b), " [%s] ", f->full_name);
- if (f->label)
- mp_snprintf_cat(b, sizeof(b), "\"%s\" ", f->label);
- mp_snprintf_cat(b, sizeof(b), "%s", mp_image_params_to_str(&f->fmt_out));
- if (f->autoinserted)
- mp_snprintf_cat(b, sizeof(b), " [a]");
- if (f == vf)
- mp_snprintf_cat(b, sizeof(b), " <---");
- mp_msg(c->log, msglevel, "%s\n", b);
- }
-}
-
-static struct vf_instance *vf_open(struct vf_chain *c, const char *name,
- char **args)
-{
- const char *lavfi_name = NULL;
- char **lavfi_args = NULL;
- struct m_obj_desc desc;
- if (!m_obj_list_find(&desc, &vf_obj_list, bstr0(name))) {
- if (!m_obj_list_find(&desc, &vf_obj_list, bstr0("lavfi-bridge"))) {
- MP_ERR(c, "Couldn't find video filter '%s'.\n", name);
- return NULL;
- }
- lavfi_name = name;
- lavfi_args = args;
- args = NULL;
- if (strncmp(lavfi_name, "lavfi-", 6) == 0)
- lavfi_name += 6;
- }
- vf_instance_t *vf = talloc_zero(NULL, struct vf_instance);
- *vf = (vf_instance_t) {
- .full_name = talloc_strdup(vf, name),
- .info = desc.p,
- .log = mp_log_new(vf, c->log, name),
- .hwdec_devs = c->hwdec_devs,
- .query_format = vf_default_query_format,
- .out_pool = mp_image_pool_new(vf),
- .chain = c,
- };
- struct m_config *config =
- m_config_from_obj_desc_and_args(vf, vf->log, c->global, &desc,
- name, c->opts->vf_defs, args);
- if (!config)
- goto error;
- if (lavfi_name) {
- // Pass the filter arguments as proper sub-options to the bridge filter.
- struct m_config_option *name_opt = m_config_get_co(config, bstr0("name"));
- assert(name_opt);
- assert(name_opt->opt->type == &m_option_type_string);
- if (m_config_set_option_raw(config, name_opt, &lavfi_name, 0) < 0)
- goto error;
- struct m_config_option *opts = m_config_get_co(config, bstr0("opts"));
- assert(opts);
- assert(opts->opt->type == &m_option_type_keyvalue_list);
- if (m_config_set_option_raw(config, opts, &lavfi_args, 0) < 0)
- goto error;
- vf->full_name = talloc_asprintf(vf, "%s (lavfi)", vf->full_name);
- }
- vf->priv = config->optstruct;
- int retcode = vf->info->open(vf);
- if (retcode < 1)
- goto error;
- return vf;
-
-error:
- MP_ERR(c, "Creating filter '%s' failed.\n", name);
- talloc_free(vf);
- return NULL;
-}
-
-static vf_instance_t *vf_open_filter(struct vf_chain *c, const char *name,
- char **args)
-{
- int i, l = 0;
- for (i = 0; args && args[2 * i]; i++)
- l += 1 + strlen(args[2 * i]) + 1 + strlen(args[2 * i + 1]);
- l += strlen(name);
- char *str = malloc(l + 1);
- if (!str)
- return NULL;
- char *p = str;
- p += sprintf(str, "%s", name);
- for (i = 0; args && args[2 * i]; i++)
- p += sprintf(p, " %s=%s", args[2 * i], args[2 * i + 1]);
- MP_INFO(c, "Opening video filter: [%s]\n", str);
- free(str);
- return vf_open(c, name, args);
-}
-
-void vf_remove_filter(struct vf_chain *c, struct vf_instance *vf)
-{
- assert(vf != c->first && vf != c->last); // these are sentinels
- struct vf_instance *prev = c->first;
- while (prev && prev->next != vf)
- prev = prev->next;
- assert(prev); // not inserted
- prev->next = vf->next;
- vf_uninit_filter(vf);
- c->initialized = 0;
-}
-
-struct vf_instance *vf_append_filter(struct vf_chain *c, const char *name,
- char **args)
-{
- struct vf_instance *vf = vf_open_filter(c, name, args);
- if (vf) {
- // Insert it before the last filter, which is the "out" pseudo-filter
- // (But after the "in" pseudo-filter)
- struct vf_instance **pprev = &c->first->next;
- while (*pprev && (*pprev)->next)
- pprev = &(*pprev)->next;
- vf->next = *pprev ? *pprev : NULL;
- *pprev = vf;
- c->initialized = 0;
- }
- return vf;
-}
-
-int vf_append_filter_list(struct vf_chain *c, struct m_obj_settings *list)
-{
- for (int n = 0; list && list[n].name; n++) {
- if (!list[n].enabled)
- continue;
- struct vf_instance *vf =
- vf_append_filter(c, list[n].name, list[n].attribs);
- if (vf) {
- if (list[n].label) {
- vf->label = talloc_strdup(vf, list[n].label);
- } else {
- for (int i = 0; i < 100; i++) {
- char* label = talloc_asprintf(vf, "%s.%02d", list[n].name, i);
- if (vf_find_by_label(c, label)) {
- talloc_free(label);
- } else {
- vf->label = label;
- break;
- }
- }
- }
- }
- }
- return 0;
-}
-
-// Used by filters to add a filtered frame to the output queue.
-// Ownership of img is transferred from caller to the filter chain.
-void vf_add_output_frame(struct vf_instance *vf, struct mp_image *img)
-{
- if (img) {
- vf_fix_img_params(img, &vf->fmt_out);
- MP_TARRAY_APPEND(vf, vf->out_queued, vf->num_out_queued, img);
- }
-}
-
-static bool vf_has_output_frame(struct vf_instance *vf)
-{
- if (!vf->num_out_queued && vf->filter_out) {
- if (vf->filter_out(vf) < 0)
- MP_ERR(vf, "Error filtering frame.\n");
- }
- return vf->num_out_queued > 0;
-}
-
-static struct mp_image *vf_dequeue_output_frame(struct vf_instance *vf)
-{
- struct mp_image *res = NULL;
- if (vf_has_output_frame(vf)) {
- res = vf->out_queued[0];
- MP_TARRAY_REMOVE_AT(vf->out_queued, vf->num_out_queued, 0);
- }
- return res;
-}
-
-static int vf_do_filter(struct vf_instance *vf, struct mp_image *img)
-{
- assert(vf->fmt_in.imgfmt);
- if (img)
- assert(mp_image_params_equal(&img->params, &vf->fmt_in));
-
- if (vf->filter_ext) {
- int r = vf->filter_ext(vf, img);
- if (r < 0)
- MP_ERR(vf, "Error filtering frame.\n");
- return r;
- } else {
- if (img) {
- if (vf->filter)
- img = vf->filter(vf, img);
- vf_add_output_frame(vf, img);
- }
- return 0;
- }
-}
-
-// Input a frame into the filter chain. Ownership of img is transferred.
-// Return >= 0 on success, < 0 on failure (even if output frames were produced)
-int vf_filter_frame(struct vf_chain *c, struct mp_image *img)
-{
- assert(img);
- if (c->initialized < 1) {
- talloc_free(img);
- return -1;
- }
- assert(mp_image_params_equal(&img->params, &c->input_params));
- return vf_do_filter(c->first, img);
-}
-
-// Similar to vf_output_frame(), but only ensure that the filter "until" has
-// output, instead of the end of the filter chain.
-static int vf_output_frame_until(struct vf_chain *c, struct vf_instance *until,
- bool eof)
-{
- if (until->num_out_queued)
- return 1;
- if (c->initialized < 1)
- return -1;
- while (1) {
- struct vf_instance *last = NULL;
- for (struct vf_instance * cur = c->first; cur; cur = cur->next) {
- // Flush remaining frames on EOF, but do that only if the previous
- // filters have been flushed (i.e. they have no more output).
- if (eof && !last) {
- int r = vf_do_filter(cur, NULL);
- if (r < 0)
- return r;
- }
- if (vf_has_output_frame(cur))
- last = cur;
- if (cur == until)
- break;
- }
- if (!last)
- return 0;
- if (last == until)
- return 1;
- int r = vf_do_filter(last->next, vf_dequeue_output_frame(last));
- if (r < 0)
- return r;
- }
-}
-
-// Output the next queued image (if any) from the full filter chain.
-// The frame can be retrieved with vf_read_output_frame().
-// eof: if set, assume there's no more input i.e. vf_filter_frame() will
-// not be called (until reset) - flush all internally delayed frames
-// returns: -1: error, 0: no output, 1: output available
-int vf_output_frame(struct vf_chain *c, bool eof)
-{
- return vf_output_frame_until(c, c->last, eof);
-}
-
-struct mp_image *vf_read_output_frame(struct vf_chain *c)
-{
- if (!c->last->num_out_queued)
- vf_output_frame(c, false);
- return vf_dequeue_output_frame(c->last);
-}
-
-// Undo the previous vf_read_output_frame().
-void vf_unread_output_frame(struct vf_chain *c, struct mp_image *img)
-{
- struct vf_instance *vf = c->last;
- MP_TARRAY_INSERT_AT(vf, vf->out_queued, vf->num_out_queued, 0, img);
-}
-
-// Some filters (vf_vapoursynth) filter on separate threads, and may need new
-// input from the decoder, even though the core does not need a new output image
-// yet (this is required to get proper pipelining in the filter). If the filter
-// needs new data, it will call c->wakeup_callback, which in turn causes the
-// core to recheck the filter chain, calling this function. Each filter is asked
-// whether it needs a frame (with vf->needs_input), and if so, it will try to
-// feed it a new frame. If this fails, it will request a new frame from the
-// core by returning 1.
-// returns -1: error, 0: nothing needed, 1: add new frame with vf_filter_frame()
-int vf_needs_input(struct vf_chain *c)
-{
- struct vf_instance *prev = c->first;
- for (struct vf_instance *cur = c->first; cur; cur = cur->next) {
- while (cur->needs_input && cur->needs_input(cur)) {
- // Get frames from preceding filters, or if there are none,
- // request new frames from decoder.
- int r = vf_output_frame_until(c, prev, false);
- if (r < 1)
- return r < 0 ? -1 : 1;
- r = vf_do_filter(cur, vf_dequeue_output_frame(prev));
- if (r < 0)
- return r;
- }
- prev = cur;
- }
- return 0;
-}
-
-static void vf_forget_frames(struct vf_instance *vf)
-{
- for (int n = 0; n < vf->num_out_queued; n++)
- talloc_free(vf->out_queued[n]);
- vf->num_out_queued = 0;
-}
-
-static void vf_chain_forget_frames(struct vf_chain *c)
-{
- for (struct vf_instance *cur = c->first; cur; cur = cur->next)
- vf_forget_frames(cur);
-}
-
-void vf_seek_reset(struct vf_chain *c)
-{
- vf_control_all(c, VFCTRL_SEEK_RESET, NULL);
- vf_chain_forget_frames(c);
-}
-
-int vf_next_query_format(struct vf_instance *vf, unsigned int fmt)
-{
- return fmt >= IMGFMT_START && fmt < IMGFMT_END
- ? vf->last_outfmts[fmt - IMGFMT_START] : 0;
-}
-
-// Mark accepted input formats in fmts[]. Note that ->query_format will
-// typically (but not always) call vf_next_query_format() to check whether
-// an output format is supported.
-static void query_formats(uint8_t *fmts, struct vf_instance *vf)
-{
- for (int n = IMGFMT_START; n < IMGFMT_END; n++)
- fmts[n - IMGFMT_START] = vf->query_format(vf, n);
-}
-
-static bool is_conv_filter(struct vf_instance *vf)
-{
- return vf && (strcmp(vf->info->name, "convert") == 0 || vf->autoinserted);
-}
-
-static const char *find_conv_filter(uint8_t *fmts_in, uint8_t *fmts_out)
-{
- for (int n = 0; filter_list[n]; n++) {
- if (filter_list[n]->test_conversion) {
- for (int a = IMGFMT_START; a < IMGFMT_END; a++) {
- for (int b = IMGFMT_START; b < IMGFMT_END; b++) {
- if (fmts_in[a - IMGFMT_START] && fmts_out[b - IMGFMT_START] &&
- filter_list[n]->test_conversion(a, b))
- return filter_list[n]->name;
- }
- }
- }
- }
- return "convert";
-}
-
-static void update_formats(struct vf_chain *c, struct vf_instance *vf,
- uint8_t *fmts)
-{
- if (vf->next)
- update_formats(c, vf->next, vf->last_outfmts);
- query_formats(fmts, vf);
- bool has_in = false, has_out = false;
- for (int n = IMGFMT_START; n < IMGFMT_END; n++) {
- has_in |= !!fmts[n - IMGFMT_START];
- has_out |= !!vf->last_outfmts[n - IMGFMT_START];
- }
- if (has_out && !has_in && !is_conv_filter(vf) &&
- !is_conv_filter(vf->next))
- {
- // If there are output formats, but no input formats (meaning the
- // filters after vf work, but vf can't output any format the filters
- // after it accept), try to insert a conversion filter.
- MP_INFO(c, "Using conversion filter.\n");
- // Determine which output formats the filter _could_ accept. For this
- // to work after the conversion filter is inserted, it is assumed that
- // conversion filters have a single set of in/output formats that can
- // be converted to each other.
- uint8_t out_formats[IMGFMT_END - IMGFMT_START];
- for (int n = IMGFMT_START; n < IMGFMT_END; n++) {
- out_formats[n - IMGFMT_START] = vf->last_outfmts[n - IMGFMT_START];
- vf->last_outfmts[n - IMGFMT_START] = 1;
- }
- query_formats(fmts, vf);
- const char *filter = find_conv_filter(fmts, out_formats);
- char **args = NULL;
- char *args_no_warn[] = {"warn", "no", NULL};
- if (strcmp(filter, "scale") == 0)
- args = args_no_warn;
- struct vf_instance *conv = vf_open(c, filter, args);
- if (conv) {
- conv->autoinserted = true;
- conv->next = vf->next;
- vf->next = conv;
- update_formats(c, conv, vf->last_outfmts);
- query_formats(fmts, vf);
- }
- }
- for (int n = IMGFMT_START; n < IMGFMT_END; n++)
- has_in |= !!fmts[n - IMGFMT_START];
- if (!has_in) {
- // Pretend all out formats work. All this does it getting better
- // error messages in some cases, so we can configure all filter
- // until it fails, which will be visible in vf_print_filter_chain().
- for (int n = IMGFMT_START; n < IMGFMT_END; n++)
- vf->last_outfmts[n - IMGFMT_START] = 1;
- query_formats(fmts, vf);
- }
-}
-
-// Insert a conversion filter _after_ vf.
-// vf needs to have been successfully configured, vf->next unconfigured but
-// with formats negotiated.
-static void auto_insert_conversion_filter_if_needed(struct vf_chain *c,
- struct vf_instance *vf)
-{
- if (!vf->next || vf->next->query_format(vf->next, vf->fmt_out.imgfmt) ||
- is_conv_filter(vf) || is_conv_filter(vf->next))
- return;
-
- MP_INFO(c, "Using conversion filter.\n");
-
- uint8_t fmts[IMGFMT_END - IMGFMT_START];
- query_formats(fmts, vf->next);
-
- uint8_t out_formats[IMGFMT_END - IMGFMT_START];
- for (int n = IMGFMT_START; n < IMGFMT_END; n++)
- out_formats[n - IMGFMT_START] = n == vf->fmt_out.imgfmt;
-
- const char *filter = find_conv_filter(out_formats, fmts);
- char **args = NULL;
- char *args_no_warn[] = {"warn", "no", NULL};
- if (strcmp(filter, "scale") == 0)
- args = args_no_warn;
- struct vf_instance *conv = vf_open(c, filter, args);
- if (conv) {
- conv->autoinserted = true;
- conv->next = vf->next;
- vf->next = conv;
- update_formats(c, conv, vf->last_outfmts);
- }
-}
-
-static int vf_reconfig_wrapper(struct vf_instance *vf,
- const struct mp_image_params *p)
-{
- vf_forget_frames(vf);
- if (vf->out_pool)
- mp_image_pool_clear(vf->out_pool);
-
- if (!vf->query_format(vf, p->imgfmt))
- return -2;
-
- vf->fmt_out = vf->fmt_in = *p;
-
- if (!mp_image_params_valid(&vf->fmt_in))
- return -2;
-
- int r = 0;
- if (vf->reconfig)
- r = vf->reconfig(vf, &vf->fmt_in, &vf->fmt_out);
-
- if (!mp_image_params_equal(&vf->fmt_in, p))
- r = -2;
-
- if (!mp_image_params_valid(&vf->fmt_out))
- r = -2;
-
- // Fix csp in case of pixel format change
- if (r >= 0)
- mp_image_params_guess_csp(&vf->fmt_out);
-
- return r;
-}
-
-int vf_reconfig(struct vf_chain *c, const struct mp_image_params *params)
-{
- int r = 0;
- vf_seek_reset(c);
- for (struct vf_instance *vf = c->first; vf; ) {
- struct vf_instance *next = vf->next;
- if (vf->autoinserted)
- vf_remove_filter(c, vf);
- vf = next;
- }
- c->input_params = *params;
- c->first->fmt_in = *params;
- struct mp_image_params cur = *params;
-
- uint8_t unused[IMGFMT_END - IMGFMT_START];
- update_formats(c, c->first, unused);
- AVBufferRef *hwfctx = c->in_hwframes_ref;
- struct vf_instance *failing = NULL;
- for (struct vf_instance *vf = c->first; vf; vf = vf->next) {
- av_buffer_unref(&vf->in_hwframes_ref);
- av_buffer_unref(&vf->out_hwframes_ref);
- vf->in_hwframes_ref = hwfctx ? av_buffer_ref(hwfctx) : NULL;
- vf->out_hwframes_ref = hwfctx ? av_buffer_ref(hwfctx) : NULL;
- r = vf_reconfig_wrapper(vf, &cur);
- if (r < 0) {
- failing = vf;
- break;
- }
- cur = vf->fmt_out;
- hwfctx = vf->out_hwframes_ref;
- // Recheck if the new output format works with the following filters.
- auto_insert_conversion_filter_if_needed(c, vf);
- }
- c->output_params = cur;
- c->initialized = r < 0 ? -1 : 1;
- int loglevel = r < 0 ? MSGL_WARN : MSGL_V;
- if (r == -2)
- MP_ERR(c, "Image formats incompatible or invalid.\n");
- mp_msg(c->log, loglevel, "Video filter chain:\n");
- vf_print_filter_chain(c, loglevel, failing);
- if (r < 0)
- c->output_params = (struct mp_image_params){0};
- return r;
-}
-
-// Hack to get mp_image.hwctx before vf_reconfig()
-void vf_set_proto_frame(struct vf_chain *c, struct mp_image *img)
-{
- av_buffer_unref(&c->in_hwframes_ref);
- c->in_hwframes_ref = img && img->hwctx ? av_buffer_ref(img->hwctx) : NULL;
-}
-
-struct vf_instance *vf_find_by_label(struct vf_chain *c, const char *label)
-{
- struct vf_instance *vf = c->first;
- while (vf) {
- if (vf->label && label && strcmp(vf->label, label) == 0)
- return vf;
- vf = vf->next;
- }
- return NULL;
-}
-
-static void vf_uninit_filter(vf_instance_t *vf)
-{
- av_buffer_unref(&vf->in_hwframes_ref);
- av_buffer_unref(&vf->out_hwframes_ref);
- if (vf->uninit)
- vf->uninit(vf);
- vf_forget_frames(vf);
- talloc_free(vf);
-}
-
-static int input_query_format(struct vf_instance *vf, unsigned int fmt)
-{
- // Setting fmt_in is guaranteed by vf_reconfig().
- if (fmt == vf->fmt_in.imgfmt)
- return vf_next_query_format(vf, fmt);
- return 0;
-}
-
-static int output_query_format(struct vf_instance *vf, unsigned int fmt)
-{
- struct vf_chain *c = (void *)vf->priv;
- if (fmt >= IMGFMT_START && fmt < IMGFMT_END)
- return c->allowed_output_formats[fmt - IMGFMT_START];
- return 0;
-}
-
-struct vf_chain *vf_new(struct mpv_global *global)
-{
- struct vf_chain *c = talloc_ptrtype(NULL, c);
- *c = (struct vf_chain){
- .opts = global->opts,
- .log = mp_log_new(c, global->log, "!vf"),
- .global = global,
- };
- static const struct vf_info in = { .name = "in" };
- c->first = talloc(c, struct vf_instance);
- *c->first = (struct vf_instance) {
- .full_name = "in",
- .log = c->log,
- .info = &in,
- .query_format = input_query_format,
- };
- static const struct vf_info out = { .name = "out" };
- c->last = talloc(c, struct vf_instance);
- *c->last = (struct vf_instance) {
- .full_name = "out",
- .log = c->log,
- .info = &out,
- .query_format = output_query_format,
- .priv = (void *)c,
- };
- c->first->next = c->last;
- return c;
-}
-
-void vf_destroy(struct vf_chain *c)
-{
- if (!c)
- return;
- av_buffer_unref(&c->in_hwframes_ref);
- while (c->first) {
- vf_instance_t *vf = c->first;
- c->first = vf->next;
- vf_uninit_filter(vf);
- }
- vf_chain_forget_frames(c);
- talloc_free(c);
-}
diff --git a/video/filter/vf.h b/video/filter/vf.h
deleted file mode 100644
index 5146a4d15b..0000000000
--- a/video/filter/vf.h
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * This file is part of mpv.
- *
- * mpv is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * mpv is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with mpv. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef MPLAYER_VF_H
-#define MPLAYER_VF_H
-
-#include <stdbool.h>
-
-#include "video/mp_image.h"
-#include "common/common.h"
-
-struct MPOpts;
-struct mpv_global;
-struct vf_instance;
-struct vf_priv_s;
-struct m_obj_settings;
-
-typedef struct vf_info {
- const char *description;
- const char *name;
- int (*open)(struct vf_instance *vf);
- int priv_size;
- const void *priv_defaults;
- const struct m_option *options;
- void (*print_help)(struct mp_log *log);
- bool (*test_conversion)(int in, int out);
-} vf_info_t;
-
-typedef struct vf_instance {
- const vf_info_t *info;
- char *full_name;
-
- // Initialize the filter. The filter must set *out to the same image
- // params as the images the filter functions will return for the given
- // *in format.
- // Note that by default, only formats reported as supported by query_format
- // will be allowed for *in.
- // Returns >= 0 on success, < 0 on error.
- int (*reconfig)(struct vf_instance *vf, struct mp_image_params *in,
- struct mp_image_params *out);
-
- int (*control)(struct vf_instance *vf, int request, void *data);
- int (*query_format)(struct vf_instance *vf, unsigned int fmt);
-
- // Filter mpi and return the result. The input mpi reference is owned by
- // the filter, the returned reference is owned by the caller.
- // Return NULL if the output frame is skipped.
- struct mp_image *(*filter)(struct vf_instance *vf, struct mp_image *mpi);
-
- // Like filter(), but can return an error code ( >= 0 means success). This
- // callback is also more practical when the filter can return multiple
- // output images. Use vf_add_output_frame() to queue output frames.
- // Warning: this is called with mpi==NULL if there is no more input at
- // all (i.e. the video has reached end of file condition). This
- // can be used to output delayed or otherwise remaining images.
- int (*filter_ext)(struct vf_instance *vf, struct mp_image *mpi);
-
- // Produce an output frame. This is called after filter or filter_ext.
- // You can add 0 or more frames with vf_add_output_frame(). (This allows
- // distributing the filter load over time -> typically add at most 1 frame.)
- // If this adds no frame (or is NULL), then the caller assumes that the
- // filter needs new input.
- // Return a negative value on error. (No more frames is not an error.)
- // May be called multiple times, even if the filter gives no output.
- int (*filter_out)(struct vf_instance *vf);
-
- // Optional function that checks whether the filter needs additional
- // input. This is for filters with asynchronous behavior: they filter
- // frames in the background, and to get good pipelining behavior, new
- // data should be fed, even if the playback core doesn't need any yet.
- bool (*needs_input)(struct vf_instance *vf);
-
- void (*uninit)(struct vf_instance *vf);
-
- char *label;
- bool autoinserted;
-
- struct mp_image_params fmt_in, fmt_out;
-
- // This is a dirty hack.
- struct AVBufferRef *in_hwframes_ref, *out_hwframes_ref;
-
- struct mp_image_pool *out_pool;
- struct vf_priv_s *priv;
- struct mp_log *log;
- struct mp_hwdec_devices *hwdec_devs;
-
- struct mp_image **out_queued;
- int num_out_queued;
-
- // Caches valid output formats.
- uint8_t last_outfmts[IMGFMT_END - IMGFMT_START];
-
- struct vf_chain *chain;
- struct vf_instance *next;
-} vf_instance_t;
-
-// A chain of video filters
-struct vf_chain {
- int initialized; // 0: no, 1: yes, -1: attempted to, but failed
-
- struct vf_instance *first, *last;
-
- struct mp_image_params input_params;
- struct mp_image_params output_params;
- uint8_t allowed_output_formats[IMGFMT_END - IMGFMT_START];
-
- double container_fps;
- double display_fps;
-
- struct mp_log *log;
- struct MPOpts *opts;
- struct mpv_global *global;
- struct mp_hwdec_devices *hwdec_devs;
-
- // This is a dirty hack.
- struct AVBufferRef *in_hwframes_ref;
-
- // Call when the filter chain wants new processing (for filters with
- // asynchronous behavior) - must be immutable once filters are created,
- // since they are supposed to call it from foreign threads.
- void (*wakeup_callback)(void *ctx);
- void *wakeup_callback_ctx;
-};
-
-enum vf_ctrl {
- VFCTRL_SEEK_RESET = 1, // reset on picture and PTS discontinuities
- VFCTRL_GET_METADATA, // Get frame metadata from lavfi filters (e.g., cropdetect)
- /* Hack to make the OSD state object available to vf_sub which
- * access OSD/subtitle state outside of normal OSD draw time. */
- VFCTRL_INIT_OSD,
- VFCTRL_COMMAND,
-};
-
-struct vf_chain *vf_new(struct mpv_global *global);
-void vf_destroy(struct vf_chain *c);
-void vf_set_proto_frame(struct vf_chain *c, struct mp_image *img);
-int vf_reconfig(struct vf_chain *c, const struct mp_image_params *params);
-int vf_control_any(struct vf_chain *c, int cmd, void *arg);
-int vf_control_by_label(struct vf_chain *c, int cmd, void *arg, bstr label);
-int vf_filter_frame(struct vf_chain *c, struct mp_image *img);
-int vf_output_frame(struct vf_chain *c, bool eof);
-int vf_needs_input(struct vf_chain *c);
-struct mp_image *vf_read_output_frame(struct vf_chain *c);
-void vf_unread_output_frame(struct vf_chain *c, struct mp_image *img);
-void vf_seek_reset(struct vf_chain *c);
-struct vf_instance *vf_append_filter(struct vf_chain *c, const char *name,
- char **args);
-void vf_remove_filter(struct vf_chain *c, struct vf_instance *vf);
-int vf_append_filter_list(struct vf_chain *c, struct m_obj_settings *list);
-struct vf_instance *vf_find_by_label(struct vf_chain *c, const char *label);
-void vf_print_filter_chain(struct vf_chain *c, int msglevel,
- struct vf_instance *vf);
-
-int vf_send_command(struct vf_chain *c, char *label, char *cmd, char *arg);
-
-// Filter internal API
-struct mp_image *vf_alloc_out_image(struct vf_instance *vf);
-bool vf_make_out_image_writeable(struct vf_instance *vf, struct mp_image *img);
-void vf_add_output_frame(struct vf_instance *vf, struct mp_image *img);
-
-// default wrappers:
-int vf_next_query_format(struct vf_instance *vf, unsigned int fmt);
-
-#endif /* MPLAYER_VF_H */
diff --git a/video/filter/vf_convert.c b/video/filter/vf_convert.c
deleted file mode 100644
index 7a7fdce228..0000000000
--- a/video/filter/vf_convert.c
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * This file is part of mpv.
- *
- * mpv is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * mpv is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with mpv. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <math.h>
-#include <inttypes.h>
-#include <stdarg.h>
-#include <assert.h>
-
-#include <libswscale/swscale.h>
-
-#include "common/av_common.h"
-#include "common/msg.h"
-
-#include "options/options.h"
-
-#include "video/img_format.h"
-#include "video/mp_image.h"
-#include "video/sws_utils.h"
-#include "video/fmt-conversion.h"
-#include "vf.h"
-
-struct vf_priv_s {
- struct mp_sws_context *sws;
-};
-
-static int find_best_out(vf_instance_t *vf, int in_format)
-{
- int best = 0;
- for (int out_format = IMGFMT_START; out_format < IMGFMT_END; out_format++) {
- if (!vf_next_query_format(vf, out_format))
- continue;
- if (sws_isSupportedOutput(imgfmt2pixfmt(out_format)) < 1)
- continue;
- if (best) {
- int candidate = mp_imgfmt_select_best(best, out_format, in_format);
- if (candidate)
- best = candidate;
- } else {
- best = out_format;
- }
- }
- return best;
-}
-
-static int reconfig(struct vf_instance *vf, struct mp_image_params *in,
- struct mp_image_params *out)
-{
- unsigned int best = find_best_out(vf, in->imgfmt);
- if (!best) {
- MP_WARN(vf, "no supported output format found\n");
- return -1;
- }
-
- *out = *in;
- out->imgfmt = best;
-
- // If we convert from RGB to YUV, default to limited range.
- if (mp_imgfmt_get_forced_csp(in->imgfmt) == MP_CSP_RGB &&
- mp_imgfmt_get_forced_csp(out->imgfmt) == MP_CSP_AUTO)
- out->color.levels = MP_CSP_LEVELS_TV;
-
- mp_image_params_guess_csp(out);
-
- mp_sws_set_from_cmdline(vf->priv->sws, vf->chain->global);
- vf->priv->sws->src = *in;
- vf->priv->sws->dst = *out;
-
- if (mp_sws_reinit(vf->priv->sws) < 0) {
- // error...
- MP_WARN(vf, "Couldn't init libswscale for this setup\n");
- return -1;
- }
- return 0;
-}
-
-static struct mp_image *filter(struct vf_instance *vf, struct mp_image *mpi)
-{
- struct mp_image *dmpi = vf_alloc_out_image(vf);
- if (!dmpi)
- return NULL;
- mp_image_copy_attributes(dmpi, mpi);
-
- mp_sws_scale(vf->priv->sws, dmpi, mpi);
-
- talloc_free(mpi);
- return dmpi;
-}
-
-static int query_format(struct vf_instance *vf, unsigned int fmt)
-{
- if (IMGFMT_IS_HWACCEL(fmt) || sws_isSupportedInput(imgfmt2pixfmt(fmt)) < 1)
- return 0;
- return !!find_best_out(vf, fmt);
-}
-
-static void uninit(struct vf_instance *vf)
-{
-}
-
-static int vf_open(vf_instance_t *vf)
-{
- vf->reconfig = reconfig;
- vf->filter = filter;
- vf->query_format = query_format;
- vf->uninit = uninit;
- vf->priv->sws = mp_sws_alloc(vf);
- vf->priv->sws->log = vf->log;
- return 1;
-}
-
-const vf_info_t vf_info_convert = {
- .description = "image format conversion with libswscale",
- .name = "convert",
- .open = vf_open,
- .priv_size = sizeof(struct vf_priv_s),
-};
diff --git a/video/filter/vf_d3d11vpp.c b/video/filter/vf_d3d11vpp.c
index 3be49ede80..fb96a44e65 100644
--- a/video/filter/vf_d3d11vpp.c
+++ b/video/filter/vf_d3d11vpp.c
@@ -25,9 +25,13 @@
#include "common/common.h"
#include "osdep/timer.h"
#include "osdep/windows_utils.h"
-#include "vf.h"
+#include "filters/f_autoconvert.h"
+#include "filters/filter.h"
+#include "filters/filter_internal.h"
+#include "filters/user_filters.h"
#include "refqueue.h"
#include "video/hwdec.h"
+#include "video/mp_image.h"
#include "video/mp_image_pool.h"
// missing in MinGW
@@ -38,8 +42,17 @@
#define D3D11_VIDEO_PROCESSOR_PROCESSOR_CAPS_INVERSE_TELECINE 0x10
#define D3D11_VIDEO_PROCESSOR_PROCESSOR_CAPS_FRAME_RATE_CONVERSION 0x20
-struct vf_priv_s {
+struct opts {
+ int deint_enabled;
+ int interlaced_only;
+ int mode;
+};
+
+struct priv {
+ struct opts *opts;
+
ID3D11Device *vo_dev;
+ const int *vo_formats;
ID3D11DeviceContext *device_ctx;
ID3D11VideoDevice *video_dev;
@@ -61,10 +74,6 @@ struct vf_priv_s {
struct mp_image_pool *pool;
struct mp_refqueue *queue;
-
- int deint_enabled;
- int interlaced_only;
- int mode;
};
static void release_tex(void *arg)
@@ -76,8 +85,8 @@ static void release_tex(void *arg)
static struct mp_image *alloc_pool(void *pctx, int fmt, int w, int h)
{
- struct vf_instance *vf = pctx;
- struct vf_priv_s *p = vf->priv;
+ struct mp_filter *vf = pctx;
+ struct priv *p = vf->priv;
HRESULT hr;
ID3D11Texture2D *texture = NULL;
@@ -100,7 +109,7 @@ static struct mp_image *alloc_pool(void *pctx, int fmt, int w, int h)
if (!mpi)
abort();
- mp_image_setfmt(mpi, p->out_params.imgfmt);
+ mp_image_setfmt(mpi, IMGFMT_D3D11);
mp_image_set_size(mpi, w, h);
mpi->params.hw_subfmt = p->out_params.hw_subfmt;
@@ -110,29 +119,15 @@ static struct mp_image *alloc_pool(void *pctx, int fmt, int w, int h)
return mpi;
}
-static void flush_frames(struct vf_instance *vf)
+static void flush_frames(struct mp_filter *vf)
{
- struct vf_priv_s *p = vf->priv;
+ struct priv *p = vf->priv;
mp_refqueue_flush(p->queue);
}
-static int filter_ext(struct vf_instance *vf, struct mp_image *in)
-{
- struct vf_priv_s *p = vf->priv;
-
- mp_refqueue_set_refs(p->queue, 0, 0);
- mp_refqueue_set_mode(p->queue,
- (p->deint_enabled ? MP_MODE_DEINT : 0) |
- MP_MODE_OUTPUT_FIELDS |
- (p->interlaced_only ? MP_MODE_INTERLACED_ONLY : 0));
-
- mp_refqueue_add_input(p->queue, in);
- return 0;
-}
-
-static void destroy_video_proc(struct vf_instance *vf)
+static void destroy_video_proc(struct mp_filter *vf)
{
- struct vf_priv_s *p = vf->priv;
+ struct priv *p = vf->priv;
if (p->video_proc)
ID3D11VideoProcessor_Release(p->video_proc);
@@ -143,9 +138,9 @@ static void destroy_video_proc(struct vf_instance *vf)
p->vp_enum = NULL;
}
-static int recreate_video_proc(struct vf_instance *vf)
+static int recreate_video_proc(struct mp_filter *vf)
{
- struct vf_priv_s *p = vf->priv;
+ struct priv *p = vf->priv;
HRESULT hr;
destroy_video_proc(vf);
@@ -168,7 +163,7 @@ static int recreate_video_proc(struct vf_instance *vf)
goto fail;
MP_VERBOSE(vf, "Found %d rate conversion caps. Looking for caps=0x%x.\n",
- (int)caps.RateConversionCapsCount, p->mode);
+ (int)caps.RateConversionCapsCount, p->opts->mode);
int rindex = -1;
for (int n = 0; n < caps.RateConversionCapsCount; n++) {
@@ -178,7 +173,7 @@ static int recreate_video_proc(struct vf_instance *vf)
if (FAILED(hr))
goto fail;
MP_VERBOSE(vf, " - %d: 0x%08x\n", n, (unsigned)rcaps.ProcessorCaps);
- if (rcaps.ProcessorCaps & p->mode) {
+ if (rcaps.ProcessorCaps & p->opts->mode) {
MP_VERBOSE(vf, " (matching)\n");
if (rindex < 0)
rindex = n;
@@ -248,17 +243,19 @@ fail:
return -1;
}
-static int render(struct vf_instance *vf)
+static struct mp_image *render(struct mp_filter *vf)
{
- struct vf_priv_s *p = vf->priv;
+ struct priv *p = vf->priv;
int res = -1;
HRESULT hr;
ID3D11VideoProcessorInputView *in_view = NULL;
ID3D11VideoProcessorOutputView *out_view = NULL;
struct mp_image *in = NULL, *out = NULL;
- out = mp_image_pool_get(p->pool, p->out_params.imgfmt, p->params.w, p->params.h);
- if (!out)
+ out = mp_image_pool_get(p->pool, IMGFMT_D3D11, p->params.w, p->params.h);
+ if (!out) {
+ MP_WARN(vf, "failed to allocate frame\n");
goto cleanup;
+ }
ID3D11Texture2D *d3d_out_tex = (void *)out->planes[0];
@@ -325,8 +322,10 @@ static int render(struct vf_instance *vf)
(ID3D11Resource *)d3d_out_tex,
p->vp_enum, &outdesc,
&out_view);
- if (FAILED(hr))
+ if (FAILED(hr)) {
+ MP_ERR(vf, "Could not create ID3D11VideoProcessorOutputView\n");
goto cleanup;
+ }
D3D11_VIDEO_PROCESSOR_STREAM stream = {
.Enable = TRUE,
@@ -346,87 +345,73 @@ cleanup:
ID3D11VideoProcessorInputView_Release(in_view);
if (out_view)
ID3D11VideoProcessorOutputView_Release(out_view);
- if (res >= 0) {
- vf_add_output_frame(vf, out);
- } else {
- talloc_free(out);
- }
- mp_refqueue_next_field(p->queue);
- return res;
+ if (res < 0)
+ TA_FREEP(&out);
+ return out;
}
-static int filter_out(struct vf_instance *vf)
+static bool vo_supports(struct priv *p, int subfmt)
{
- struct vf_priv_s *p = vf->priv;
-
- if (!mp_refqueue_has_output(p->queue))
- return 0;
-
- // no filtering
- if (!mp_refqueue_should_deint(p->queue) && !p->require_filtering) {
- struct mp_image *in = mp_image_new_ref(mp_refqueue_get(p->queue, 0));
- if (!in)
- return -1;
- mp_image_set_params(in, &p->out_params);
- vf_add_output_frame(vf, in);
- mp_refqueue_next(p->queue);
- return 0;
+ for (int n = 0; p->vo_formats && p->vo_formats[n]; n++) {
+ if (p->vo_formats[n] == subfmt)
+ return true;
}
-
- return render(vf);
+ return false;
}
-static int reconfig(struct vf_instance *vf, struct mp_image_params *in,
- struct mp_image_params *out)
+static void vf_d3d11vpp_process(struct mp_filter *vf)
{
- struct vf_priv_s *p = vf->priv;
-
- flush_frames(vf);
- talloc_free(p->pool);
- p->pool = NULL;
+ struct priv *p = vf->priv;
+
+ struct mp_image *in_fmt = mp_refqueue_execute_reinit(p->queue);
+ if (in_fmt) {
+ mp_image_pool_clear(p->pool);
+
+ destroy_video_proc(vf);
+
+ p->params = in_fmt->params;
+ p->out_params = p->params;
+
+ if (vo_supports(p, IMGFMT_NV12)) {
+ p->out_params.hw_subfmt = IMGFMT_NV12;
+ p->out_format = DXGI_FORMAT_NV12;
+ p->out_shared = false;
+ p->out_rgb = false;
+ } else {
+ p->out_params.hw_subfmt = IMGFMT_RGB0;
+ p->out_format = DXGI_FORMAT_B8G8R8A8_UNORM;
+ p->out_shared = true;
+ p->out_rgb = true;
+ }
+ p->out_params.hw_flags = 0;
- destroy_video_proc(vf);
+ p->require_filtering = p->params.hw_subfmt != p->out_params.hw_subfmt;
+ }
- *out = *in;
+ if (!mp_refqueue_can_output(p->queue))
+ return;
- if (vf_next_query_format(vf, IMGFMT_D3D11VA) ||
- vf_next_query_format(vf, IMGFMT_D3D11NV12))
- {
- out->imgfmt = vf_next_query_format(vf, IMGFMT_D3D11VA)
- ? IMGFMT_D3D11VA : IMGFMT_D3D11NV12;
- out->hw_subfmt = IMGFMT_NV12;
- p->out_format = DXGI_FORMAT_NV12;
- p->out_shared = false;
- p->out_rgb = false;
+ if (!mp_refqueue_should_deint(p->queue) && !p->require_filtering) {
+ // no filtering
+ struct mp_image *in = mp_image_new_ref(mp_refqueue_get(p->queue, 0));
+ if (!in) {
+ mp_filter_internal_mark_failed(vf);
+ return;
+ }
+ mp_refqueue_write_out_pin(p->queue, in);
} else {
- out->imgfmt = IMGFMT_D3D11RGB;
- out->hw_subfmt = IMGFMT_RGB0;
- p->out_format = DXGI_FORMAT_B8G8R8A8_UNORM;
- p->out_shared = true;
- p->out_rgb = true;
+ mp_refqueue_write_out_pin(p->queue, render(vf));
}
- out->hw_flags = 0;
-
- p->require_filtering = in->hw_subfmt != out->hw_subfmt;
-
- p->params = *in;
- p->out_params = *out;
-
- p->pool = mp_image_pool_new(vf);
- mp_image_pool_set_allocator(p->pool, alloc_pool, vf);
- mp_image_pool_set_lru(p->pool);
-
- return 0;
}
-static void uninit(struct vf_instance *vf)
+static void uninit(struct mp_filter *vf)
{
- struct vf_priv_s *p = vf->priv;
+ struct priv *p = vf->priv;
destroy_video_proc(vf);
flush_frames(vf);
- mp_refqueue_free(p->queue);
+ talloc_free(p->queue);
talloc_free(p->pool);
if (p->video_ctx)
@@ -442,69 +427,55 @@ static void uninit(struct vf_instance *vf)
ID3D11Device_Release(p->vo_dev);
}
-static int query_format(struct vf_instance *vf, unsigned int imgfmt)
+static const struct mp_filter_info vf_d3d11vpp_filter = {
+ .name = "d3d11vpp",
+ .process = vf_d3d11vpp_process,
+ .reset = flush_frames,
+ .destroy = uninit,
+ .priv_size = sizeof(struct priv),
+};
+
+static struct mp_filter *vf_d3d11vpp_create(struct mp_filter *parent,
+ void *options)
{
- if (imgfmt == IMGFMT_D3D11VA ||
- imgfmt == IMGFMT_D3D11NV12 ||
- imgfmt == IMGFMT_D3D11RGB)
- {
- return vf_next_query_format(vf, IMGFMT_D3D11VA) ||
- vf_next_query_format(vf, IMGFMT_D3D11NV12) ||
- vf_next_query_format(vf, IMGFMT_D3D11RGB);
+ struct mp_filter *f = mp_filter_create(parent, &vf_d3d11vpp_filter);
+ if (!f) {
+ talloc_free(options);
+ return NULL;
}
- return 0;
-}
-static bool test_conversion(int in, int out)
-{
- return (in == IMGFMT_D3D11VA ||
- in == IMGFMT_D3D11NV12 ||
- in == IMGFMT_D3D11RGB) &&
- (out == IMGFMT_D3D11VA ||
- out == IMGFMT_D3D11NV12 ||
- out == IMGFMT_D3D11RGB);
-}
+ mp_filter_add_pin(f, MP_PIN_IN, "in");
+ mp_filter_add_pin(f, MP_PIN_OUT, "out");
-static int control(struct vf_instance *vf, int request, void* data)
-{
- switch (request){
- case VFCTRL_SEEK_RESET:
- flush_frames(vf);
- return true;
- default:
- return CONTROL_UNKNOWN;
- }
-}
+ struct priv *p = f->priv;
+ p->opts = talloc_steal(p, options);
-static int vf_open(vf_instance_t *vf)
-{
- struct vf_priv_s *p = vf->priv;
+ // Special path for vf_d3d11_create_outconv(): disable all processing except
+ // possibly surface format conversions.
+ if (!p->opts) {
+ static const struct opts opts = {0};
+ p->opts = (struct opts *)&opts;
+ }
- if (!vf->hwdec_devs)
- return 0;
+ p->queue = mp_refqueue_alloc(f);
- vf->reconfig = reconfig;
- vf->filter_ext = filter_ext;
- vf->filter_out = filter_out;
- vf->query_format = query_format;
- vf->uninit = uninit;
- vf->control = control;
+ struct mp_stream_info *info = mp_filter_find_stream_info(f);
+ if (!info || !info->hwdec_devs)
+ goto fail;
- hwdec_devices_request_all(vf->hwdec_devs);
- AVBufferRef *ref =
- hwdec_devices_get_lavc(vf->hwdec_devs, AV_HWDEVICE_TYPE_D3D11VA);
- if (!ref)
- return 0;
+ hwdec_devices_request_all(info->hwdec_devs);
- AVHWDeviceContext *hwctx = (void *)ref->data;
- AVD3D11VADeviceContext *d3dctx = hwctx->hwctx;
+ struct mp_hwdec_ctx *hwctx =
+ hwdec_devices_get_by_lavc(info->hwdec_devs, AV_HWDEVICE_TYPE_D3D11VA);
+ if (!hwctx || !hwctx->av_device_ref)
+ goto fail;
+ AVHWDeviceContext *avhwctx = (void *)hwctx->av_device_ref->data;
+ AVD3D11VADeviceContext *d3dctx = avhwctx->hwctx;
p->vo_dev = d3dctx->device;
ID3D11Device_AddRef(p->vo_dev);
- av_buffer_unref(&ref);
-
- p->queue = mp_refqueue_alloc();
+ p->vo_formats = hwctx->supported_formats;
HRESULT hr;
@@ -521,14 +492,26 @@ static int vf_open(vf_instance_t *vf)
if (FAILED(hr))
goto fail;
- return 1;
+ p->pool = mp_image_pool_new(f);
+ mp_image_pool_set_allocator(p->pool, alloc_pool, f);
+ mp_image_pool_set_lru(p->pool);
+
+ mp_refqueue_add_in_format(p->queue, IMGFMT_D3D11, 0);
+
+ mp_refqueue_set_refs(p->queue, 0, 0);
+ mp_refqueue_set_mode(p->queue,
+ (p->opts->deint_enabled ? MP_MODE_DEINT : 0) |
+ MP_MODE_OUTPUT_FIELDS |
+ (p->opts->interlaced_only ? MP_MODE_INTERLACED_ONLY : 0));
+
+ return f;
fail:
- uninit(vf);
- return 0;
+ talloc_free(f);
+ return NULL;
}
-#define OPT_BASE_STRUCT struct vf_priv_s
+#define OPT_BASE_STRUCT struct opts
static const m_option_t vf_opts_fields[] = {
OPT_FLAG("deint", deint_enabled, 0),
OPT_FLAG("interlaced-only", interlaced_only, 0),
@@ -542,16 +525,25 @@ static const m_option_t vf_opts_fields[] = {
{0}
};
-const vf_info_t vf_info_d3d11vpp = {
- .description = "D3D11 Video Post-Process Filter",
- .name = "d3d11vpp",
- .test_conversion = test_conversion,
- .open = vf_open,
- .priv_size = sizeof(struct vf_priv_s),
- .priv_defaults = &(const struct vf_priv_s) {
- .deint_enabled = 1,
- .interlaced_only = 1,
- .mode = D3D11_VIDEO_PROCESSOR_PROCESSOR_CAPS_DEINTERLACE_BOB,
+const struct mp_user_filter_entry vf_d3d11vpp = {
+ .desc = {
+ .description = "D3D11 Video Post-Process Filter",
+ .name = "d3d11vpp",
+ .priv_size = sizeof(OPT_BASE_STRUCT),
+ .priv_defaults = &(const OPT_BASE_STRUCT) {
+ .deint_enabled = 1,
+ .interlaced_only = 1,
+ .mode = D3D11_VIDEO_PROCESSOR_PROCESSOR_CAPS_DEINTERLACE_BOB,
+ },
+ .options = vf_opts_fields,
},
- .options = vf_opts_fields,
+ .create = vf_d3d11vpp_create,
};
+
+// Create a filter that only converts the hw sub-format, for hwdec interops
+// which cannot handle some surface formats (ANGLE).
+struct mp_filter *vf_d3d11_create_outconv(struct mp_filter *parent)
+{
+ // options==NULL is normally not allowed, and specially handled.
+ return vf_d3d11vpp_create(parent, NULL);
+}
diff --git a/video/filter/vf_format.c b/video/filter/vf_format.c
index aab30855d6..48eb51a795 100644
--- a/video/filter/vf_format.c
+++ b/video/filter/vf_format.c
@@ -25,14 +25,21 @@
#include "common/msg.h"
#include "common/common.h"
-
+#include "filters/f_autoconvert.h"
+#include "filters/filter.h"
+#include "filters/filter_internal.h"
+#include "filters/user_filters.h"
#include "video/img_format.h"
#include "video/mp_image.h"
-#include "vf.h"
#include "options/m_option.h"
-struct vf_priv_s {
+struct priv {
+ struct vf_format_opts *opts;
+ struct mp_pin *in_pin;
+};
+
+struct vf_format_opts {
int fmt;
int outfmt;
int colormatrix;
@@ -51,43 +58,29 @@ struct vf_priv_s {
float spherical_ref_angles[3];
};
-static bool is_compatible(int fmt1, int fmt2)
+static void vf_format_process(struct mp_filter *f)
{
- struct mp_imgfmt_desc d1 = mp_imgfmt_get_desc(fmt1);
- struct mp_imgfmt_desc d2 = mp_imgfmt_get_desc(fmt2);
- if (d1.num_planes < d2.num_planes)
- return false;
- if (!(d1.flags & MP_IMGFLAG_BYTE_ALIGNED) ||
- !(d2.flags & MP_IMGFLAG_BYTE_ALIGNED))
- return false;
- for (int n = 0; n < MPMIN(d1.num_planes, d2.num_planes); n++) {
- if (d1.bytes[n] != d2.bytes[n])
- return false;
- if (d1.xs[n] != d2.xs[n] || d1.ys[n] != d2.ys[n])
- return false;
- }
- return true;
-}
+ struct priv *priv = f->priv;
+ struct vf_format_opts *p = priv->opts;
-static int query_format(struct vf_instance *vf, unsigned int fmt)
-{
- if (fmt == vf->priv->fmt || !vf->priv->fmt) {
- if (vf->priv->outfmt) {
- if (!is_compatible(fmt, vf->priv->outfmt))
- return 0;
- fmt = vf->priv->outfmt;
- }
- return vf_next_query_format(vf, fmt);
- }
- return 0;
-}
+ if (!mp_pin_can_transfer_data(f->ppins[1], priv->in_pin))
+ return;
-static int reconfig(struct vf_instance *vf, struct mp_image_params *in,
- struct mp_image_params *out)
-{
- struct vf_priv_s *p = vf->priv;
+ struct mp_frame frame = mp_pin_out_read(priv->in_pin);
+
+ if (mp_frame_is_signaling(frame)) {
+ mp_pin_in_write(f->ppins[1], frame);
+ return;
+ }
+ if (frame.type != MP_FRAME_VIDEO) {
+ MP_ERR(f, "unsupported frame type\n");
+ mp_frame_unref(&frame);
+ mp_filter_internal_mark_failed(f);
+ return;
+ }
- *out = *in;
+ struct mp_image *img = frame.data;
+ struct mp_image_params *out = &img->params;
if (p->outfmt)
out->imgfmt = p->outfmt;
@@ -98,8 +91,9 @@ static int reconfig(struct vf_instance *vf, struct mp_image_params *in,
if (p->primaries)
out->color.primaries = p->primaries;
if (p->gamma) {
+        enum mp_csp_trc in_gamma = out->color.gamma;
out->color.gamma = p->gamma;
- if (in->color.gamma != out->color.gamma) {
+ if (in_gamma != out->color.gamma) {
// When changing the gamma function explicitly, also reset stuff
// related to the gamma function since that information will almost
// surely be false now and have to be re-inferred
@@ -140,28 +134,47 @@ static int reconfig(struct vf_instance *vf, struct mp_image_params *in,
// Make sure the user-overrides are consistent (no RGB csp for YUV, etc.).
mp_image_params_guess_csp(out);
- return 0;
+ mp_pin_in_write(f->ppins[1], frame);
}
-static struct mp_image *filter(struct vf_instance *vf, struct mp_image *mpi)
-{
- if (vf->priv->outfmt)
- mp_image_setfmt(mpi, vf->priv->outfmt);
- return mpi;
-}
+static const struct mp_filter_info vf_format_filter = {
+ .name = "format",
+ .process = vf_format_process,
+ .priv_size = sizeof(struct priv),
+};
-static int vf_open(vf_instance_t *vf)
+static struct mp_filter *vf_format_create(struct mp_filter *parent, void *options)
{
- vf->query_format = query_format;
- vf->reconfig = reconfig;
- vf->filter = filter;
- return 1;
+ struct mp_filter *f = mp_filter_create(parent, &vf_format_filter);
+ if (!f) {
+ talloc_free(options);
+ return NULL;
+ }
+
+ struct priv *priv = f->priv;
+ priv->opts = talloc_steal(priv, options);
+
+ mp_filter_add_pin(f, MP_PIN_IN, "in");
+ mp_filter_add_pin(f, MP_PIN_OUT, "out");
+
+ struct mp_autoconvert *conv = mp_autoconvert_create(f);
+ if (!conv) {
+ talloc_free(f);
+ return NULL;
+ }
+
+ if (priv->opts->fmt)
+ mp_autoconvert_add_imgfmt(conv, priv->opts->fmt, 0);
+
+ priv->in_pin = conv->f->pins[1];
+ mp_pin_connect(conv->f->pins[0], f->ppins[0]);
+
+ return f;
}
-#define OPT_BASE_STRUCT struct vf_priv_s
+#define OPT_BASE_STRUCT struct vf_format_opts
static const m_option_t vf_opts_fields[] = {
OPT_IMAGEFORMAT("fmt", fmt, 0),
- OPT_IMAGEFORMAT("outfmt", outfmt, 0),
OPT_CHOICE_C("colormatrix", colormatrix, 0, mp_csp_names),
OPT_CHOICE_C("colorlevels", colorlevels, 0, mp_csp_levels_names),
OPT_CHOICE_C("primaries", primaries, 0, mp_csp_prim_names),
@@ -184,14 +197,16 @@ static const m_option_t vf_opts_fields[] = {
{0}
};
-const vf_info_t vf_info_format = {
- .description = "force output format",
- .name = "format",
- .open = vf_open,
- .priv_size = sizeof(struct vf_priv_s),
- .options = vf_opts_fields,
- .priv_defaults = &(const struct vf_priv_s){
- .rotate = -1,
- .spherical_ref_angles = {NAN, NAN, NAN},
+const struct mp_user_filter_entry vf_format = {
+ .desc = {
+ .description = "force output format",
+ .name = "format",
+ .priv_size = sizeof(OPT_BASE_STRUCT),
+ .priv_defaults = &(const OPT_BASE_STRUCT){
+ .rotate = -1,
+ .spherical_ref_angles = {NAN, NAN, NAN},
+ },
+ .options = vf_opts_fields,
},
+ .create = vf_format_create,
};
diff --git a/video/filter/vf_lavfi.c b/video/filter/vf_lavfi.c
deleted file mode 100644
index 0cd3af8673..0000000000
--- a/video/filter/vf_lavfi.c
+++ /dev/null
@@ -1,517 +0,0 @@
-/*
- * This file is part of mpv.
- *
- * Filter graph creation code taken from Libav avplay.c (LGPL 2.1 or later)
- *
- * mpv is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * mpv is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with mpv. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <math.h>
-#include <inttypes.h>
-#include <stdarg.h>
-#include <assert.h>
-
-#include <libavutil/avstring.h>
-#include <libavutil/hwcontext.h>
-#include <libavutil/mem.h>
-#include <libavutil/mathematics.h>
-#include <libavutil/rational.h>
-#include <libavutil/pixdesc.h>
-#include <libavutil/time.h>
-#include <libavutil/error.h>
-#include <libswscale/swscale.h>
-#include <libavfilter/avfilter.h>
-#include <libavfilter/buffersink.h>
-#include <libavfilter/buffersrc.h>
-
-#include "config.h"
-#include "common/av_common.h"
-#include "common/msg.h"
-#include "options/m_option.h"
-#include "common/tags.h"
-
-#include "video/hwdec.h"
-#include "video/img_format.h"
-#include "video/mp_image.h"
-#include "video/sws_utils.h"
-#include "video/fmt-conversion.h"
-#include "vf.h"
-
-// FFmpeg and Libav have slightly different APIs, just enough to cause us
-// unnecessary pain. <Expletive deleted.>
-#if LIBAVFILTER_VERSION_MICRO < 100
-#define graph_parse(graph, filters, inputs, outputs, log_ctx) \
- avfilter_graph_parse(graph, filters, inputs, outputs, log_ctx)
-#define avfilter_graph_send_command(a, b, c, d, e, f, g) -1
-#else
-#define graph_parse(graph, filters, inputs, outputs, log_ctx) \
- avfilter_graph_parse_ptr(graph, filters, &(inputs), &(outputs), log_ctx)
-#endif
-
-struct vf_priv_s {
- // Single filter bridge, instead of a graph.
- bool is_bridge;
-
- AVFilterGraph *graph;
- AVFilterContext *in;
- AVFilterContext *out;
- bool eof;
-
- AVRational timebase_in;
- AVRational timebase_out;
- AVRational par_in;
-
- struct mp_tags *metadata;
-
- // for the lw wrapper
- void *old_priv;
- int (*lw_reconfig_cb)(struct vf_instance *vf,
- struct mp_image_params *in,
- struct mp_image_params *out);
-
- // options
- char *cfg_graph;
- int64_t cfg_sws_flags;
- char **cfg_avopts;
-
- char *cfg_filter_name;
- char **cfg_filter_opts;
-};
-
-static void destroy_graph(struct vf_instance *vf)
-{
- struct vf_priv_s *p = vf->priv;
- avfilter_graph_free(&p->graph);
- p->in = p->out = NULL;
-
- if (p->metadata) {
- talloc_free(p->metadata);
- p->metadata = NULL;
- }
-
- p->eof = false;
-}
-
-static bool recreate_graph(struct vf_instance *vf, struct mp_image_params *fmt)
-{
- void *tmp = talloc_new(NULL);
- struct vf_priv_s *p = vf->priv;
- AVFilterContext *in = NULL, *out = NULL;
- int ret;
-
- if (!p->is_bridge && bstr0(p->cfg_graph).len == 0) {
- MP_FATAL(vf, "lavfi: no filter graph set\n");
- return false;
- }
-
- destroy_graph(vf);
-
- AVFilterGraph *graph = avfilter_graph_alloc();
- if (!graph)
- goto error;
-
- if (mp_set_avopts(vf->log, graph, p->cfg_avopts) < 0)
- goto error;
-
- AVFilterInOut *outputs = avfilter_inout_alloc();
- AVFilterInOut *inputs = avfilter_inout_alloc();
- if (!outputs || !inputs)
- goto error;
-
- char *sws_flags = talloc_asprintf(tmp, "flags=%"PRId64, p->cfg_sws_flags);
- graph->scale_sws_opts = av_strdup(sws_flags);
-
- in = avfilter_graph_alloc_filter(graph, avfilter_get_by_name("buffer"), "src");
- if (!in)
- goto error;
-
- AVBufferSrcParameters *in_params = av_buffersrc_parameters_alloc();
- if (!in_params)
- goto error;
-
- in_params->format = imgfmt2pixfmt(fmt->imgfmt);
- in_params->time_base = AV_TIME_BASE_Q;
- in_params->width = fmt->w;
- in_params->height = fmt->h;
- in_params->sample_aspect_ratio.num = fmt->p_w;
- in_params->sample_aspect_ratio.den = fmt->p_h;
- // Assume it's ignored for non-hwaccel formats.
- in_params->hw_frames_ctx = vf->in_hwframes_ref;
-
- ret = av_buffersrc_parameters_set(in, in_params);
- av_free(in_params);
- if (ret < 0)
- goto error;
-
- if (avfilter_init_str(in, NULL) < 0)
- goto error;
-
- if (avfilter_graph_create_filter(&out, avfilter_get_by_name("buffersink"),
- "out", NULL, NULL, graph) < 0)
- goto error;
-
- if (p->is_bridge) {
- AVFilterContext *filter = avfilter_graph_alloc_filter(graph,
- avfilter_get_by_name(p->cfg_filter_name), "filter");
- if (!filter)
- goto error;
-
- if (mp_set_avopts(vf->log, filter->priv, p->cfg_filter_opts) < 0)
- goto error;
-
- if (avfilter_init_str(filter, NULL) < 0)
- goto error;
-
- // Yep, we have to manually link those filters.
- if (filter->nb_inputs != 1 ||
- avfilter_pad_get_type(filter->input_pads, 0) != AVMEDIA_TYPE_VIDEO ||
- filter->nb_outputs != 1 ||
- avfilter_pad_get_type(filter->output_pads, 0) != AVMEDIA_TYPE_VIDEO)
- {
- MP_ERR(vf, "The filter is required to have 1 video input pad and "
- "1 video output pad.\n");
- goto error;
- }
- if (avfilter_link(in, 0, filter, 0) < 0 ||
- avfilter_link(filter, 0, out, 0) < 0)
- {
- MP_ERR(vf, "Failed to link filter.\n");
- goto error;
- }
- } else {
- MP_VERBOSE(vf, "lavfi: create graph: '%s'\n", p->cfg_graph);
-
- outputs->name = av_strdup("in");
- outputs->filter_ctx = in;
-
- inputs->name = av_strdup("out");
- inputs->filter_ctx = out;
-
- if (graph_parse(graph, p->cfg_graph, inputs, outputs, NULL) < 0)
- goto error;
- }
-
- if (vf->hwdec_devs) {
- struct mp_hwdec_ctx *hwdec = hwdec_devices_get_first(vf->hwdec_devs);
- for (int n = 0; n < graph->nb_filters; n++) {
- AVFilterContext *filter = graph->filters[n];
- if (hwdec && hwdec->av_device_ref)
- filter->hw_device_ctx = av_buffer_ref(hwdec->av_device_ref);
- }
- }
-
- if (avfilter_graph_config(graph, NULL) < 0)
- goto error;
-
- p->in = in;
- p->out = out;
- p->graph = graph;
-
- assert(out->nb_inputs == 1);
- assert(in->nb_outputs == 1);
-
- talloc_free(tmp);
- return true;
-
-error:
- MP_FATAL(vf, "Can't configure libavfilter graph.\n");
- avfilter_graph_free(&graph);
- talloc_free(tmp);
- return false;
-}
-
-static void reset(vf_instance_t *vf)
-{
- struct vf_priv_s *p = vf->priv;
- struct mp_image_params *f = &vf->fmt_in;
- if (p->graph && f->imgfmt)
- recreate_graph(vf, f);
-}
-
-static int reconfig(struct vf_instance *vf, struct mp_image_params *in,
- struct mp_image_params *out)
-{
- struct vf_priv_s *p = vf->priv;
-
- *out = *in; // pass-through untouched flags
-
- if (vf->priv->lw_reconfig_cb) {
- if (vf->priv->lw_reconfig_cb(vf, in, out) < 0)
- return -1;
- }
-
- if (!recreate_graph(vf, in))
- return -1;
-
- AVFilterLink *l_out = p->out->inputs[0];
- AVFilterLink *l_in = p->in->outputs[0];
-
- p->timebase_in = l_in->time_base;
- p->timebase_out = l_out->time_base;
-
- p->par_in = l_in->sample_aspect_ratio;
-
- out->w = l_out->w;
- out->h = l_out->h;
- out->p_w = l_out->sample_aspect_ratio.num;
- out->p_h = l_out->sample_aspect_ratio.den;
- out->imgfmt = pixfmt2imgfmt(l_out->format);
- av_buffer_unref(&vf->out_hwframes_ref);
-#if LIBAVFILTER_VERSION_INT >= AV_VERSION_INT(6, 69, 100) && \
- LIBAVFILTER_VERSION_MICRO >= 100
- AVBufferRef *hw_frames_ctx = av_buffersink_get_hw_frames_ctx(p->out);
-#else
- AVBufferRef *hw_frames_ctx = l_out->hw_frames_ctx;
-#endif
- if (hw_frames_ctx) {
- AVHWFramesContext *fctx = (void *)hw_frames_ctx->data;
- out->hw_subfmt = pixfmt2imgfmt(fctx->sw_format);
- vf->out_hwframes_ref = av_buffer_ref(hw_frames_ctx);
- }
- return 0;
-}
-
-static int query_format(struct vf_instance *vf, unsigned int fmt)
-{
- // Format negotiation is not possible with libavfilter.
- return 1;
-}
-
-static AVFrame *mp_to_av(struct vf_instance *vf, struct mp_image *img)
-{
- struct vf_priv_s *p = vf->priv;
- if (!img)
- return NULL;
- uint64_t pts = img->pts == MP_NOPTS_VALUE ?
- AV_NOPTS_VALUE : img->pts * av_q2d(av_inv_q(p->timebase_in));
- AVFrame *frame = mp_image_to_av_frame_and_unref(img);
- if (!frame)
- return NULL; // OOM is (coincidentally) handled as EOF
- frame->pts = pts;
- frame->sample_aspect_ratio = p->par_in;
- return frame;
-}
-
-static struct mp_image *av_to_mp(struct vf_instance *vf, AVFrame *av_frame)
-{
- struct vf_priv_s *p = vf->priv;
- struct mp_image *img = mp_image_from_av_frame(av_frame);
- if (!img)
- return NULL; // OOM
- img->pts = av_frame->pts == AV_NOPTS_VALUE ?
- MP_NOPTS_VALUE : av_frame->pts * av_q2d(p->timebase_out);
- av_frame_free(&av_frame);
- return img;
-}
-
-static void get_metadata_from_av_frame(struct vf_instance *vf, AVFrame *frame)
-{
-#if LIBAVUTIL_VERSION_MICRO >= 100
- struct vf_priv_s *p = vf->priv;
- if (!p->metadata)
- p->metadata = talloc_zero(p, struct mp_tags);
-
- mp_tags_copy_from_av_dictionary(p->metadata, frame->metadata);
-#endif
-}
-
-static int filter_ext(struct vf_instance *vf, struct mp_image *mpi)
-{
- struct vf_priv_s *p = vf->priv;
-
- if (p->eof && mpi) {
- // Once EOF is reached, libavfilter is "stuck" in the EOF state, and
- // won't accept new input. Forcefully override it. This helps e.g.
- // with cover art, where we always want to generate new output.
- reset(vf);
- }
-
- if (!p->graph)
- return -1;
-
- if (!mpi) {
- if (p->eof)
- return 0;
- p->eof = true;
- }
-
- AVFrame *frame = mp_to_av(vf, mpi);
- int r = av_buffersrc_add_frame(p->in, frame) < 0 ? -1 : 0;
- av_frame_free(&frame);
-
- return r;
-}
-
-static int filter_out(struct vf_instance *vf)
-{
- struct vf_priv_s *p = vf->priv;
-
- AVFrame *frame = av_frame_alloc();
- int err = av_buffersink_get_frame(p->out, frame);
- if (err == AVERROR(EAGAIN) || err == AVERROR_EOF) {
- // Not an error situation - no more output buffers in queue.
- // AVERROR_EOF means we shouldn't even give the filter more
- // input, but we don't handle that completely correctly.
- av_frame_free(&frame);
- p->eof |= err == AVERROR_EOF;
- return 0;
- }
- if (err < 0) {
- av_frame_free(&frame);
- MP_ERR(vf, "libavfilter error: %d\n", err);
- return -1;
- }
-
- get_metadata_from_av_frame(vf, frame);
- vf_add_output_frame(vf, av_to_mp(vf, frame));
- return 0;
-}
-
-static int control(vf_instance_t *vf, int request, void *data)
-{
- switch (request) {
- case VFCTRL_SEEK_RESET:
- reset(vf);
- return CONTROL_OK;
- case VFCTRL_COMMAND: {
- if (!vf->priv->graph)
- break;
- char **args = data;
- return avfilter_graph_send_command(vf->priv->graph, "all",
- args[0], args[1], &(char){0}, 0, 0)
- >= 0 ? CONTROL_OK : CONTROL_ERROR;
- }
- case VFCTRL_GET_METADATA:
- if (vf->priv && vf->priv->metadata) {
- *(struct mp_tags *)data = *vf->priv->metadata;
- return CONTROL_OK;
- } else {
- return CONTROL_NA;
- }
- }
- return CONTROL_UNKNOWN;
-}
-
-static void uninit(struct vf_instance *vf)
-{
- if (!vf->priv)
- return;
- destroy_graph(vf);
-}
-
-static int vf_open(vf_instance_t *vf)
-{
- struct vf_priv_s *p = vf->priv;
-
- vf->reconfig = reconfig;
- vf->filter_ext = filter_ext;
- vf->filter_out = filter_out;
- vf->filter = NULL;
- vf->query_format = query_format;
- vf->control = control;
- vf->uninit = uninit;
-
- if (p->is_bridge) {
- if (!p->cfg_filter_name) {
- MP_ERR(vf, "Filter name not set!\n");
- return 0;
- }
- if (!avfilter_get_by_name(p->cfg_filter_name)) {
- MP_ERR(vf, "libavfilter filter '%s' not found!\n", p->cfg_filter_name);
- return 0;
- }
- }
-
- return 1;
-}
-
-static bool is_single_video_only(const AVFilterPad *pads)
-{
- int count = avfilter_pad_count(pads);
- if (count != 1)
- return false;
- return avfilter_pad_get_type(pads, 0) == AVMEDIA_TYPE_VIDEO;
-}
-
-// Does it have exactly one video input and one video output?
-static bool is_usable(const AVFilter *filter)
-{
- return is_single_video_only(filter->inputs) &&
- is_single_video_only(filter->outputs);
-}
-
-static void print_help(struct mp_log *log)
-{
- mp_info(log, "List of libavfilter filters:\n");
- for (const AVFilter *filter = avfilter_next(NULL); filter;
- filter = avfilter_next(filter))
- {
- if (is_usable(filter))
- mp_info(log, " %-16s %s\n", filter->name, filter->description);
- }
- mp_info(log, "\n"
- "This lists video->video filters only. Refer to\n"
- "\n"
- " https://ffmpeg.org/ffmpeg-filters.html\n"
- "\n"
- "to see how to use each filter and what arguments each filter takes.\n"
- "Also, be sure to quote the FFmpeg filter string properly, e.g.:\n"
- "\n"
- " \"--vf=lavfi=[gradfun=20:30]\"\n"
- "\n"
- "Otherwise, mpv and libavfilter syntax will conflict.\n"
- "\n");
-}
-
-#define OPT_BASE_STRUCT struct vf_priv_s
-static const m_option_t vf_opts_fields[] = {
- OPT_STRING("graph", cfg_graph, M_OPT_MIN, .min = 1),
- OPT_INT64("sws-flags", cfg_sws_flags, 0),
- OPT_KEYVALUELIST("o", cfg_avopts, 0),
- {0}
-};
-
-const vf_info_t vf_info_lavfi = {
- .description = "libavfilter bridge",
- .name = "lavfi",
- .open = vf_open,
- .priv_size = sizeof(struct vf_priv_s),
- .priv_defaults = &(const struct vf_priv_s){
- .cfg_sws_flags = SWS_BICUBIC,
- },
- .options = vf_opts_fields,
- .print_help = print_help,
-};
-
-const vf_info_t vf_info_lavfi_bridge = {
- .description = "libavfilter bridge (explicit options)",
- .name = "lavfi-bridge",
- .open = vf_open,
- .priv_size = sizeof(struct vf_priv_s),
- .options = (const m_option_t[]) {
- OPT_STRING("name", cfg_filter_name, M_OPT_MIN, .min = 1),
- OPT_KEYVALUELIST("opts", cfg_filter_opts, 0),
- OPT_INT64("sws-flags", cfg_sws_flags, 0),
- OPT_KEYVALUELIST("o", cfg_avopts, 0),
- {0}
- },
- .priv_defaults = &(const struct vf_priv_s){
- .is_bridge = true,
- .cfg_sws_flags = SWS_BICUBIC,
- },
- .print_help = print_help,
-};
diff --git a/video/filter/vf_sub.c b/video/filter/vf_sub.c
index 065a8c66c7..8b2c1c83f0 100644
--- a/video/filter/vf_sub.c
+++ b/video/filter/vf_sub.c
@@ -29,11 +29,13 @@
#include "config.h"
#include "common/msg.h"
+#include "filters/filter.h"
+#include "filters/filter_internal.h"
+#include "filters/user_filters.h"
#include "options/options.h"
-
#include "video/img_format.h"
#include "video/mp_image.h"
-#include "vf.h"
+#include "video/mp_image_pool.h"
#include "sub/osd.h"
#include "sub/dec_sub.h"
@@ -41,112 +43,121 @@
#include "options/m_option.h"
-struct vf_priv_s {
- int opt_top_margin, opt_bottom_margin;
-
- int outh, outw;
+struct vf_sub_opts {
+ int top_margin, bottom_margin;
+};
- struct osd_state *osd;
- struct mp_osd_res dim;
+struct priv {
+ struct vf_sub_opts *opts;
+ struct mp_image_pool *pool;
};
-static int reconfig(struct vf_instance *vf, struct mp_image_params *in,
- struct mp_image_params *out)
+static void vf_sub_process(struct mp_filter *f)
{
- int width = in->w, height = in->h;
-
- vf->priv->outh = height + vf->priv->opt_top_margin +
- vf->priv->opt_bottom_margin;
- vf->priv->outw = width;
-
- vf->priv->dim = (struct mp_osd_res) {
- .w = vf->priv->outw,
- .h = vf->priv->outh,
- .mt = vf->priv->opt_top_margin,
- .mb = vf->priv->opt_bottom_margin,
- .display_par = in->p_w / (double)in->p_h,
- };
+ struct priv *priv = f->priv;
- *out = *in;
- out->w = vf->priv->outw;
- out->h = vf->priv->outh;
- return 0;
-}
+ if (!mp_pin_can_transfer_data(f->ppins[1], f->ppins[0]))
+ return;
-static void prepare_image(struct vf_instance *vf, struct mp_image *dmpi,
- struct mp_image *mpi)
-{
- int y1 = MP_ALIGN_DOWN(vf->priv->opt_top_margin, mpi->fmt.align_y);
- int y2 = MP_ALIGN_DOWN(y1 + mpi->h, mpi->fmt.align_y);
- struct mp_image cropped = *dmpi;
- mp_image_crop(&cropped, 0, y1, mpi->w, y1 + mpi->h);
- mp_image_copy(&cropped, mpi);
- mp_image_clear(dmpi, 0, 0, dmpi->w, y1);
- mp_image_clear(dmpi, 0, y2, dmpi->w, vf->priv->outh);
-}
+ struct mp_frame frame = mp_pin_out_read(f->ppins[0]);
-static struct mp_image *filter(struct vf_instance *vf, struct mp_image *mpi)
-{
- struct vf_priv_s *priv = vf->priv;
- struct osd_state *osd = priv->osd;
+ if (mp_frame_is_signaling(frame)) {
+ mp_pin_in_write(f->ppins[1], frame);
+ return;
+ }
+
+ struct mp_stream_info *info = mp_filter_find_stream_info(f);
+ struct osd_state *osd = info ? info->osd : NULL;
+
+ if (!osd)
+ goto error;
+
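+    // Tell the OSD code that subtitles are rendered by the filter chain, so
+    // the VO does not draw them a second time.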
+ osd_set_render_subs_in_filter(osd, true);
+
+ if (frame.type != MP_FRAME_VIDEO)
+ goto error;
+
+ struct mp_image *mpi = frame.data;
- if (vf->priv->opt_top_margin || vf->priv->opt_bottom_margin) {
- struct mp_image *dmpi = vf_alloc_out_image(vf);
+ if (!mp_sws_supported_format(mpi->imgfmt))
+ goto error;
+
+ struct mp_osd_res dim = {
+ .w = mpi->w,
+ .h = mpi->h + priv->opts->top_margin + priv->opts->bottom_margin,
+ .mt = priv->opts->top_margin,
+ .mb = priv->opts->bottom_margin,
+ .display_par = mpi->params.p_w / (double)mpi->params.p_h,
+ };
+
+ if (dim.w != mpi->w || dim.h != mpi->h) {
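+        // Margins requested: allocate a taller image, copy the source into
+        // the area between the margins, and clear the added bands.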
+ struct mp_image *dmpi =
+ mp_image_pool_get(priv->pool, mpi->imgfmt, dim.w, dim.h);
if (!dmpi)
- return NULL;
+ goto error;
mp_image_copy_attributes(dmpi, mpi);
- prepare_image(vf, dmpi, mpi);
- talloc_free(mpi);
+ int y1 = MP_ALIGN_DOWN(priv->opts->top_margin, mpi->fmt.align_y);
+ int y2 = MP_ALIGN_DOWN(y1 + mpi->h, mpi->fmt.align_y);
+ struct mp_image cropped = *dmpi;
+ mp_image_crop(&cropped, 0, y1, mpi->w, y1 + mpi->h);
+ mp_image_copy(&cropped, mpi);
+ mp_image_clear(dmpi, 0, 0, dmpi->w, y1);
+ mp_image_clear(dmpi, 0, y2, dmpi->w, dim.h);
+ mp_frame_unref(&frame);
mpi = dmpi;
+ frame = (struct mp_frame){MP_FRAME_VIDEO, mpi};
}
- if (!osd)
- return mpi;
+ osd_draw_on_image_p(osd, dim, mpi->pts, OSD_DRAW_SUB_FILTER, priv->pool, mpi);
- osd_draw_on_image_p(osd, priv->dim, mpi->pts, OSD_DRAW_SUB_FILTER,
- vf->out_pool, mpi);
+ mp_pin_in_write(f->ppins[1], frame);
+ return;
- return mpi;
+error:
+ MP_ERR(f, "unsupported format, missing OSD, or failed allocation\n");
+ mp_frame_unref(&frame);
+ mp_filter_internal_mark_failed(f);
}
-static int query_format(struct vf_instance *vf, unsigned int fmt)
-{
- if (!mp_sws_supported_format(fmt))
- return 0;
- return vf_next_query_format(vf, fmt);
-}
+static const struct mp_filter_info vf_sub_filter = {
+ .name = "sub",
+ .process = vf_sub_process,
+ .priv_size = sizeof(struct priv),
+};
-static int control(vf_instance_t *vf, int request, void *data)
+static struct mp_filter *vf_sub_create(struct mp_filter *parent, void *options)
{
- switch (request) {
- case VFCTRL_INIT_OSD:
- vf->priv->osd = data;
- return CONTROL_TRUE;
+ struct mp_filter *f = mp_filter_create(parent, &vf_sub_filter);
+ if (!f) {
+ talloc_free(options);
+ return NULL;
}
- return CONTROL_UNKNOWN;
-}
-static int vf_open(vf_instance_t *vf)
-{
- MP_WARN(vf, "This filter is deprecated and will be removed (no replacement)\n");
- vf->reconfig = reconfig;
- vf->query_format = query_format;
- vf->control = control;
- vf->filter = filter;
- return 1;
+ MP_WARN(f, "This filter is deprecated and will be removed (no replacement)\n");
+
+ mp_filter_add_pin(f, MP_PIN_IN, "in");
+ mp_filter_add_pin(f, MP_PIN_OUT, "out");
+
+ struct priv *priv = f->priv;
+ priv->opts = talloc_steal(priv, options);
+ priv->pool = mp_image_pool_new(priv);
+
+ return f;
}
-#define OPT_BASE_STRUCT struct vf_priv_s
+#define OPT_BASE_STRUCT struct vf_sub_opts
static const m_option_t vf_opts_fields[] = {
- OPT_INTRANGE("bottom-margin", opt_bottom_margin, 0, 0, 2000),
- OPT_INTRANGE("top-margin", opt_top_margin, 0, 0, 2000),
+ OPT_INTRANGE("bottom-margin", bottom_margin, 0, 0, 2000),
+ OPT_INTRANGE("top-margin", top_margin, 0, 0, 2000),
{0}
};
-const vf_info_t vf_info_sub = {
- .description = "Render subtitles",
- .name = "sub",
- .open = vf_open,
- .priv_size = sizeof(struct vf_priv_s),
- .options = vf_opts_fields,
+const struct mp_user_filter_entry vf_sub = {
+ .desc = {
+ .description = "Render subtitles",
+ .name = "sub",
+ .priv_size = sizeof(OPT_BASE_STRUCT),
+ .options = vf_opts_fields,
+ },
+ .create = vf_sub_create,
};
diff --git a/video/filter/vf_vapoursynth.c b/video/filter/vf_vapoursynth.c
index b5aad7abbf..4de09794e6 100644
--- a/video/filter/vf_vapoursynth.c
+++ b/video/filter/vf_vapoursynth.c
@@ -34,13 +34,27 @@
#include "common/msg.h"
#include "options/m_option.h"
#include "options/path.h"
-
+#include "filters/f_autoconvert.h"
+#include "filters/f_utils.h"
+#include "filters/filter.h"
+#include "filters/filter_internal.h"
+#include "filters/user_filters.h"
#include "video/img_format.h"
#include "video/mp_image.h"
#include "video/sws_utils.h"
-#include "vf.h"
-struct vf_priv_s {
+struct vapoursynth_opts {
+ char *file;
+ int maxbuffer;
+ int maxrequests;
+
+ const struct script_driver *drv;
+};
+
+struct priv {
+ struct mp_log *log;
+ struct vapoursynth_opts *opts;
+
VSCore *vscore;
const VSAPI *vsapi;
VSNodeRef *out_node;
@@ -56,16 +70,20 @@ struct vf_priv_s {
VSMap **gc_map;
int num_gc_map;
+ struct mp_filter *f;
+ struct mp_pin *in_pin;
+
+ // Format for which VS is currently configured.
struct mp_image_params fmt_in;
pthread_mutex_t lock;
pthread_cond_t wakeup;
// --- the following members are all protected by lock
- struct mp_image *next_image;// used to compute frame duration of oldest image
struct mp_image **buffered; // oldest image first
int num_buffered;
int in_frameno; // frame number of buffered[0] (the oldest)
+ int requested_frameno; // last frame number for which we woke up core
int out_frameno; // frame number of first requested/ready frame
double out_pts; // pts corresponding to first requested/ready frame
struct mp_image **requested;// frame callback results (can point to dummy_img)
@@ -74,25 +92,25 @@ struct vf_priv_s {
bool failed; // frame callback returned with an error
bool shutdown; // ask node to return
bool eof; // drain remaining data
- int64_t frames_sent;
+ int64_t frames_sent; // total nr. of frames ever added to input queue
bool initializing; // filters are being built
bool in_node_active; // node might still be called
-
- // --- options
- char *cfg_file;
- int cfg_maxbuffer;
- int cfg_maxrequests;
};
// priv->requested[n] points to this if a request for frame n is in-progress
static const struct mp_image dummy_img;
+// or if a request failed during EOF/reinit draining
+static const struct mp_image dummy_img_eof;
+
+static void destroy_vs(struct priv *p);
+static int reinit_vs(struct priv *p);
struct script_driver {
- int (*init)(struct vf_instance *vf); // first time init
- void (*uninit)(struct vf_instance *vf); // last time uninit
- int (*load_core)(struct vf_instance *vf); // make vsapi/vscore available
- int (*load)(struct vf_instance *vf, VSMap *vars); // also set p->out_node
- void (*unload)(struct vf_instance *vf); // unload script and maybe vs
+ int (*init)(struct priv *p); // first time init
+ void (*uninit)(struct priv *p); // last time uninit
+ int (*load_core)(struct priv *p); // make vsapi/vscore available
+ int (*load)(struct priv *p, VSMap *vars); // also sets p->out_node
+ void (*unload)(struct priv *p); // unload script and maybe vs
};
struct mpvs_fmt {
@@ -166,7 +184,7 @@ static int mp_from_vs(VSPresetFormat vs)
return 0;
}
-static void copy_mp_to_vs_frame_props_map(struct vf_priv_s *p, VSMap *map,
+static void copy_mp_to_vs_frame_props_map(struct priv *p, VSMap *map,
struct mp_image *img)
{
struct mp_image_params *params = &img->params;
@@ -197,7 +215,7 @@ static void copy_mp_to_vs_frame_props_map(struct vf_priv_s *p, VSMap *map,
p->vsapi->propSetInt(map, "_FieldBased", field, 0);
}
-static int set_vs_frame_props(struct vf_priv_s *p, VSFrameRef *frame,
+static int set_vs_frame_props(struct priv *p, VSFrameRef *frame,
struct mp_image *img, int dur_num, int dur_den)
{
VSMap *map = p->vsapi->getFramePropsRW(frame);
@@ -209,14 +227,14 @@ static int set_vs_frame_props(struct vf_priv_s *p, VSFrameRef *frame,
return 0;
}
-static VSFrameRef *alloc_vs_frame(struct vf_priv_s *p, struct mp_image_params *fmt)
+static VSFrameRef *alloc_vs_frame(struct priv *p, struct mp_image_params *fmt)
{
const VSFormat *vsfmt =
p->vsapi->getFormatPreset(mp_to_vs(fmt->imgfmt), p->vscore);
return p->vsapi->newVideoFrame(vsfmt, fmt->w, fmt->h, NULL, p->vscore);
}
-static struct mp_image map_vs_frame(struct vf_priv_s *p, const VSFrameRef *ref,
+static struct mp_image map_vs_frame(struct priv *p, const VSFrameRef *ref,
bool w)
{
const VSFormat *fmt = p->vsapi->getFrameFormat(ref);
@@ -238,7 +256,7 @@ static struct mp_image map_vs_frame(struct vf_priv_s *p, const VSFrameRef *ref,
return img;
}
-static void drain_oldest_buffered_frame(struct vf_priv_s *p)
+static void drain_oldest_buffered_frame(struct priv *p)
{
if (!p->num_buffered)
return;
@@ -252,185 +270,170 @@ static void drain_oldest_buffered_frame(struct vf_priv_s *p)
static void VS_CC vs_frame_done(void *userData, const VSFrameRef *f, int n,
VSNodeRef *node, const char *errorMsg)
{
- struct vf_instance *vf = userData;
- struct vf_priv_s *p = vf->priv;
+ struct priv *p = userData;
pthread_mutex_lock(&p->lock);
// If these assertions fail, n is an unrequested frame (or filtered twice).
assert(n >= p->out_frameno && n < p->out_frameno + p->max_requests);
int index = n - p->out_frameno;
- MP_TRACE(vf, "filtered frame %d (%d)\n", n, index);
+ MP_TRACE(p, "filtered frame %d (%d)\n", n, index);
assert(p->requested[index] == &dummy_img);
struct mp_image *res = NULL;
if (f) {
struct mp_image img = map_vs_frame(p, f, false);
- img.pts = MP_NOPTS_VALUE;
+ img.pkt_duration = -1;
const VSMap *map = p->vsapi->getFramePropsRO(f);
if (map) {
int err1, err2;
int num = p->vsapi->propGetInt(map, "_DurationNum", 0, &err1);
int den = p->vsapi->propGetInt(map, "_DurationDen", 0, &err2);
if (!err1 && !err2)
- img.pts = num / (double)den; // abusing pts for frame length
+ img.pkt_duration = num / (double)den;
}
- if (img.pts == MP_NOPTS_VALUE)
- MP_ERR(vf, "No PTS after filter at frame %d!\n", n);
+ if (img.pkt_duration < 0)
+ MP_ERR(p, "No PTS after filter at frame %d!\n", n);
res = mp_image_new_copy(&img);
p->vsapi->freeFrame(f);
}
- if (!res) {
- p->failed = true;
- MP_ERR(vf, "Filter error at frame %d: %s\n", n, errorMsg);
+ if (!res && !p->shutdown) {
+ if (p->eof) {
+ res = (struct mp_image *)&dummy_img_eof;
+ } else {
+ p->failed = true;
+ MP_ERR(p, "Filter error at frame %d: %s\n", n, errorMsg);
+ }
}
p->requested[index] = res;
pthread_cond_broadcast(&p->wakeup);
pthread_mutex_unlock(&p->lock);
+ mp_filter_wakeup(p->f);
}
-static bool locked_need_input(struct vf_instance *vf)
+static void vf_vapoursynth_process(struct mp_filter *f)
{
- struct vf_priv_s *p = vf->priv;
- return p->num_buffered < MP_TALLOC_AVAIL(p->buffered);
-}
+ struct priv *p = f->priv;
-// Return true if progress was made.
-static bool locked_read_output(struct vf_instance *vf)
-{
- struct vf_priv_s *p = vf->priv;
- bool r = false;
+ pthread_mutex_lock(&p->lock);
- // Move finished frames from the request slots to the vf output queue.
- while (p->requested[0] && p->requested[0] != &dummy_img) {
- struct mp_image *out = p->requested[0];
- if (out->pts != MP_NOPTS_VALUE) {
- double duration = out->pts;
- out->pts = p->out_pts;
- p->out_pts += duration;
- }
- vf_add_output_frame(vf, out);
- for (int n = 0; n < p->max_requests - 1; n++)
- p->requested[n] = p->requested[n + 1];
- p->requested[p->max_requests - 1] = NULL;
- p->out_frameno++;
- r = true;
+ if (p->failed) {
+ // Not sure what we do on errors, but at least don't deadlock.
+ MP_ERR(f, "failed, no action taken\n");
+ mp_filter_internal_mark_failed(f);
+ goto done;
}
- // Don't request frames if we haven't sent any input yet.
- if (p->num_buffered + p->in_frameno == 0)
- return r;
-
- // Request new future frames as far as possible.
- for (int n = 0; n < p->max_requests; n++) {
- if (!p->requested[n]) {
- // Note: this assumes getFrameAsync() will never call
- // infiltGetFrame (if it does, we would deadlock)
- p->requested[n] = (struct mp_image *)&dummy_img;
- p->failed = false;
- MP_TRACE(vf, "requesting frame %d (%d)\n", p->out_frameno + n, n);
- p->vsapi->getFrameAsync(p->out_frameno + n, p->out_node,
- vs_frame_done, vf);
+ // Read input and pass it to the input queue VS reads.
+ if (p->num_buffered < MP_TALLOC_AVAIL(p->buffered) && !p->eof) {
+ // Note: this requests new input frames even if no output was ever
+ // requested. Normally this is not how mp_filter works, but since VS
+ // works asynchronously, it's probably ok.
+ struct mp_frame frame = mp_pin_out_read(p->in_pin);
+ if (frame.type == MP_FRAME_EOF) {
+ if (p->out_node) {
+ MP_VERBOSE(p, "initiate EOF\n");
+ p->eof = true;
+ pthread_cond_broadcast(&p->wakeup);
+ } else if (mp_pin_in_needs_data(f->ppins[1])) {
+ MP_VERBOSE(p, "return EOF\n");
+ mp_pin_in_write(f->ppins[1], frame);
+ frame = MP_NO_FRAME;
+ }
+ // Keep it until we can propagate it.
+ mp_pin_out_unread(p->in_pin, frame);
+ } else if (frame.type == MP_FRAME_VIDEO) {
+ struct mp_image *mpi = frame.data;
+ // Init VS script, or reinit it to change video format. (This
+ // includes derived parameters we pass manually to the script.)
+ if (!p->out_node || mpi->imgfmt != p->fmt_in.imgfmt ||
+ mpi->w != p->fmt_in.w || mpi->h != p->fmt_in.h ||
+ mpi->params.p_w != p->fmt_in.p_w ||
+ mpi->params.p_h != p->fmt_in.p_h)
+ {
+ if (p->out_node) {
+ // Drain still buffered frames.
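+                    // This reuses the EOF machinery: once the pending
+                    // requests resolve to dummy_img_eof, the VS core is torn
+                    // down and the unread frame is picked up again with the
+                    // new format.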
+ MP_VERBOSE(p, "draining VS for format change\n");
+ mp_pin_out_unread(p->in_pin, frame);
+ p->eof = true;
+ pthread_cond_broadcast(&p->wakeup);
+ mp_filter_internal_mark_progress(f);
+ goto done;
+ }
+ pthread_mutex_unlock(&p->lock);
+ if (p->out_node)
+ destroy_vs(p);
+ p->fmt_in = mpi->params;
+ if (reinit_vs(p) < 0) {
+ MP_ERR(p, "could not init VS\n");
+ mp_frame_unref(&frame);
+ return;
+ }
+ }
+ if (p->out_pts == MP_NOPTS_VALUE)
+ p->out_pts = mpi->pts;
+ p->frames_sent++;
+ p->buffered[p->num_buffered++] = mpi;
+ pthread_cond_broadcast(&p->wakeup);
+ } else if (frame.type != MP_FRAME_NONE) {
+ MP_ERR(p, "discarding unknown frame type\n");
+ goto done;
}
}
- return r;
-}
+    // Read finished frames from the VS output queue and return them.
+ if (mp_pin_in_needs_data(f->ppins[1]) && p->requested[0] &&
+ p->requested[0] != &dummy_img &&
+ p->requested[0] != &dummy_img_eof)
+ {
+ struct mp_image *out = p->requested[0];
-static int filter_ext(struct vf_instance *vf, struct mp_image *mpi)
-{
- struct vf_priv_s *p = vf->priv;
- int ret = 0;
- bool eof = !mpi;
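+        // VS itself has no PTS concept, so output timestamps are rebuilt from
+        // the first buffered input PTS plus the per-frame durations the
+        // script reports.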
+ out->pts = p->out_pts;
+ if (p->out_pts != MP_NOPTS_VALUE && out->pkt_duration >= 0)
+ p->out_pts += out->pkt_duration;
- if (!p->out_node) {
- talloc_free(mpi);
- return -1;
- }
+ mp_pin_in_write(f->ppins[1], MAKE_FRAME(MP_FRAME_VIDEO, out));
- MPSWAP(struct mp_image *, p->next_image, mpi);
-
- if (mpi) {
- // Turn PTS into frame duration (the pts field is abused for storing it)
- if (p->out_pts == MP_NOPTS_VALUE)
- p->out_pts = mpi->pts;
- mpi->pts = p->next_image ? p->next_image->pts - mpi->pts : 0;
+ for (int n = 0; n < p->max_requests - 1; n++)
+ p->requested[n] = p->requested[n + 1];
+ p->requested[p->max_requests - 1] = NULL;
+ p->out_frameno++;
}
- // Try to get new frames until we get rid of the input mpi.
- pthread_mutex_lock(&p->lock);
- while (1) {
- // Not sure what we do on errors, but at least don't deadlock.
- if (p->failed) {
- p->failed = false;
- talloc_free(mpi);
- ret = -1;
- break;
- }
-
- // Make the input frame available to infiltGetFrame().
- if (mpi && locked_need_input(vf)) {
- p->frames_sent++;
- p->buffered[p->num_buffered++] = talloc_steal(p->buffered, mpi);
- mpi = NULL;
- pthread_cond_broadcast(&p->wakeup);
- }
-
- locked_read_output(vf);
-
- if (!mpi) {
- if (eof && p->frames_sent && !p->eof) {
- MP_VERBOSE(vf, "input EOF\n");
- p->eof = true;
- pthread_cond_broadcast(&p->wakeup);
- }
- break;
- }
- pthread_cond_wait(&p->wakeup, &p->lock);
+ // This happens on EOF draining and format changes.
+ if (p->requested[0] == &dummy_img_eof) {
+ MP_VERBOSE(p, "finishing up\n");
+ assert(p->eof);
+ pthread_mutex_unlock(&p->lock);
+ destroy_vs(p);
+ mp_filter_internal_mark_progress(f);
+ return;
}
- pthread_mutex_unlock(&p->lock);
- return ret;
-}
-// Fetch 1 output frame, or 0 if we probably need new input.
-static int filter_out(struct vf_instance *vf)
-{
- struct vf_priv_s *p = vf->priv;
- int ret = 0;
- pthread_mutex_lock(&p->lock);
- while (1) {
- if (p->failed) {
- ret = -1;
- break;
+ // Don't request frames if we haven't sent any input yet.
+ if (p->frames_sent && p->out_node) {
+ // Request new future frames as far as possible.
+ for (int n = 0; n < p->max_requests; n++) {
+ if (!p->requested[n]) {
+ // Note: this assumes getFrameAsync() will never call
+ // infiltGetFrame (if it does, we would deadlock)
+ p->requested[n] = (struct mp_image *)&dummy_img;
+ p->failed = false;
+ MP_TRACE(p, "requesting frame %d (%d)\n", p->out_frameno + n, n);
+ p->vsapi->getFrameAsync(p->out_frameno + n, p->out_node,
+ vs_frame_done, p);
+ }
}
- if (locked_read_output(vf))
- break;
- // If the VS filter wants new input, there's no guarantee that we can
- // actually finish any time soon without feeding new input.
- if (!p->eof && locked_need_input(vf))
- break;
- pthread_cond_wait(&p->wakeup, &p->lock);
}
- pthread_mutex_unlock(&p->lock);
- return ret;
-}
-static bool needs_input(struct vf_instance *vf)
-{
- struct vf_priv_s *p = vf->priv;
- bool r = false;
- pthread_mutex_lock(&p->lock);
- locked_read_output(vf);
- r = vf->num_out_queued < p->max_requests && locked_need_input(vf);
+done:
pthread_mutex_unlock(&p->lock);
- return r;
}
static void VS_CC infiltInit(VSMap *in, VSMap *out, void **instanceData,
VSNode *node, VSCore *core, const VSAPI *vsapi)
{
- struct vf_instance *vf = *instanceData;
- struct vf_priv_s *p = vf->priv;
+ struct priv *p = *instanceData;
// The number of frames of our input node is obviously unknown. The user
// could for example seek any time, randomly "ending" the clip.
// This specific value was suggested by the VapourSynth developer.
@@ -458,30 +461,29 @@ static const VSFrameRef *VS_CC infiltGetFrame(int frameno, int activationReason,
VSFrameContext *frameCtx, VSCore *core,
const VSAPI *vsapi)
{
- struct vf_instance *vf = *instanceData;
- struct vf_priv_s *p = vf->priv;
+ struct priv *p = *instanceData;
VSFrameRef *ret = NULL;
pthread_mutex_lock(&p->lock);
- MP_TRACE(vf, "VS asking for frame %d (at %d)\n", frameno, p->in_frameno);
+ MP_TRACE(p, "VS asking for frame %d (at %d)\n", frameno, p->in_frameno);
while (1) {
if (p->shutdown) {
- p->vsapi->setFilterError("EOF or filter reinit/uninit", frameCtx);
- MP_DBG(vf, "returning error on EOF/reset\n");
+ p->vsapi->setFilterError("EOF or filter reset/uninit", frameCtx);
+ MP_DBG(p, "returning error on reset/uninit\n");
break;
}
if (p->initializing) {
- MP_WARN(vf, "Frame requested during init! This is unsupported.\n"
+ MP_WARN(p, "Frame requested during init! This is unsupported.\n"
"Returning black dummy frame with 0 duration.\n");
- ret = alloc_vs_frame(p, &vf->fmt_in);
+ ret = alloc_vs_frame(p, &p->fmt_in);
if (!ret) {
p->vsapi->setFilterError("Could not allocate VS frame", frameCtx);
break;
}
struct mp_image vsframe = map_vs_frame(p, ret, true);
- mp_image_clear(&vsframe, 0, 0, vf->fmt_in.w, vf->fmt_in.h);
+ mp_image_clear(&vsframe, 0, 0, p->fmt_in.w, p->fmt_in.h);
struct mp_image dummy = {0};
- mp_image_set_params(&dummy, &vf->fmt_in);
+ mp_image_set_params(&dummy, &p->fmt_in);
set_vs_frame_props(p, ret, &dummy, 0, 1);
break;
}
@@ -491,7 +493,7 @@ static const VSFrameRef *VS_CC infiltGetFrame(int frameno, int activationReason,
"Frame %d requested, but only have frames starting from %d. "
"Try increasing the buffered-frames suboption.",
frameno, p->in_frameno);
- MP_FATAL(vf, "%s\n", msg);
+ MP_FATAL(p, "%s\n", msg);
p->vsapi->setFilterError(msg, frameCtx);
break;
}
@@ -501,20 +503,23 @@ static const VSFrameRef *VS_CC infiltGetFrame(int frameno, int activationReason,
if (p->num_buffered) {
drain_oldest_buffered_frame(p);
pthread_cond_broadcast(&p->wakeup);
- if (vf->chain->wakeup_callback)
- vf->chain->wakeup_callback(vf->chain->wakeup_callback_ctx);
+ mp_filter_wakeup(p->f);
continue;
}
}
if (frameno >= p->in_frameno + p->num_buffered) {
- // If we think EOF was reached, don't wait for new input, and assume
- // the VS filter has reached EOF.
+ // If there won't be any new frames, abort the request.
if (p->eof) {
- p->shutdown = true;
- continue;
+ p->vsapi->setFilterError("EOF or filter EOF/reinit", frameCtx);
+ MP_DBG(p, "returning error on EOF/reinit\n");
+ break;
}
- }
- if (frameno < p->in_frameno + p->num_buffered) {
+ // Request more frames.
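+            // (requested_frameno avoids waking up the mpv filter repeatedly
+            // for the same missing input.)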
+ if (p->requested_frameno <= p->in_frameno + p->num_buffered) {
+ p->requested_frameno = p->in_frameno + p->num_buffered + 1;
+ mp_filter_wakeup(p->f);
+ }
+ } else {
struct mp_image *img = p->buffered[frameno - p->in_frameno];
ret = alloc_vs_frame(p, &img->params);
if (!ret) {
@@ -524,7 +529,7 @@ static const VSFrameRef *VS_CC infiltGetFrame(int frameno, int activationReason,
struct mp_image vsframe = map_vs_frame(p, ret, true);
mp_image_copy(&vsframe, img);
int res = 1e6;
- int dur = img->pts * res + 0.5;
+ int dur = img->pkt_duration * res + 0.5;
set_vs_frame_props(p, ret, img, dur, res);
break;
}
@@ -537,8 +542,7 @@ static const VSFrameRef *VS_CC infiltGetFrame(int frameno, int activationReason,
static void VS_CC infiltFree(void *instanceData, VSCore *core, const VSAPI *vsapi)
{
- struct vf_instance *vf = instanceData;
- struct vf_priv_s *p = vf->priv;
+ struct priv *p = instanceData;
pthread_mutex_lock(&p->lock);
p->in_node_active = false;
@@ -548,7 +552,7 @@ static void VS_CC infiltFree(void *instanceData, VSCore *core, const VSAPI *vsap
// number of getAsyncFrame calls in progress
// must be called with p->lock held
-static int num_requested(struct vf_priv_s *p)
+static int num_requested(struct priv *p)
{
int r = 0;
for (int n = 0; n < p->max_requests; n++)
@@ -556,11 +560,12 @@ static int num_requested(struct vf_priv_s *p)
return r;
}
-static void destroy_vs(struct vf_instance *vf)
+static void destroy_vs(struct priv *p)
{
- struct vf_priv_s *p = vf->priv;
+ if (!p->out_node && !p->initializing)
+ return;
- MP_DBG(vf, "destroying VS filters\n");
+ MP_DBG(p, "destroying VS filters\n");
// Wait until our frame callbacks return.
pthread_mutex_lock(&p->lock);
@@ -571,7 +576,7 @@ static void destroy_vs(struct vf_instance *vf)
pthread_cond_wait(&p->wakeup, &p->lock);
pthread_mutex_unlock(&p->lock);
- MP_DBG(vf, "all requests terminated\n");
+ MP_DBG(p, "all requests terminated\n");
if (p->in_node)
p->vsapi->freeNode(p->in_node);
@@ -579,7 +584,7 @@ static void destroy_vs(struct vf_instance *vf)
p->vsapi->freeNode(p->out_node);
p->in_node = p->out_node = NULL;
- p->drv->unload(vf);
+ p->drv->unload(p);
assert(!p->in_node_active);
assert(num_requested(p) == 0); // async callback didn't return?
@@ -588,34 +593,42 @@ static void destroy_vs(struct vf_instance *vf)
p->eof = false;
p->frames_sent = 0;
// Kill filtered images that weren't returned yet
- for (int n = 0; n < p->max_requests; n++)
- mp_image_unrefp(&p->requested[n]);
+ for (int n = 0; n < p->max_requests; n++) {
+ if (p->requested[n] != &dummy_img_eof)
+ mp_image_unrefp(&p->requested[n]);
+ p->requested[n] = NULL;
+ }
// Kill queued frames too
for (int n = 0; n < p->num_buffered; n++)
talloc_free(p->buffered[n]);
p->num_buffered = 0;
- talloc_free(p->next_image);
- p->next_image = NULL;
p->out_pts = MP_NOPTS_VALUE;
p->out_frameno = p->in_frameno = 0;
+ p->requested_frameno = 0;
p->failed = false;
- MP_DBG(vf, "uninitialized.\n");
+ MP_DBG(p, "uninitialized.\n");
}
-static int reinit_vs(struct vf_instance *vf)
+static int reinit_vs(struct priv *p)
{
- struct vf_priv_s *p = vf->priv;
VSMap *vars = NULL, *in = NULL, *out = NULL;
int res = -1;
- destroy_vs(vf);
+ destroy_vs(p);
+
+ MP_DBG(p, "initializing...\n");
+
+ struct mp_imgfmt_desc desc = mp_imgfmt_get_desc(p->fmt_in.imgfmt);
+ if (p->fmt_in.w % desc.align_x || p->fmt_in.h % desc.align_y) {
+ MP_FATAL(p, "VapourSynth does not allow unaligned/cropped video sizes.\n");
+ return -1;
+ }
- MP_DBG(vf, "initializing...\n");
p->initializing = true;
- if (p->drv->load_core(vf) < 0 || !p->vsapi || !p->vscore) {
- MP_FATAL(vf, "Could not get vapoursynth API handle.\n");
+ if (p->drv->load_core(p) < 0 || !p->vsapi || !p->vscore) {
+ MP_FATAL(p, "Could not get vapoursynth API handle.\n");
goto error;
}
@@ -626,11 +639,11 @@ static int reinit_vs(struct vf_instance *vf)
goto error;
p->vsapi->createFilter(in, out, "Input", infiltInit, infiltGetFrame,
- infiltFree, fmSerial, 0, vf, p->vscore);
+ infiltFree, fmSerial, 0, p, p->vscore);
int vserr;
p->in_node = p->vsapi->propGetNode(out, "clip", 0, &vserr);
if (!p->in_node) {
- MP_FATAL(vf, "Could not get our own input node.\n");
+ MP_FATAL(p, "Could not get our own input node.\n");
goto error;
}
@@ -642,26 +655,36 @@ static int reinit_vs(struct vf_instance *vf)
p->vsapi->propSetInt(vars, "video_in_dw", d_w, 0);
p->vsapi->propSetInt(vars, "video_in_dh", d_h, 0);
- p->vsapi->propSetFloat(vars, "container_fps", vf->chain->container_fps, 0);
- p->vsapi->propSetFloat(vars, "display_fps", vf->chain->display_fps, 0);
- if (p->drv->load(vf, vars) < 0)
+ struct mp_stream_info *info = mp_filter_find_stream_info(p->f);
+ double container_fps = 0;
+ double display_fps = 0;
+ if (info) {
+ if (info->get_container_fps)
+ container_fps = info->get_container_fps(info);
+ if (info->get_display_fps)
+ display_fps = info->get_display_fps(info);
+ }
+ p->vsapi->propSetFloat(vars, "container_fps", container_fps, 0);
+ p->vsapi->propSetFloat(vars, "display_fps", display_fps, 0);
+
+ if (p->drv->load(p, vars) < 0)
goto error;
if (!p->out_node) {
- MP_FATAL(vf, "Could not get script output node.\n");
+ MP_FATAL(p, "Could not get script output node.\n");
goto error;
}
const VSVideoInfo *vi = p->vsapi->getVideoInfo(p->out_node);
- if (!isConstantFormat(vi)) {
- MP_FATAL(vf, "Video format is required to be constant.\n");
+ if (!mp_from_vs(vi->format->id)) {
+ MP_FATAL(p, "Unsupported output format.\n");
goto error;
}
pthread_mutex_lock(&p->lock);
p->initializing = false;
pthread_mutex_unlock(&p->lock);
- MP_DBG(vf, "initialized.\n");
+ MP_DBG(p, "initialized.\n");
res = 0;
error:
if (p->vsapi) {
@@ -670,104 +693,110 @@ error:
p->vsapi->freeMap(vars);
}
if (res < 0)
- destroy_vs(vf);
+ destroy_vs(p);
return res;
}
-static int reconfig(struct vf_instance *vf, struct mp_image_params *in,
- struct mp_image_params *out)
+static void vf_vapoursynth_reset(struct mp_filter *f)
{
- struct vf_priv_s *p = vf->priv;
+ struct priv *p = f->priv;
- *out = *in;
- p->fmt_in = *in;
+ destroy_vs(p);
+}
- if (reinit_vs(vf) < 0)
- return -1;
+static void vf_vapoursynth_destroy(struct mp_filter *f)
+{
+ struct priv *p = f->priv;
- const VSVideoInfo *vi = p->vsapi->getVideoInfo(p->out_node);
- out->w = vi->width;
- out->h = vi->height;
- out->imgfmt = mp_from_vs(vi->format->id);
- if (!out->imgfmt) {
- MP_FATAL(vf, "Unsupported output format.\n");
- destroy_vs(vf);
- return -1;
- }
+ destroy_vs(p);
+ p->drv->uninit(p);
- struct mp_imgfmt_desc desc = mp_imgfmt_get_desc(in->imgfmt);
- if (in->w % desc.align_x || in->h % desc.align_y) {
- MP_FATAL(vf, "VapourSynth does not allow unaligned/cropped video sizes.\n");
- destroy_vs(vf);
- return -1;
- }
+ pthread_cond_destroy(&p->wakeup);
+ pthread_mutex_destroy(&p->lock);
- return 0;
+ mp_filter_free_children(f);
}
-static int query_format(struct vf_instance *vf, unsigned int fmt)
-{
- return mp_to_vs(fmt) != pfNone;
-}
+static const struct mp_filter_info vf_vapoursynth_filter = {
+ .name = "vapoursynth",
+ .process = vf_vapoursynth_process,
+ .reset = vf_vapoursynth_reset,
+ .destroy = vf_vapoursynth_destroy,
+ .priv_size = sizeof(struct priv),
+};
-static int control(vf_instance_t *vf, int request, void *data)
+static struct mp_filter *vf_vapoursynth_create(struct mp_filter *parent,
+ void *options)
{
- struct vf_priv_s *p = vf->priv;
- switch (request) {
- case VFCTRL_SEEK_RESET:
- if (p->out_node && reinit_vs(vf) < 0)
- return CONTROL_ERROR;
- return CONTROL_OK;
+ struct mp_filter *f = mp_filter_create(parent, &vf_vapoursynth_filter);
+ if (!f) {
+ talloc_free(options);
+ return NULL;
}
- return CONTROL_UNKNOWN;
-}
-static void uninit(struct vf_instance *vf)
-{
- struct vf_priv_s *p = vf->priv;
+ // In theory, we could allow multiple inputs and outputs, but since this
+ // wrapper is for --vf only, we don't.
+ mp_filter_add_pin(f, MP_PIN_IN, "in");
+ mp_filter_add_pin(f, MP_PIN_OUT, "out");
- destroy_vs(vf);
- p->drv->uninit(vf);
-
- pthread_cond_destroy(&p->wakeup);
- pthread_mutex_destroy(&p->lock);
-}
-static int vf_open(vf_instance_t *vf)
-{
- struct vf_priv_s *p = vf->priv;
- if (p->drv->init(vf) < 0)
- return 0;
- if (!p->cfg_file || !p->cfg_file[0]) {
- MP_FATAL(vf, "'file' parameter must be set.\n");
- return 0;
- }
- talloc_steal(vf, p->cfg_file);
- p->cfg_file = mp_get_user_path(vf, vf->chain->global, p->cfg_file);
+ struct priv *p = f->priv;
+ p->opts = talloc_steal(p, options);
+ p->log = f->log;
+ p->drv = p->opts->drv;
+ p->f = f;
pthread_mutex_init(&p->lock, NULL);
pthread_cond_init(&p->wakeup, NULL);
- vf->reconfig = reconfig;
- vf->filter_ext = filter_ext;
- vf->filter_out = filter_out;
- vf->needs_input = needs_input;
- vf->query_format = query_format;
- vf->control = control;
- vf->uninit = uninit;
- p->max_requests = p->cfg_maxrequests;
+
+ if (!p->opts->file || !p->opts->file[0]) {
+ MP_FATAL(p, "'file' parameter must be set.\n");
+ goto error;
+ }
+ talloc_steal(p, p->opts->file);
+ p->opts->file = mp_get_user_path(p, f->global, p->opts->file);
+
+ p->max_requests = p->opts->maxrequests;
if (p->max_requests < 0)
p->max_requests = av_cpu_count();
- MP_VERBOSE(vf, "using %d concurrent requests.\n", p->max_requests);
- int maxbuffer = p->cfg_maxbuffer * p->max_requests;
- p->buffered = talloc_array(vf, struct mp_image *, maxbuffer);
- p->requested = talloc_zero_array(vf, struct mp_image *, p->max_requests);
- return 1;
+ MP_VERBOSE(p, "using %d concurrent requests.\n", p->max_requests);
+ int maxbuffer = p->opts->maxbuffer * p->max_requests;
+ p->buffered = talloc_array(p, struct mp_image *, maxbuffer);
+ p->requested = talloc_zero_array(p, struct mp_image *, p->max_requests);
+
+ struct mp_autoconvert *conv = mp_autoconvert_create(f);
+ if (!conv)
+ goto error;
+
+ for (int n = 0; mpvs_fmt_table[n].bits; n++) {
+ int imgfmt = mp_from_vs(mpvs_fmt_table[n].vs);
+ if (imgfmt)
+ mp_autoconvert_add_imgfmt(conv, imgfmt, 0);
+ }
+
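+    // VS scripts consume per-frame durations (_DurationNum/_DurationDen),
+    // which mpv frames do not carry by themselves, so chain in a helper that
+    // fills in pkt_duration (presumably from consecutive PTS values).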
+ struct mp_filter *dur = mp_compute_frame_duration_create(f);
+ if (!dur)
+ goto error;
+
+ mp_pin_connect(conv->f->pins[0], f->ppins[0]);
+ mp_pin_connect(dur->pins[0], conv->f->pins[1]);
+ p->in_pin = dur->pins[1];
+
+ if (p->drv->init(p) < 0)
+ goto error;
+
+ return f;
+
+error:
+ talloc_free(f);
+ return NULL;
}
-#define OPT_BASE_STRUCT struct vf_priv_s
+
+#define OPT_BASE_STRUCT struct vapoursynth_opts
static const m_option_t vf_opts_fields[] = {
- OPT_STRING("file", cfg_file, M_OPT_FILE),
- OPT_INTRANGE("buffered-frames", cfg_maxbuffer, 0, 1, 9999, OPTDEF_INT(4)),
- OPT_CHOICE_OR_INT("concurrent-frames", cfg_maxrequests, 0, 1, 99,
+ OPT_STRING("file", file, M_OPT_FILE),
+ OPT_INTRANGE("buffered-frames", maxbuffer, 0, 1, 9999, OPTDEF_INT(4)),
+ OPT_CHOICE_OR_INT("concurrent-frames", maxrequests, 0, 1, 99,
({"auto", -1}), OPTDEF_INT(-1)),
{0}
};
@@ -776,24 +805,22 @@ static const m_option_t vf_opts_fields[] = {
#include <VSScript.h>
-static int drv_vss_init(struct vf_instance *vf)
+static int drv_vss_init(struct priv *p)
{
if (!vsscript_init()) {
- MP_FATAL(vf, "Could not initialize VapourSynth scripting.\n");
+ MP_FATAL(p, "Could not initialize VapourSynth scripting.\n");
return -1;
}
return 0;
}
-static void drv_vss_uninit(struct vf_instance *vf)
+static void drv_vss_uninit(struct priv *p)
{
vsscript_finalize();
}
-static int drv_vss_load_core(struct vf_instance *vf)
+static int drv_vss_load_core(struct priv *p)
{
- struct vf_priv_s *p = vf->priv;
-
// First load an empty script to get a VSScript, so that we get the vsapi
// and vscore.
if (vsscript_createScript(&p->se))
@@ -803,24 +830,20 @@ static int drv_vss_load_core(struct vf_instance *vf)
return 0;
}
-static int drv_vss_load(struct vf_instance *vf, VSMap *vars)
+static int drv_vss_load(struct priv *p, VSMap *vars)
{
- struct vf_priv_s *p = vf->priv;
-
vsscript_setVariable(p->se, vars);
- if (vsscript_evaluateFile(&p->se, p->cfg_file, 0)) {
- MP_FATAL(vf, "Script evaluation failed:\n%s\n", vsscript_getError(p->se));
+ if (vsscript_evaluateFile(&p->se, p->opts->file, 0)) {
+ MP_FATAL(p, "Script evaluation failed:\n%s\n", vsscript_getError(p->se));
return -1;
}
p->out_node = vsscript_getOutput(p->se, 0);
return 0;
}
-static void drv_vss_unload(struct vf_instance *vf)
+static void drv_vss_unload(struct priv *p)
{
- struct vf_priv_s *p = vf->priv;
-
if (p->se)
vsscript_freeScript(p->se);
p->se = NULL;
@@ -836,19 +859,17 @@ static const struct script_driver drv_vss = {
.unload = drv_vss_unload,
};
-static int vf_open_vss(vf_instance_t *vf)
-{
- struct vf_priv_s *p = vf->priv;
- p->drv = &drv_vss;
- return vf_open(vf);
-}
-
-const vf_info_t vf_info_vapoursynth = {
- .description = "VapourSynth bridge (Python)",
- .name = "vapoursynth",
- .open = vf_open_vss,
- .priv_size = sizeof(struct vf_priv_s),
- .options = vf_opts_fields,
+const struct mp_user_filter_entry vf_vapoursynth = {
+ .desc = {
+ .description = "VapourSynth bridge (Python)",
+ .name = "vapoursynth",
+ .priv_size = sizeof(OPT_BASE_STRUCT),
+ .priv_defaults = &(const OPT_BASE_STRUCT){
+ .drv = &drv_vss,
+ },
+ .options = vf_opts_fields,
+ },
+ .create = vf_vapoursynth_create,
};
#endif
@@ -875,9 +896,8 @@ static int mp_cpcall (lua_State *L, lua_CFunction func, void *ud)
#define FUCKYOUOHGODWHY lua_pushglobaltable
#endif
-static int drv_lazy_init(struct vf_instance *vf)
+static int drv_lazy_init(struct priv *p)
{
- struct vf_priv_s *p = vf->priv;
p->ls = luaL_newstate();
if (!p->ls)
return -1;
@@ -885,38 +905,36 @@ static int drv_lazy_init(struct vf_instance *vf)
p->vsapi = getVapourSynthAPI(VAPOURSYNTH_API_VERSION);
p->vscore = p->vsapi ? p->vsapi->createCore(0) : NULL;
if (!p->vscore) {
- MP_FATAL(vf, "Could not load VapourSynth.\n");
+ MP_FATAL(p, "Could not load VapourSynth.\n");
lua_close(p->ls);
return -1;
}
return 0;
}
-static void drv_lazy_uninit(struct vf_instance *vf)
+static void drv_lazy_uninit(struct priv *p)
{
- struct vf_priv_s *p = vf->priv;
lua_close(p->ls);
p->vsapi->freeCore(p->vscore);
}
-static int drv_lazy_load_core(struct vf_instance *vf)
+static int drv_lazy_load_core(struct priv *p)
{
// not needed
return 0;
}
-static struct vf_instance *get_vf(lua_State *L)
+static struct priv *get_priv(lua_State *L)
{
lua_getfield(L, LUA_REGISTRYINDEX, "p"); // p
- struct vf_instance *vf = lua_touserdata(L, -1); // p
+ struct priv *p = lua_touserdata(L, -1); // p
lua_pop(L, 1); // -
- return vf;
+ return p;
}
static void vsmap_to_table(lua_State *L, int index, VSMap *map)
{
- struct vf_instance *vf = get_vf(L);
- struct vf_priv_s *p = vf->priv;
+ struct priv *p = get_priv(L);
const VSAPI *vsapi = p->vsapi;
for (int n = 0; n < vsapi->propNumKeys(map); n++) {
const char *key = vsapi->propGetKey(map, n);
@@ -943,8 +961,7 @@ static void vsmap_to_table(lua_State *L, int index, VSMap *map)
static VSMap *table_to_vsmap(lua_State *L, int index)
{
- struct vf_instance *vf = get_vf(L);
- struct vf_priv_s *p = vf->priv;
+ struct priv *p = get_priv(L);
const VSAPI *vsapi = p->vsapi;
assert(index > 0);
VSMap *map = vsapi->createMap();
@@ -989,8 +1006,7 @@ static VSMap *table_to_vsmap(lua_State *L, int index)
static int l_invoke(lua_State *L)
{
- struct vf_instance *vf = get_vf(L);
- struct vf_priv_s *p = vf->priv;
+ struct priv *p = get_priv(L);
const VSAPI *vsapi = p->vsapi;
VSPlugin *plugin = vsapi->getPluginByNs(luaL_checkstring(L, 1), p->vscore);
@@ -1013,7 +1029,7 @@ static int l_invoke(lua_State *L)
}
struct load_ctx {
- struct vf_instance *vf;
+ struct priv *p;
VSMap *vars;
int status;
};
@@ -1022,18 +1038,17 @@ static int load_stuff(lua_State *L)
{
struct load_ctx *ctx = lua_touserdata(L, -1);
lua_pop(L, 1); // -
- struct vf_instance *vf = ctx->vf;
- struct vf_priv_s *p = vf->priv;
+ struct priv *p = ctx->p;
// setup stuff; should be idempotent
- lua_pushlightuserdata(L, vf);
+ lua_pushlightuserdata(L, p);
lua_setfield(L, LUA_REGISTRYINDEX, "p"); // -
lua_pushcfunction(L, l_invoke);
lua_setglobal(L, "invoke");
FUCKYOUOHGODWHY(L);
vsmap_to_table(L, lua_gettop(L), ctx->vars);
- if (luaL_dofile(L, p->cfg_file))
+ if (luaL_dofile(L, p->opts->file))
lua_error(L);
lua_pop(L, 1);
@@ -1044,12 +1059,11 @@ static int load_stuff(lua_State *L)
return 0;
}
-static int drv_lazy_load(struct vf_instance *vf, VSMap *vars)
+static int drv_lazy_load(struct priv *p, VSMap *vars)
{
- struct vf_priv_s *p = vf->priv;
- struct load_ctx ctx = {vf, vars, 0};
+ struct load_ctx ctx = {p, vars, 0};
if (mp_cpcall(p->ls, load_stuff, &ctx)) {
- MP_FATAL(vf, "filter creation failed: %s\n", lua_tostring(p->ls, -1));
+ MP_FATAL(p, "filter creation failed: %s\n", lua_tostring(p->ls, -1));
lua_pop(p->ls, 1);
ctx.status = -1;
}
@@ -1057,10 +1071,8 @@ static int drv_lazy_load(struct vf_instance *vf, VSMap *vars)
return ctx.status;
}
-static void drv_lazy_unload(struct vf_instance *vf)
+static void drv_lazy_unload(struct priv *p)
{
- struct vf_priv_s *p = vf->priv;
-
for (int n = 0; n < p->num_gc_noderef; n++) {
VSNodeRef *ref = p->gc_noderef[n];
if (ref)
@@ -1083,19 +1095,17 @@ static const struct script_driver drv_lazy = {
.unload = drv_lazy_unload,
};
-static int vf_open_lazy(vf_instance_t *vf)
-{
- struct vf_priv_s *p = vf->priv;
- p->drv = &drv_lazy;
- return vf_open(vf);
-}
-
-const vf_info_t vf_info_vapoursynth_lazy = {
- .description = "VapourSynth bridge (Lua)",
- .name = "vapoursynth-lazy",
- .open = vf_open_lazy,
- .priv_size = sizeof(struct vf_priv_s),
- .options = vf_opts_fields,
+const struct mp_user_filter_entry vf_vapoursynth_lazy = {
+ .desc = {
+ .description = "VapourSynth bridge (Lua)",
+ .name = "vapoursynth-lazy",
+ .priv_size = sizeof(OPT_BASE_STRUCT),
+ .priv_defaults = &(const OPT_BASE_STRUCT){
+ .drv = &drv_lazy,
+ },
+ .options = vf_opts_fields,
+ },
+ .create = vf_vapoursynth_create,
};
#endif
diff --git a/video/filter/vf_vavpp.c b/video/filter/vf_vavpp.c
index edee556232..608f1eea6f 100644
--- a/video/filter/vf_vavpp.c
+++ b/video/filter/vf_vavpp.c
@@ -25,8 +25,11 @@
#include "config.h"
#include "options/options.h"
-#include "vf.h"
+#include "filters/filter.h"
+#include "filters/filter_internal.h"
+#include "filters/user_filters.h"
#include "refqueue.h"
+
#include "video/fmt-conversion.h"
#include "video/vaapi.h"
#include "video/hwdec.h"
@@ -47,10 +50,14 @@ struct pipeline {
struct surface_refs forward, backward;
};
-struct vf_priv_s {
+struct opts {
int deint_type;
int interlaced_only;
int reversal_bug;
+};
+
+struct priv {
+ struct opts *opts;
bool do_deint;
VABufferID buffers[VAProcFilterCount];
int num_buffers;
@@ -65,15 +72,7 @@ struct vf_priv_s {
struct mp_refqueue *queue;
};
-static const struct vf_priv_s vf_priv_default = {
- .config = VA_INVALID_ID,
- .context = VA_INVALID_ID,
- .deint_type = 2,
- .interlaced_only = 1,
- .reversal_bug = 1,
-};
-
-static void add_surfaces(struct vf_priv_s *p, struct surface_refs *refs, int dir)
+static void add_surfaces(struct priv *p, struct surface_refs *refs, int dir)
{
for (int n = 0; n < refs->max_surfaces; n++) {
struct mp_image *s = mp_refqueue_get(p->queue, (1 + n) * dir);
@@ -96,18 +95,18 @@ static const int deint_algorithm[] = {
[5] = VAProcDeinterlacingMotionCompensated,
};
-static void flush_frames(struct vf_instance *vf)
+static void flush_frames(struct mp_filter *f)
{
- struct vf_priv_s *p = vf->priv;
+ struct priv *p = f->priv;
mp_refqueue_flush(p->queue);
}
-static void update_pipeline(struct vf_instance *vf)
+static void update_pipeline(struct mp_filter *vf)
{
- struct vf_priv_s *p = vf->priv;
+ struct priv *p = vf->priv;
VABufferID *filters = p->buffers;
int num_filters = p->num_buffers;
- if (p->deint_type && !p->do_deint) {
+ if (p->opts->deint_type && !p->do_deint) {
filters++;
num_filters--;
}
@@ -133,7 +132,7 @@ static void update_pipeline(struct vf_instance *vf)
p->pipe.num_output_colors = caps.num_output_color_standards;
p->pipe.forward.max_surfaces = caps.num_forward_references;
p->pipe.backward.max_surfaces = caps.num_backward_references;
- if (p->reversal_bug) {
+ if (p->opts->reversal_bug) {
int max = MPMAX(caps.num_forward_references, caps.num_backward_references);
mp_refqueue_set_refs(p->queue, max, max);
} else {
@@ -142,8 +141,8 @@ static void update_pipeline(struct vf_instance *vf)
}
mp_refqueue_set_mode(p->queue,
(p->do_deint ? MP_MODE_DEINT : 0) |
- (p->deint_type >= 2 ? MP_MODE_OUTPUT_FIELDS : 0) |
- (p->interlaced_only ? MP_MODE_INTERLACED_ONLY : 0));
+ (p->opts->deint_type >= 2 ? MP_MODE_OUTPUT_FIELDS : 0) |
+ (p->opts->interlaced_only ? MP_MODE_INTERLACED_ONLY : 0));
return;
nodeint:
@@ -151,9 +150,25 @@ nodeint:
mp_refqueue_set_mode(p->queue, 0);
}
-static struct mp_image *alloc_out(struct vf_instance *vf)
+static struct mp_image *alloc_out(struct mp_filter *vf)
{
- struct vf_priv_s *p = vf->priv;
+ struct priv *p = vf->priv;
+
+ struct mp_image *fmt = mp_refqueue_get_format(p->queue);
+ if (!fmt || !fmt->hwctx)
+ return NULL;
+
+ AVHWFramesContext *hw_frames = (void *)fmt->hwctx->data;
+ // VAAPI requires the full surface size to match for input and output.
+ int src_w = hw_frames->width;
+ int src_h = hw_frames->height;
+
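+    // Presumably (re)creates the AVHWFramesContext pool on first use and
+    // whenever the parameters change, replacing the allocation that the old
+    // reconfig() callback did up front.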
+ if (!mp_update_av_hw_frames_pool(&p->hw_pool, p->av_device_ref,
+ IMGFMT_VAAPI, IMGFMT_NV12, src_w, src_h))
+ {
+ MP_ERR(vf, "Failed to create hw pool.\n");
+ return NULL;
+ }
AVFrame *av_frame = av_frame_alloc();
if (!av_frame)
@@ -169,13 +184,13 @@ static struct mp_image *alloc_out(struct vf_instance *vf)
MP_ERR(vf, "Unknown error.\n");
return NULL;
}
- mp_image_set_size(img, vf->fmt_in.w, vf->fmt_in.h);
+ mp_image_set_size(img, fmt->w, fmt->h);
return img;
}
-static struct mp_image *render(struct vf_instance *vf)
+static struct mp_image *render(struct mp_filter *vf)
{
- struct vf_priv_s *p = vf->priv;
+ struct priv *p = vf->priv;
struct mp_image *in = mp_refqueue_get(p->queue, 0);
struct mp_image *img = NULL;
@@ -184,7 +199,7 @@ static struct mp_image *render(struct vf_instance *vf)
VABufferID buffer = VA_INVALID_ID;
VASurfaceID in_id = va_surface_id(in);
- if (!p->pipe.filters || in_id == VA_INVALID_ID || !p->hw_pool)
+ if (!p->pipe.filters || in_id == VA_INVALID_ID)
goto cleanup;
img = alloc_out(vf);
@@ -243,7 +258,7 @@ static struct mp_image *render(struct vf_instance *vf)
param->filters = p->pipe.filters;
param->num_filters = p->pipe.num_filters;
- int dir = p->reversal_bug ? -1 : 1;
+ int dir = p->opts->reversal_bug ? -1 : 1;
add_surfaces(p, &p->pipe.forward, 1 * dir);
param->forward_references = p->pipe.forward.surfaces;
@@ -277,108 +292,29 @@ cleanup:
return NULL;
}
-static struct mp_image *upload(struct vf_instance *vf, struct mp_image *in)
-{
- // Since we do no scaling or csp conversion, we can allocate an output
- // surface for input too.
- struct mp_image *out = alloc_out(vf);
- if (!out)
- return NULL;
- if (!mp_image_hw_upload(out, in)) {
- talloc_free(out);
- return NULL;
- }
- mp_image_copy_attributes(out, in);
- return out;
-}
-
-static int filter_ext(struct vf_instance *vf, struct mp_image *in)
+static void vf_vavpp_process(struct mp_filter *f)
{
- struct vf_priv_s *p = vf->priv;
-
- update_pipeline(vf);
+ struct priv *p = f->priv;
- if (in && in->imgfmt != IMGFMT_VAAPI) {
- struct mp_image *tmp = upload(vf, in);
- talloc_free(in);
- in = tmp;
- if (!in)
- return -1;
- }
-
- mp_refqueue_add_input(p->queue, in);
- return 0;
-}
+ update_pipeline(f);
-static int filter_out(struct vf_instance *vf)
-{
- struct vf_priv_s *p = vf->priv;
+ mp_refqueue_execute_reinit(p->queue);
- if (!mp_refqueue_has_output(p->queue))
- return 0;
+ if (!mp_refqueue_can_output(p->queue))
+ return;
- // no filtering
if (!p->pipe.num_filters || !mp_refqueue_should_deint(p->queue)) {
+ // no filtering
struct mp_image *in = mp_refqueue_get(p->queue, 0);
- vf_add_output_frame(vf, mp_image_new_ref(in));
- mp_refqueue_next(p->queue);
- return 0;
- }
-
- struct mp_image *out = render(vf);
- mp_refqueue_next_field(p->queue);
- if (!out)
- return -1; // cannot render
- vf_add_output_frame(vf, out);
- return 0;
-}
-
-static int reconfig(struct vf_instance *vf, struct mp_image_params *in,
- struct mp_image_params *out)
-{
- struct vf_priv_s *p = vf->priv;
-
- flush_frames(vf);
- av_buffer_unref(&p->hw_pool);
-
- p->params = *in;
- *out = *in;
-
- int src_w = in->w;
- int src_h = in->h;
-
- if (in->imgfmt == IMGFMT_VAAPI) {
- if (!vf->in_hwframes_ref)
- return -1;
- AVHWFramesContext *hw_frames = (void *)vf->in_hwframes_ref->data;
- // VAAPI requires the full surface size to match for input and output.
- src_w = hw_frames->width;
- src_h = hw_frames->height;
+ mp_refqueue_write_out_pin(p->queue, mp_image_new_ref(in));
} else {
- out->imgfmt = IMGFMT_VAAPI;
- out->hw_subfmt = IMGFMT_NV12;
- }
-
- p->hw_pool = av_hwframe_ctx_alloc(p->av_device_ref);
- if (!p->hw_pool)
- return -1;
- AVHWFramesContext *hw_frames = (void *)p->hw_pool->data;
- hw_frames->format = AV_PIX_FMT_VAAPI;
- hw_frames->sw_format = imgfmt2pixfmt(out->hw_subfmt);
- hw_frames->width = src_w;
- hw_frames->height = src_h;
- if (av_hwframe_ctx_init(p->hw_pool) < 0) {
- MP_ERR(vf, "Failed to initialize libavutil vaapi frames pool.\n");
- av_buffer_unref(&p->hw_pool);
- return -1;
+ mp_refqueue_write_out_pin(p->queue, render(f));
}
-
- return 0;
}
-static void uninit(struct vf_instance *vf)
+static void uninit(struct mp_filter *vf)
{
- struct vf_priv_s *p = vf->priv;
+ struct priv *p = vf->priv;
for (int i = 0; i < p->num_buffers; i++)
vaDestroyBuffer(p->display, p->buffers[i]);
if (p->context != VA_INVALID_ID)
@@ -387,41 +323,23 @@ static void uninit(struct vf_instance *vf)
vaDestroyConfig(p->display, p->config);
av_buffer_unref(&p->hw_pool);
flush_frames(vf);
- mp_refqueue_free(p->queue);
+ talloc_free(p->queue);
av_buffer_unref(&p->av_device_ref);
}
-static int query_format(struct vf_instance *vf, unsigned int imgfmt)
-{
- if (imgfmt == IMGFMT_VAAPI || imgfmt == IMGFMT_NV12 || imgfmt == IMGFMT_420P)
- return vf_next_query_format(vf, IMGFMT_VAAPI);
- return 0;
-}
-
-static int control(struct vf_instance *vf, int request, void* data)
-{
- switch (request){
- case VFCTRL_SEEK_RESET:
- flush_frames(vf);
- return true;
- default:
- return CONTROL_UNKNOWN;
- }
-}
-
-static int va_query_filter_caps(struct vf_instance *vf, VAProcFilterType type,
+static int va_query_filter_caps(struct mp_filter *vf, VAProcFilterType type,
void *caps, unsigned int count)
{
- struct vf_priv_s *p = vf->priv;
+ struct priv *p = vf->priv;
VAStatus status = vaQueryVideoProcFilterCaps(p->display, p->context, type,
caps, &count);
return CHECK_VA_STATUS(vf, "vaQueryVideoProcFilterCaps()") ? count : 0;
}
-static VABufferID va_create_filter_buffer(struct vf_instance *vf, int bytes,
+static VABufferID va_create_filter_buffer(struct mp_filter *vf, int bytes,
int num, void *data)
{
- struct vf_priv_s *p = vf->priv;
+ struct priv *p = vf->priv;
VABufferID buffer;
VAStatus status = vaCreateBuffer(p->display, p->context,
VAProcFilterParameterBufferType,
@@ -429,9 +347,9 @@ static VABufferID va_create_filter_buffer(struct vf_instance *vf, int bytes,
return CHECK_VA_STATUS(vf, "vaCreateBuffer()") ? buffer : VA_INVALID_ID;
}
-static bool initialize(struct vf_instance *vf)
+static bool initialize(struct mp_filter *vf)
{
- struct vf_priv_s *p = vf->priv;
+ struct priv *p = vf->priv;
VAStatus status;
VAConfigID config;
@@ -458,14 +376,15 @@ static bool initialize(struct vf_instance *vf)
buffers[i] = VA_INVALID_ID;
for (int i = 0; i < num_filters; i++) {
if (filters[i] == VAProcFilterDeinterlacing) {
- if (p->deint_type < 1)
+ if (p->opts->deint_type < 1)
continue;
VAProcFilterCapDeinterlacing caps[VAProcDeinterlacingCount];
int num = va_query_filter_caps(vf, VAProcFilterDeinterlacing, caps,
VAProcDeinterlacingCount);
if (!num)
continue;
- VAProcDeinterlacingType algorithm = deint_algorithm[p->deint_type];
+ VAProcDeinterlacingType algorithm =
+ deint_algorithm[p->opts->deint_type];
for (int n=0; n < num; n++) { // find the algorithm
if (caps[n].type != algorithm)
continue;
@@ -482,47 +401,59 @@ static bool initialize(struct vf_instance *vf)
p->num_buffers = 0;
if (buffers[VAProcFilterDeinterlacing] != VA_INVALID_ID)
p->buffers[p->num_buffers++] = buffers[VAProcFilterDeinterlacing];
- p->do_deint = !!p->deint_type;
+ p->do_deint = !!p->opts->deint_type;
// next filters: p->buffers[p->num_buffers++] = buffers[next_filter];
return true;
}
-static int vf_open(vf_instance_t *vf)
+static const struct mp_filter_info vf_vavpp_filter = {
+ .name = "vavpp",
+ .process = vf_vavpp_process,
+ .reset = flush_frames,
+ .destroy = uninit,
+ .priv_size = sizeof(struct priv),
+};
+
+static struct mp_filter *vf_vavpp_create(struct mp_filter *parent, void *options)
{
- struct vf_priv_s *p = vf->priv;
-
- if (!vf->hwdec_devs)
- return 0;
-
- vf->reconfig = reconfig;
- vf->filter_ext = filter_ext;
- vf->filter_out = filter_out;
- vf->query_format = query_format;
- vf->uninit = uninit;
- vf->control = control;
-
- p->queue = mp_refqueue_alloc();
-
- hwdec_devices_request_all(vf->hwdec_devs);
- p->av_device_ref =
- hwdec_devices_get_lavc(vf->hwdec_devs, AV_HWDEVICE_TYPE_VAAPI);
- if (!p->av_device_ref) {
- uninit(vf);
- return 0;
+ struct mp_filter *f = mp_filter_create(parent, &vf_vavpp_filter);
+ if (!f) {
+ talloc_free(options);
+ return NULL;
}
+ mp_filter_add_pin(f, MP_PIN_IN, "in");
+ mp_filter_add_pin(f, MP_PIN_OUT, "out");
+
+ struct priv *p = f->priv;
+ p->opts = talloc_steal(p, options);
+ p->config = VA_INVALID_ID;
+ p->context = VA_INVALID_ID;
+
+ p->queue = mp_refqueue_alloc(f);
+
+ p->av_device_ref = mp_filter_load_hwdec_device(f, AV_HWDEVICE_TYPE_VAAPI);
+ if (!p->av_device_ref)
+ goto error;
+
AVHWDeviceContext *hwctx = (void *)p->av_device_ref->data;
AVVAAPIDeviceContext *vactx = hwctx->hwctx;
p->display = vactx->display;
- if (initialize(vf))
- return true;
- uninit(vf);
- return false;
+ mp_refqueue_add_in_format(p->queue, IMGFMT_VAAPI, 0);
+
+ if (!initialize(f))
+ goto error;
+
+ return f;
+
+error:
+ talloc_free(f);
+ return NULL;
}
-#define OPT_BASE_STRUCT struct vf_priv_s
+#define OPT_BASE_STRUCT struct opts
static const m_option_t vf_opts_fields[] = {
OPT_CHOICE("deint", deint_type, 0,
// The values must match with deint_algorithm[].
@@ -537,11 +468,17 @@ static const m_option_t vf_opts_fields[] = {
{0}
};
-const vf_info_t vf_info_vaapi = {
- .description = "VA-API Video Post-Process Filter",
- .name = "vavpp",
- .open = vf_open,
- .priv_size = sizeof(struct vf_priv_s),
- .priv_defaults = &vf_priv_default,
- .options = vf_opts_fields,
+const struct mp_user_filter_entry vf_vavpp = {
+ .desc = {
+ .description = "VA-API Video Post-Process Filter",
+ .name = "vavpp",
+ .priv_size = sizeof(OPT_BASE_STRUCT),
+ .priv_defaults = &(const OPT_BASE_STRUCT){
+ .deint_type = 2,
+ .interlaced_only = 1,
+ .reversal_bug = 1,
+ },
+ .options = vf_opts_fields,
+ },
+ .create = vf_vavpp_create,
};
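
The rewritten alloc_out() above leaves pool management to mp_update_av_hw_frames_pool(), which this commit adds outside the hunks shown here. Reconstructed from the av_hwframe_ctx_alloc()/av_hwframe_ctx_init() sequence deleted from reconfig(), it plausibly amounts to the sketch below; the call site confirms the argument order and a boolean-style return, but whether the real helper reuses an unchanged pool is not visible here, so treat this as an illustration rather than the actual implementation.

    #include <stdbool.h>
    #include <libavutil/hwcontext.h>
    #include "video/fmt-conversion.h"

    // Sketch only: (re)create *hw_pool so it hands out hw_imgfmt surfaces of
    // size w x h backed by sw_imgfmt. Mirrors the pool setup removed from
    // reconfig(); the real helper may additionally keep an unchanged pool.
    static bool update_av_hw_frames_pool_sketch(AVBufferRef **hw_pool,
                                                AVBufferRef *av_device_ref,
                                                int hw_imgfmt, int sw_imgfmt,
                                                int w, int h)
    {
        av_buffer_unref(hw_pool);

        *hw_pool = av_hwframe_ctx_alloc(av_device_ref);
        if (!*hw_pool)
            return false;

        AVHWFramesContext *fctx = (void *)(*hw_pool)->data;
        fctx->format = imgfmt2pixfmt(hw_imgfmt);     // e.g. AV_PIX_FMT_VAAPI
        fctx->sw_format = imgfmt2pixfmt(sw_imgfmt);  // e.g. AV_PIX_FMT_NV12
        fctx->width = w;
        fctx->height = h;

        if (av_hwframe_ctx_init(*hw_pool) < 0) {
            av_buffer_unref(hw_pool);
            return false;
        }
        return true;
    }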
diff --git a/video/filter/vf_vdpaupp.c b/video/filter/vf_vdpaupp.c
index 391dc9e6b1..3b10e13421 100644
--- a/video/filter/vf_vdpaupp.c
+++ b/video/filter/vf_vdpaupp.c
@@ -26,62 +26,34 @@
#include "common/common.h"
#include "common/msg.h"
#include "options/m_option.h"
-
+#include "filters/filter.h"
+#include "filters/filter_internal.h"
+#include "filters/user_filters.h"
#include "video/img_format.h"
#include "video/mp_image.h"
#include "video/hwdec.h"
#include "video/vdpau.h"
#include "video/vdpau_mixer.h"
-#include "vf.h"
#include "refqueue.h"
// Note: this filter does no actual filtering; it merely sets appropriate
// flags on vdpau images (mp_vdpau_mixer_frame) to do the appropriate
// processing on the final rendering process in the VO.
-struct vf_priv_s {
- struct mp_vdpau_ctx *ctx;
- struct mp_refqueue *queue;
-
- int def_deintmode;
+struct opts {
int deint_enabled;
int interlaced_only;
struct mp_vdpau_mixer_opts opts;
};
-static int filter_ext(struct vf_instance *vf, struct mp_image *mpi)
-{
- struct vf_priv_s *p = vf->priv;
-
- if (p->opts.deint >= 2) {
- mp_refqueue_set_refs(p->queue, 1, 1); // 2 past fields, 1 future field
- } else {
- mp_refqueue_set_refs(p->queue, 0, 0);
- }
- mp_refqueue_set_mode(p->queue,
- (p->deint_enabled ? MP_MODE_DEINT : 0) |
- (p->interlaced_only ? MP_MODE_INTERLACED_ONLY : 0) |
- (p->opts.deint >= 2 ? MP_MODE_OUTPUT_FIELDS : 0));
-
- if (mpi) {
- struct mp_image *new = mp_vdpau_upload_video_surface(p->ctx, mpi);
- talloc_free(mpi);
- if (!new)
- return -1;
- mpi = new;
-
- if (mp_vdpau_mixed_frame_get(mpi)) {
- MP_ERR(vf, "Can't apply vdpaupp filter multiple times.\n");
- vf_add_output_frame(vf, mpi);
- return -1;
- }
- }
-
- mp_refqueue_add_input(p->queue, mpi);
- return 0;
-}
+struct priv {
+ struct opts *opts;
+ struct mp_vdpau_ctx *ctx;
+ struct mp_refqueue *queue;
+ struct mp_pin *in_pin;
+};
-static VdpVideoSurface ref_field(struct vf_priv_s *p,
+static VdpVideoSurface ref_field(struct priv *p,
struct mp_vdpau_mixer_frame *frame, int pos)
{
struct mp_image *mpi = mp_image_new_ref(mp_refqueue_get_field(p->queue, pos));
@@ -91,17 +63,19 @@ static VdpVideoSurface ref_field(struct vf_priv_s *p,
return (uintptr_t)mpi->planes[3];
}
-static int filter_out(struct vf_instance *vf)
+static void vf_vdpaupp_process(struct mp_filter *f)
{
- struct vf_priv_s *p = vf->priv;
+ struct priv *p = f->priv;
- if (!mp_refqueue_has_output(p->queue))
- return 0;
+ mp_refqueue_execute_reinit(p->queue);
+
+ if (!mp_refqueue_can_output(p->queue))
+ return;
struct mp_image *mpi =
mp_vdpau_mixed_frame_create(mp_refqueue_get_field(p->queue, 0));
if (!mpi)
- return -1; // OOM
+ return; // OOM
struct mp_vdpau_mixer_frame *frame = mp_vdpau_mixed_frame_get(mpi);
if (!mp_refqueue_should_deint(p->queue)) {
@@ -117,72 +91,52 @@ static int filter_out(struct vf_instance *vf)
frame->past[0] = ref_field(p, frame, -1);
frame->past[1] = ref_field(p, frame, -2);
- frame->opts = p->opts;
+ frame->opts = p->opts->opts;
mpi->planes[3] = (void *)(uintptr_t)frame->current;
- mp_refqueue_next_field(p->queue);
+ mpi->params.hw_subfmt = 0; // force mixer
- vf_add_output_frame(vf, mpi);
- return 0;
+ mp_refqueue_write_out_pin(p->queue, mpi);
}
-static int reconfig(struct vf_instance *vf, struct mp_image_params *in,
- struct mp_image_params *out)
+static void vf_vdpaupp_reset(struct mp_filter *f)
{
- struct vf_priv_s *p = vf->priv;
+ struct priv *p = f->priv;
mp_refqueue_flush(p->queue);
- *out = *in;
- out->imgfmt = IMGFMT_VDPAU;
- out->hw_subfmt = 0;
- return 0;
}
-static int query_format(struct vf_instance *vf, unsigned int fmt)
+static void vf_vdpaupp_destroy(struct mp_filter *f)
{
- if (fmt == IMGFMT_VDPAU || mp_vdpau_get_format(fmt, NULL, NULL))
- return vf_next_query_format(vf, IMGFMT_VDPAU);
- return 0;
+ struct priv *p = f->priv;
+ talloc_free(p->queue);
}
-static int control(vf_instance_t *vf, int request, void *data)
-{
- struct vf_priv_s *p = vf->priv;
-
- switch (request) {
- case VFCTRL_SEEK_RESET:
- mp_refqueue_flush(p->queue);
- return CONTROL_OK;
- }
- return CONTROL_UNKNOWN;
-}
-
-static void uninit(struct vf_instance *vf)
-{
- struct vf_priv_s *p = vf->priv;
-
- mp_refqueue_free(p->queue);
-}
+static const struct mp_filter_info vf_vdpaupp_filter = {
+ .name = "vdpaupp",
+ .process = vf_vdpaupp_process,
+ .reset = vf_vdpaupp_reset,
+ .destroy = vf_vdpaupp_destroy,
+ .priv_size = sizeof(struct priv),
+};
-static int vf_open(vf_instance_t *vf)
+static struct mp_filter *vf_vdpaupp_create(struct mp_filter *parent, void *options)
{
- struct vf_priv_s *p = vf->priv;
+ struct mp_filter *f = mp_filter_create(parent, &vf_vdpaupp_filter);
+ if (!f) {
+ talloc_free(options);
+ return NULL;
+ }
- if (!vf->hwdec_devs)
- return 0;
+ mp_filter_add_pin(f, MP_PIN_IN, "in");
+ mp_filter_add_pin(f, MP_PIN_OUT, "out");
- vf->reconfig = reconfig;
- vf->filter_ext = filter_ext;
- vf->filter_out = filter_out;
- vf->query_format = query_format;
- vf->control = control;
- vf->uninit = uninit;
+ struct priv *p = f->priv;
+ p->opts = talloc_steal(p, options);
- p->queue = mp_refqueue_alloc();
+ p->queue = mp_refqueue_alloc(f);
- hwdec_devices_request_all(vf->hwdec_devs);
- AVBufferRef *ref =
- hwdec_devices_get_lavc(vf->hwdec_devs, AV_HWDEVICE_TYPE_VDPAU);
+ AVBufferRef *ref = mp_filter_load_hwdec_device(f, AV_HWDEVICE_TYPE_VDPAU);
if (!ref)
goto error;
p->ctx = mp_vdpau_get_ctx_from_av(ref);
@@ -190,18 +144,29 @@ static int vf_open(vf_instance_t *vf)
if (!p->ctx)
goto error;
- p->def_deintmode = p->opts.deint;
- if (!p->deint_enabled)
- p->opts.deint = 0;
+ if (!p->opts->deint_enabled)
+ p->opts->opts.deint = 0;
- return 1;
+ if (p->opts->opts.deint >= 2) {
+ mp_refqueue_set_refs(p->queue, 1, 1); // 2 past fields, 1 future field
+ } else {
+ mp_refqueue_set_refs(p->queue, 0, 0);
+ }
+ mp_refqueue_set_mode(p->queue,
+ (p->opts->deint_enabled ? MP_MODE_DEINT : 0) |
+ (p->opts->interlaced_only ? MP_MODE_INTERLACED_ONLY : 0) |
+ (p->opts->opts.deint >= 2 ? MP_MODE_OUTPUT_FIELDS : 0));
+
+ mp_refqueue_add_in_format(p->queue, IMGFMT_VDPAU, 0);
+
+ return f;
error:
- uninit(vf);
- return 0;
+ talloc_free(f);
+ return NULL;
}
-#define OPT_BASE_STRUCT struct vf_priv_s
+#define OPT_BASE_STRUCT struct opts
static const m_option_t vf_opts_fields[] = {
OPT_CHOICE("deint-mode", opts.deint, 0,
({"first-field", 1},
@@ -219,10 +184,12 @@ static const m_option_t vf_opts_fields[] = {
{0}
};
-const vf_info_t vf_info_vdpaupp = {
- .description = "vdpau postprocessing",
- .name = "vdpaupp",
- .open = vf_open,
- .priv_size = sizeof(struct vf_priv_s),
- .options = vf_opts_fields,
+const struct mp_user_filter_entry vf_vdpaupp = {
+ .desc = {
+ .description = "vdpau postprocessing",
+ .name = "vdpaupp",
+ .priv_size = sizeof(OPT_BASE_STRUCT),
+ .options = vf_opts_fields,
+ },
+ .create = vf_vdpaupp_create,
};
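
vf_vavpp and vf_vdpaupp end up with the same skeleton: an mp_filter_info with process/reset/destroy hooks, a create function that adds one "in" and one "out" pin and allocates an mp_refqueue, and a process callback that runs mp_refqueue_execute_reinit(), checks mp_refqueue_can_output(), and pushes a frame with mp_refqueue_write_out_pin(). Reduced to a pass-through filter, that pattern looks roughly like the sketch below; the vf_null_* names are illustrative, and only calls already used by the two conversions above are assumed.

    #include "filters/filter.h"
    #include "filters/filter_internal.h"
    #include "filters/user_filters.h"
    #include "video/mp_image.h"
    #include "mpv_talloc.h"
    #include "refqueue.h"

    struct null_priv {
        struct mp_refqueue *queue;
    };

    static void vf_null_process(struct mp_filter *f)
    {
        struct null_priv *p = f->priv;

        // Pull input / react to format changes, like the filters above.
        mp_refqueue_execute_reinit(p->queue);

        if (!mp_refqueue_can_output(p->queue))
            return;

        // Pass the current frame through unchanged.
        struct mp_image *in = mp_refqueue_get(p->queue, 0);
        mp_refqueue_write_out_pin(p->queue, mp_image_new_ref(in));
    }

    static void vf_null_reset(struct mp_filter *f)
    {
        struct null_priv *p = f->priv;
        mp_refqueue_flush(p->queue);
    }

    static void vf_null_destroy(struct mp_filter *f)
    {
        struct null_priv *p = f->priv;
        talloc_free(p->queue);
    }

    static const struct mp_filter_info vf_null_filter = {
        .name = "null",
        .process = vf_null_process,
        .reset = vf_null_reset,
        .destroy = vf_null_destroy,
        .priv_size = sizeof(struct null_priv),
    };

    static struct mp_filter *vf_null_create(struct mp_filter *parent, void *options)
    {
        struct mp_filter *f = mp_filter_create(parent, &vf_null_filter);
        if (!f) {
            talloc_free(options);
            return NULL;
        }
        mp_filter_add_pin(f, MP_PIN_IN, "in");
        mp_filter_add_pin(f, MP_PIN_OUT, "out");

        struct null_priv *p = f->priv;
        p->queue = mp_refqueue_alloc(f);
        return f;
    }

Registration then mirrors the vf_vavpp / vf_vdpaupp entries: an mp_user_filter_entry whose .desc carries the name and option metadata and whose .create points at the function above.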
diff --git a/video/fmt-conversion.c b/video/fmt-conversion.c
index e89ea6cd59..f6a774e289 100644
--- a/video/fmt-conversion.c
+++ b/video/fmt-conversion.c
@@ -67,8 +67,7 @@ static const struct {
{IMGFMT_VAAPI, AV_PIX_FMT_VAAPI},
{IMGFMT_DXVA2, AV_PIX_FMT_DXVA2_VLD},
#if HAVE_D3D_HWACCEL
- {IMGFMT_D3D11VA, AV_PIX_FMT_D3D11},
- {IMGFMT_D3D11NV12, AV_PIX_FMT_D3D11},
+ {IMGFMT_D3D11, AV_PIX_FMT_D3D11},
#endif
{IMGFMT_MMAL, AV_PIX_FMT_MMAL},
#if HAVE_CUDA_HWACCEL
diff --git a/video/hwdec.c b/video/hwdec.c
index b52b082a38..b3c2131791 100644
--- a/video/hwdec.c
+++ b/video/hwdec.c
@@ -34,18 +34,17 @@ void hwdec_devices_destroy(struct mp_hwdec_devices *devs)
talloc_free(devs);
}
-struct AVBufferRef *hwdec_devices_get_lavc(struct mp_hwdec_devices *devs,
- int av_hwdevice_type)
+struct mp_hwdec_ctx *hwdec_devices_get_by_lavc(struct mp_hwdec_devices *devs,
+ int av_hwdevice_type)
{
- AVBufferRef *res = NULL;
+ struct mp_hwdec_ctx *res = NULL;
pthread_mutex_lock(&devs->lock);
for (int n = 0; n < devs->num_hwctxs; n++) {
struct mp_hwdec_ctx *dev = devs->hwctxs[n];
if (dev->av_device_ref) {
AVHWDeviceContext *hwctx = (void *)dev->av_device_ref->data;
if (hwctx->type == av_hwdevice_type) {
- if (dev->av_device_ref)
- res = av_buffer_ref(dev->av_device_ref);
+ res = dev;
break;
}
}
@@ -54,10 +53,24 @@ struct AVBufferRef *hwdec_devices_get_lavc(struct mp_hwdec_devices *devs,
return res;
}
+struct AVBufferRef *hwdec_devices_get_lavc(struct mp_hwdec_devices *devs,
+ int av_hwdevice_type)
+{
+ struct mp_hwdec_ctx *ctx = hwdec_devices_get_by_lavc(devs, av_hwdevice_type);
+ if (!ctx)
+ return NULL;
+ return av_buffer_ref(ctx->av_device_ref);
+}
+
struct mp_hwdec_ctx *hwdec_devices_get_first(struct mp_hwdec_devices *devs)
{
+ return hwdec_devices_get_n(devs, 0);
+}
+
+struct mp_hwdec_ctx *hwdec_devices_get_n(struct mp_hwdec_devices *devs, int n)
+{
pthread_mutex_lock(&devs->lock);
- struct mp_hwdec_ctx *res = devs->num_hwctxs ? devs->hwctxs[0] : NULL;
+ struct mp_hwdec_ctx *res = n < devs->num_hwctxs ? devs->hwctxs[n] : NULL;
pthread_mutex_unlock(&devs->lock);
return res;
}
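
For callers, hwdec_devices_get_lavc() keeps its old contract (a fresh AVBufferRef or NULL) and becomes a thin wrapper over hwdec_devices_get_by_lavc(), which exposes the whole mp_hwdec_ctx, while hwdec_devices_get_n() generalizes get_first() to any index. A hypothetical caller using only these accessors might look like this; the function name and the logging are illustrative, not part of the commit.

    #include <stdbool.h>
    #include <libavutil/hwcontext.h>
    #include "common/msg.h"
    #include "video/hwdec.h"

    // Hypothetical: list every registered device, then report whether one of
    // the requested libavutil device type exists at all.
    static bool have_device_of_type(struct mp_hwdec_devices *devs,
                                    struct mp_log *log,
                                    enum AVHWDeviceType type)
    {
        for (int n = 0; ; n++) {
            struct mp_hwdec_ctx *ctx = hwdec_devices_get_n(devs, n);
            if (!ctx)
                break;
            mp_info(log, "hwdec device %d: %s\n", n, ctx->driver_name);
        }
        return hwdec_devices_get_by_lavc(devs, type) != NULL;
    }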
diff --git a/video/hwdec.h b/video/hwdec.h
index 1022654d85..d951a1cd8c 100644
--- a/video/hwdec.h
+++ b/video/hwdec.h
@@ -14,7 +14,9 @@ struct mp_hwdec_ctx {
struct AVBufferRef *av_device_ref; // AVHWDeviceContext*
// List of IMGFMT_s, terminated with 0. NULL if N/A.
- int *supported_formats;
+ const int *supported_formats;
+ // HW format for which above hw_subfmts are valid.
+ int hw_imgfmt;
};
// Used to communicate hardware decoder device handles from VO to video decoder.
@@ -32,9 +34,15 @@ void hwdec_devices_destroy(struct mp_hwdec_devices *devs);
struct AVBufferRef *hwdec_devices_get_lavc(struct mp_hwdec_devices *devs,
int av_hwdevice_type);
+struct mp_hwdec_ctx *hwdec_devices_get_by_lavc(struct mp_hwdec_devices *devs,
+ int av_hwdevice_type);
+
// For code which still strictly assumes there is 1 (or none) device.
struct mp_hwdec_ctx *hwdec_devices_get_first(struct mp_hwdec_devices *devs);
+// Return the n-th device. NULL if none.
+struct mp_hwdec_ctx *hwdec_devices_get_n(struct mp_hwdec_devices *devs, int n);
+
// Add this to the list of internal devices. Adding the same pointer twice must
// be avoided.
void hwdec_devices_add(struct mp_hwdec_devices *devs, struct mp_hwdec_ctx *ctx);
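
The two new mp_hwdec_ctx fields work together: hw_imgfmt names the hardware wrapper format a device produces, and supported_formats lists the software sub-formats valid for it (as the d3d11 hwdecs below now advertise). The sketch below shows how a consumer such as the new autoconvert logic might combine them; treating a NULL supported_formats as "no restriction" is an assumption layered on the "NULL if N/A" comment, and the helper name is made up for illustration.

    #include <stdbool.h>
    #include "video/hwdec.h"

    // Sketch: does this device claim to handle images with hardware format
    // `imgfmt` and software sub-format `hw_subfmt`?
    static bool ctx_accepts(struct mp_hwdec_ctx *ctx, int imgfmt, int hw_subfmt)
    {
        if (ctx->hw_imgfmt && ctx->hw_imgfmt != imgfmt)
            return false;
        if (!ctx->supported_formats)
            return true; // assumption: no list advertised means no restriction
        for (int i = 0; ctx->supported_formats[i]; i++) {
            if (ctx->supported_formats[i] == hw_subfmt)
                return true;
        }
        return false;
    }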
diff --git a/video/img_format.c b/video/img_format.c
index 131ff4cb11..42c5c9f5ca 100644
--- a/video/img_format.c
+++ b/video/img_format.c
@@ -36,8 +36,6 @@ struct mp_imgfmt_entry {
static const struct mp_imgfmt_entry mp_imgfmt_list[] = {
// not in ffmpeg
{"vdpau_output", IMGFMT_VDPAU_OUTPUT},
- {"d3d11_nv12", IMGFMT_D3D11NV12},
- {"d3d11_rgb", IMGFMT_D3D11RGB},
// FFmpeg names have an annoying "_vld" suffix
{"videotoolbox", IMGFMT_VIDEOTOOLBOX},
{"vaapi", IMGFMT_VAAPI},
@@ -103,20 +101,12 @@ static struct mp_imgfmt_desc mp_only_imgfmt_desc(int mpfmt)
{
switch (mpfmt) {
case IMGFMT_VDPAU_OUTPUT:
- case IMGFMT_D3D11RGB:
return (struct mp_imgfmt_desc) {
.id = mpfmt,
.avformat = AV_PIX_FMT_NONE,
.flags = MP_IMGFLAG_BE | MP_IMGFLAG_LE | MP_IMGFLAG_RGB |
MP_IMGFLAG_HWACCEL,
};
- case IMGFMT_D3D11NV12:
- return (struct mp_imgfmt_desc) {
- .id = mpfmt,
- .avformat = AV_PIX_FMT_NONE,
- .flags = MP_IMGFLAG_BE | MP_IMGFLAG_LE | MP_IMGFLAG_YUV |
- MP_IMGFLAG_HWACCEL,
- };
}
return (struct mp_imgfmt_desc) {0};
}
diff --git a/video/img_format.h b/video/img_format.h
index 8f90682325..b79cbdb99b 100644
--- a/video/img_format.h
+++ b/video/img_format.h
@@ -190,16 +190,9 @@ enum mp_imgfmt {
IMGFMT_VDPAU, // VdpVideoSurface
IMGFMT_VDPAU_OUTPUT, // VdpOutputSurface
IMGFMT_VAAPI,
- // NV12/P010/P016
// plane 0: ID3D11Texture2D
// plane 1: slice index casted to pointer
- IMGFMT_D3D11VA,
- // Like IMGFMT_D3D11VA, but format is restricted to NV12.
- IMGFMT_D3D11NV12,
- // Like IMGFMT_D3D11VA, but format is restricted to a certain RGB format.
- // Also, it must have a share handle, have been flushed, and not be a
- // texture array slice.
- IMGFMT_D3D11RGB,
+ IMGFMT_D3D11,
IMGFMT_DXVA2, // IDirect3DSurface9 (NV12/P010/P016)
IMGFMT_MMAL, // MMAL_BUFFER_HEADER_T
IMGFMT_VIDEOTOOLBOX, // CVPixelBufferRef
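
The header change above folds IMGFMT_D3D11VA, IMGFMT_D3D11NV12 and IMGFMT_D3D11RGB into a single IMGFMT_D3D11; the pixel layout now travels in mp_image_params.hw_subfmt, which is why hwdec_d3d11eglrgb below starts checking src_params.hw_subfmt explicitly. A minimal sketch of the replacement pattern follows; the helper name is illustrative, not from the commit.

    #include <stdbool.h>
    #include "video/img_format.h"
    #include "video/mp_image.h"

    // Old code could branch on the format alone, e.g. IMGFMT_D3D11NV12.
    // With a single IMGFMT_D3D11, the same decision needs the sub-format:
    static bool is_d3d11_nv12(const struct mp_image_params *params)
    {
        return params->imgfmt == IMGFMT_D3D11 &&
               params->hw_subfmt == IMGFMT_NV12;
    }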
diff --git a/video/out/d3d11/hwdec_d3d11va.c b/video/out/d3d11/hwdec_d3d11va.c
index d83fdc57af..8d22fe3de5 100644
--- a/video/out/d3d11/hwdec_d3d11va.c
+++ b/video/out/d3d11/hwdec_d3d11va.c
@@ -104,9 +104,12 @@ static int init(struct ra_hwdec *hw)
ID3D10Multithread_SetMultithreadProtected(multithread, TRUE);
ID3D10Multithread_Release(multithread);
+ static const int subfmts[] = {IMGFMT_NV12, IMGFMT_P010, 0};
p->hwctx = (struct mp_hwdec_ctx){
.driver_name = hw->driver->name,
.av_device_ref = d3d11_wrap_device_ref(p->device),
+ .supported_formats = subfmts,
+ .hw_imgfmt = IMGFMT_D3D11,
};
hwdec_devices_add(hw->devs, &p->hwctx);
return 0;
@@ -236,7 +239,7 @@ static void mapper_unmap(struct ra_hwdec_mapper *mapper)
const struct ra_hwdec_driver ra_hwdec_d3d11va = {
.name = "d3d11va",
.priv_size = sizeof(struct priv_owner),
- .imgfmts = {IMGFMT_D3D11VA, IMGFMT_D3D11NV12, 0},
+ .imgfmts = {IMGFMT_D3D11, 0},
.init = init,
.uninit = uninit,
.mapper = &(const struct ra_hwdec_mapper_driver){
diff --git a/video/out/opengl/hwdec_d3d11egl.c b/video/out/opengl/hwdec_d3d11egl.c
index e7416330d7..f9a6700bb9 100644
--- a/video/out/opengl/hwdec_d3d11egl.c
+++ b/video/out/opengl/hwdec_d3d11egl.c
@@ -178,9 +178,12 @@ static int init(struct ra_hwdec *hw)
ID3D10Multithread_SetMultithreadProtected(multithread, TRUE);
ID3D10Multithread_Release(multithread);
+ static const int subfmts[] = {IMGFMT_NV12, 0};
p->hwctx = (struct mp_hwdec_ctx){
.driver_name = hw->driver->name,
.av_device_ref = d3d11_wrap_device_ref(p->d3d11_device),
+ .supported_formats = subfmts,
+ .hw_imgfmt = IMGFMT_D3D11,
};
hwdec_devices_add(hw->devs, &p->hwctx);
@@ -332,7 +335,7 @@ static void mapper_unmap(struct ra_hwdec_mapper *mapper)
const struct ra_hwdec_driver ra_hwdec_d3d11egl = {
.name = "d3d11-egl",
.priv_size = sizeof(struct priv_owner),
- .imgfmts = {IMGFMT_D3D11NV12, 0},
+ .imgfmts = {IMGFMT_D3D11, 0},
.init = init,
.uninit = uninit,
.mapper = &(const struct ra_hwdec_mapper_driver){
diff --git a/video/out/opengl/hwdec_d3d11eglrgb.c b/video/out/opengl/hwdec_d3d11eglrgb.c
index c8f6580320..db7b1cfbee 100644
--- a/video/out/opengl/hwdec_d3d11eglrgb.c
+++ b/video/out/opengl/hwdec_d3d11eglrgb.c
@@ -135,9 +135,12 @@ static int init(struct ra_hwdec *hw)
goto fail;
}
+ static const int subfmts[] = {IMGFMT_RGB0, 0};
p->hwctx = (struct mp_hwdec_ctx){
.driver_name = hw->driver->name,
.av_device_ref = d3d11_wrap_device_ref(p->d3d11_device),
+ .supported_formats = subfmts,
+ .hw_imgfmt = IMGFMT_D3D11,
};
hwdec_devices_add(hw->devs, &p->hwctx);
@@ -159,6 +162,11 @@ static int mapper_init(struct ra_hwdec_mapper *mapper)
struct priv *p = mapper->priv;
GL *gl = ra_gl_get(mapper->ra);
+ if (mapper->src_params.hw_subfmt != IMGFMT_RGB0) {
+ MP_FATAL(mapper, "Format not supported.\n");
+ return -1;
+ }
+
gl->GenTextures(1, &p->gl_texture);
gl->BindTexture(GL_TEXTURE_2D, p->gl_texture);
gl->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
@@ -258,7 +266,7 @@ static int mapper_map(struct ra_hwdec_mapper *mapper)
const struct ra_hwdec_driver ra_hwdec_d3d11eglrgb = {
.name = "d3d11-egl-rgb",
.priv_size = sizeof(struct priv_owner),
- .imgfmts = {IMGFMT_D3D11RGB, 0},
+ .imgfmts = {IMGFMT_D3D11, 0},
.init = init,
.uninit = uninit,
.mapper = &(const struct ra_hwdec_mapper_driver){
diff --git a/wscript_build.py b/wscript_build.py
index ce51455c48..228392068f 100644
--- a/wscript_build.py
+++ b/wscript_build.py
@@ -253,6 +253,17 @@ def build(ctx):
( "demux/packet.c" ),
( "demux/timeline.c" ),
+ ( "filters/f_autoconvert.c" ),
+ ( "filters/f_auto_filters.c" ),
+ ( "filters/f_hwtransfer.c" ),
+ ( "filters/f_lavfi.c" ),
+ ( "filters/f_output_chain.c" ),
+ ( "filters/f_swscale.c" ),
+ ( "filters/f_utils.c" ),
+ ( "filters/filter.c" ),
+ ( "filters/frame.c" ),
+ ( "filters/user_filters.c" ),
+
## Input
( "input/cmd_list.c" ),
( "input/cmd_parse.c" ),
@@ -365,11 +376,8 @@ def build(ctx):
( "video/decode/dec_video.c"),
( "video/decode/vd_lavc.c" ),
( "video/filter/refqueue.c" ),
- ( "video/filter/vf.c" ),
- ( "video/filter/vf_convert.c" ),
( "video/filter/vf_d3d11vpp.c", "d3d-hwaccel" ),
( "video/filter/vf_format.c" ),
- ( "video/filter/vf_lavfi.c" ),
( "video/filter/vf_sub.c" ),
( "video/filter/vf_vapoursynth.c", "vapoursynth-core" ),
( "video/filter/vf_vavpp.c", "vaapi" ),