ALL: use new mp_thread abstraction

This commit is contained in:
Kacper Michajłow 2023-10-21 04:55:41 +02:00 committed by Dudemanguy
parent 3a8b107f62
commit 174df99ffa
81 changed files with 1255 additions and 1302 deletions

View File

@ -430,7 +430,7 @@ like a log file or the internal console.lua script.
Locking
-------
See generally available literature. In mpv, we use pthread for this.
See generally available literature. In mpv, we use mp_thread for this.
Always keep locking clean. Don't skip locking just because it will work "in
practice". (See undefined behavior section.) If your use case is simple, you may
@ -555,13 +555,13 @@ Condition variables
-------------------
They're used whenever a thread needs to wait for something, without nonsense
like sleep calls or busy waiting. mpv uses the standard pthread API for this.
There's a lot of literature on it. Read it.
like sleep calls or busy waiting. mpv uses the mp_thread API for this.
There's a lot of literature on condition variables and threading in general. Read it.
For initial understanding, it may be helpful to know that condition variables
are not variables that signal a condition. pthread_cond_t does not have any
state per-se. Maybe pthread_cond_t would better be named pthread_interrupt_t,
because its sole purpose is to interrupt a thread waiting via pthread_cond_wait()
are not variables that signal a condition. mp_cond does not have any
state per-se. Maybe mp_cond would better be named mp_interrupt,
because its sole purpose is to interrupt a thread waiting via mp_cond_wait()
(or similar). The "something" in "waiting for something" can be called
predicate (to avoid confusing it with "condition"). Consult literature for the
proper terms.
@ -570,24 +570,24 @@ The very short version is...
Shared declarations:
pthread_mutex_t lock;
pthread_cond_t cond_var;
mp_mutex lock;
mp_cond cond_var;
struct something state_var; // protected by lock, changes signaled by cond_var
Waiter thread:
pthread_mutex_lock(&lock);
mp_mutex_lock(&lock);
// Wait for a change in state_var. We want to wait until predicate_fulfilled()
// returns true.
// Must be a loop for 2 reasons:
// 1. cond_var may be associated with other conditions too
// 2. pthread_cond_wait() can have sporadic wakeups
// 2. mp_cond_wait() can have sporadic wakeups
while (!predicate_fulfilled(&state_var)) {
// This unlocks, waits for cond_var to be signaled, and then locks again.
// The _whole_ point of cond_var is that unlocking and waiting for the
// signal happens atomically.
pthread_cond_wait(&cond_var, &lock);
mp_cond_wait(&cond_var, &lock);
}
// Here you may react to the state change. The state cannot change
@ -595,43 +595,43 @@ Waiter thread:
// and reacquire it).
// ...
pthread_mutex_unlock(&lock);
mp_mutex_unlock(&lock);
Signaler thread:
pthread_mutex_lock(&lock);
mp_mutex_lock(&lock);
// Something changed. Update the shared variable with the new state.
update_state(&state_var);
// Notify that something changed. This will wake up the waiter thread if
// it's blocked in pthread_cond_wait(). If not, nothing happens.
pthread_cond_broadcast(&cond_var);
// it's blocked in mp_cond_wait(). If not, nothing happens.
mp_cond_broadcast(&cond_var);
// Fun fact: good implementations wake up the waiter only when the lock is
// released, to reduce kernel scheduling overhead.
pthread_mutex_unlock(&lock);
mp_mutex_unlock(&lock);
Some basic rules:
1. Always access your state under proper locking
2. Always check your predicate before every call to pthread_cond_wait()
(And don't call pthread_cond_wait() if the predicate is fulfilled.)
3. Always call pthread_cond_wait() in a loop
2. Always check your predicate before every call to mp_cond_wait()
(And don't call mp_cond_wait() if the predicate is fulfilled.)
3. Always call mp_cond_wait() in a loop
(And only if your predicate failed without releasing the lock.)
4. Always call pthread_cond_broadcast()/_signal() inside of its associated
4. Always call mp_cond_broadcast()/_signal() inside of its associated
lock
mpv sometimes violates rule 3, and leaves "retrying" (i.e. looping) to the
caller.
Common pitfalls:
- Thinking that pthread_cond_t is some kind of semaphore, or holds any
- Thinking that mp_cond is some kind of semaphore, or holds any
application state or the user predicate (it _only_ wakes up threads
that are at the same time blocking on pthread_cond_wait() and friends,
that are at the same time blocking on mp_cond_wait() and friends,
nothing else)
- Changing the predicate, but not updating all pthread_cond_broadcast()/
- Changing the predicate, but not updating all mp_cond_broadcast()/
_signal() calls correctly
- Forgetting that pthread_cond_wait() unlocks the lock (other threads can
- Forgetting that mp_cond_wait() unlocks the lock (other threads can
and must acquire the lock)
- Holding multiple nested locks while trying to wait (=> deadlock, violates
the lock order anyway)

View File

@ -62,9 +62,9 @@ struct priv {
bool thread_terminate;
bool thread_created;
pthread_t thread;
pthread_mutex_t lock;
pthread_cond_t wakeup;
mp_thread thread;
mp_mutex lock;
mp_cond wakeup;
};
struct JNIByteBuffer {
@ -549,13 +549,13 @@ static int init_jni(struct ao *ao)
return 0;
}
static void *playthread(void *arg)
static MP_THREAD_VOID playthread(void *arg)
{
struct ao *ao = arg;
struct priv *p = ao->priv;
JNIEnv *env = MP_JNI_GET_ENV(ao);
mpthread_set_name("ao/audiotrack");
pthread_mutex_lock(&p->lock);
mp_thread_set_name("ao/audiotrack");
mp_mutex_lock(&p->lock);
while (!p->thread_terminate) {
int state = AudioTrack.PLAYSTATE_PAUSED;
if (p->audiotrack) {
@ -579,12 +579,11 @@ static void *playthread(void *arg)
MP_ERR(ao, "AudioTrack.write failed with %d\n", ret);
}
} else {
struct timespec wait = mp_rel_time_to_timespec(0.300);
pthread_cond_timedwait(&p->wakeup, &p->lock, &wait);
mp_cond_timedwait(&p->wakeup, &p->lock, MP_TIME_MS_TO_NS(300));
}
}
pthread_mutex_unlock(&p->lock);
return NULL;
mp_mutex_unlock(&p->lock);
MP_THREAD_RETURN();
}
static void uninit(struct ao *ao)
@ -598,13 +597,13 @@ static void uninit(struct ao *ao)
MP_JNI_EXCEPTION_LOG(ao);
}
pthread_mutex_lock(&p->lock);
mp_mutex_lock(&p->lock);
p->thread_terminate = true;
pthread_cond_signal(&p->wakeup);
pthread_mutex_unlock(&p->lock);
mp_cond_signal(&p->wakeup);
mp_mutex_unlock(&p->lock);
if (p->thread_created)
pthread_join(p->thread, NULL);
mp_thread_join(p->thread);
if (p->audiotrack) {
MP_JNI_CALL_VOID(p->audiotrack, AudioTrack.release);
@ -638,8 +637,8 @@ static void uninit(struct ao *ao)
p->timestamp = NULL;
}
pthread_cond_destroy(&p->wakeup);
pthread_mutex_destroy(&p->lock);
mp_cond_destroy(&p->wakeup);
mp_mutex_destroy(&p->lock);
uninit_jni(ao);
}
@ -651,8 +650,8 @@ static int init(struct ao *ao)
if (!env)
return -1;
pthread_mutex_init(&p->lock, NULL);
pthread_cond_init(&p->wakeup, NULL);
mp_mutex_init(&p->lock);
mp_cond_init(&p->wakeup);
if (init_jni(ao) < 0)
return -1;
@ -781,7 +780,7 @@ static int init(struct ao *ao)
goto error;
}
if (pthread_create(&p->thread, NULL, playthread, ao)) {
if (mp_thread_create(&p->thread, playthread, ao)) {
MP_ERR(ao, "pthread creation failed\n");
goto error;
}
@ -828,7 +827,7 @@ static void start(struct ao *ao)
MP_JNI_CALL_VOID(p->audiotrack, AudioTrack.play);
MP_JNI_EXCEPTION_LOG(ao);
pthread_cond_signal(&p->wakeup);
mp_cond_signal(&p->wakeup);
}
#define OPT_BASE_STRUCT struct priv

View File

@ -173,7 +173,7 @@ static int init(struct ao *ao)
return 0;
fail:
pthread_mutex_unlock(&ao->encode_lavc_ctx->lock);
mp_mutex_unlock(&ao->encode_lavc_ctx->lock);
ac->shutdown = true;
return -1;
}
@ -261,7 +261,7 @@ static bool audio_write(struct ao *ao, void **data, int samples)
double outpts = pts;
// for ectx PTS fields
pthread_mutex_lock(&ectx->lock);
mp_mutex_lock(&ectx->lock);
if (!ectx->options->rawts) {
// Fix and apply the discontinuity pts offset.
@ -290,7 +290,7 @@ static bool audio_write(struct ao *ao, void **data, int samples)
ectx->next_in_pts = nextpts;
}
pthread_mutex_unlock(&ectx->lock);
mp_mutex_unlock(&ectx->lock);
mp_aframe_set_pts(af, outpts);

View File

@ -23,13 +23,12 @@
#include "common/msg.h"
#include "audio/format.h"
#include "options/m_option.h"
#include "osdep/threads.h"
#include "osdep/timer.h"
#include <SLES/OpenSLES.h>
#include <SLES/OpenSLES_Android.h>
#include <pthread.h>
struct priv {
SLObjectItf sl, output_mix, player;
SLBufferQueueItf buffer_queue;
@ -37,7 +36,7 @@ struct priv {
SLPlayItf play;
void *buf;
int bytes_per_enqueue;
pthread_mutex_t buffer_lock;
mp_mutex buffer_lock;
double audio_latency;
int frames_per_enqueue;
@ -62,7 +61,7 @@ static void uninit(struct ao *ao)
p->engine = NULL;
p->play = NULL;
pthread_mutex_destroy(&p->buffer_lock);
mp_mutex_destroy(&p->buffer_lock);
free(p->buf);
p->buf = NULL;
@ -77,7 +76,7 @@ static void buffer_callback(SLBufferQueueItf buffer_queue, void *context)
SLresult res;
double delay;
pthread_mutex_lock(&p->buffer_lock);
mp_mutex_lock(&p->buffer_lock);
delay = p->frames_per_enqueue / (double)ao->samplerate;
delay += p->audio_latency;
@ -88,7 +87,7 @@ static void buffer_callback(SLBufferQueueItf buffer_queue, void *context)
if (res != SL_RESULT_SUCCESS)
MP_ERR(ao, "Failed to Enqueue: %d\n", res);
pthread_mutex_unlock(&p->buffer_lock);
mp_mutex_unlock(&p->buffer_lock);
}
#define CHK(stmt) \
@ -170,7 +169,7 @@ static int init(struct ao *ao)
goto error;
}
int r = pthread_mutex_init(&p->buffer_lock, NULL);
int r = mp_mutex_init(&p->buffer_lock);
if (r) {
MP_ERR(ao, "Failed to initialize the mutex: %d\n", r);
goto error;

View File

@ -24,7 +24,6 @@
#include <string.h>
#include <stdint.h>
#include <math.h>
#include <pthread.h>
#include <pulse/pulseaudio.h>

View File

@ -199,7 +199,7 @@ static DWORD __stdcall AudioThread(void *lpParameter)
{
struct ao *ao = lpParameter;
struct wasapi_state *state = ao->priv;
mpthread_set_name("ao/wasapi");
mp_thread_set_name("ao/wasapi");
CoInitializeEx(NULL, COINIT_APARTMENTTHREADED);
state->init_ok = wasapi_thread_init(ao);

View File

@ -16,7 +16,6 @@
*/
#include <stddef.h>
#include <pthread.h>
#include <inttypes.h>
#include <math.h>
#include <unistd.h>
@ -39,12 +38,12 @@
struct buffer_state {
// Buffer and AO
pthread_mutex_t lock;
pthread_cond_t wakeup;
mp_mutex lock;
mp_cond wakeup;
// Playthread sleep
pthread_mutex_t pt_lock;
pthread_cond_t pt_wakeup;
mp_mutex pt_lock;
mp_cond pt_wakeup;
// Access from AO driver's thread only.
char *convert_buffer;
@ -70,7 +69,7 @@ struct buffer_state {
bool hw_paused; // driver->set_pause() was used successfully
bool recover_pause; // non-hw_paused: needs to recover delay
struct mp_pcm_state prepause_state;
pthread_t thread; // thread shoveling data to AO
mp_thread thread; // thread shoveling data to AO
bool thread_valid; // thread is running
struct mp_aframe *temp_buf;
@ -79,15 +78,15 @@ struct buffer_state {
bool terminate; // exit thread
};
static void *playthread(void *arg);
static MP_THREAD_VOID playthread(void *arg);
void ao_wakeup_playthread(struct ao *ao)
{
struct buffer_state *p = ao->buffer_state;
pthread_mutex_lock(&p->pt_lock);
mp_mutex_lock(&p->pt_lock);
p->need_wakeup = true;
pthread_cond_broadcast(&p->pt_wakeup);
pthread_mutex_unlock(&p->pt_lock);
mp_cond_broadcast(&p->pt_wakeup);
mp_mutex_unlock(&p->pt_lock);
}
// called locked
@ -184,7 +183,7 @@ int ao_read_data(struct ao *ao, void **data, int samples, int64_t out_time_ns)
struct buffer_state *p = ao->buffer_state;
assert(!ao->driver->write);
pthread_mutex_lock(&p->lock);
mp_mutex_lock(&p->lock);
int pos = read_buffer(ao, data, samples, &(bool){0});
@ -195,10 +194,10 @@ int ao_read_data(struct ao *ao, void **data, int samples, int64_t out_time_ns)
p->playing = false;
ao->wakeup_cb(ao->wakeup_ctx);
// For ao_drain().
pthread_cond_broadcast(&p->wakeup);
mp_cond_broadcast(&p->wakeup);
}
pthread_mutex_unlock(&p->lock);
mp_mutex_unlock(&p->lock);
return pos;
}
@ -248,12 +247,12 @@ int ao_control(struct ao *ao, enum aocontrol cmd, void *arg)
if (ao->driver->control) {
// Only need to lock in push mode.
if (ao->driver->write)
pthread_mutex_lock(&p->lock);
mp_mutex_lock(&p->lock);
r = ao->driver->control(ao, cmd, arg);
if (ao->driver->write)
pthread_mutex_unlock(&p->lock);
mp_mutex_unlock(&p->lock);
}
return r;
}
@ -262,7 +261,7 @@ double ao_get_delay(struct ao *ao)
{
struct buffer_state *p = ao->buffer_state;
pthread_mutex_lock(&p->lock);
mp_mutex_lock(&p->lock);
double driver_delay;
if (ao->driver->write) {
@ -279,7 +278,7 @@ double ao_get_delay(struct ao *ao)
if (p->pending)
pending += mp_aframe_get_size(p->pending);
pthread_mutex_unlock(&p->lock);
mp_mutex_unlock(&p->lock);
return driver_delay + pending / (double)ao->samplerate;
}
@ -290,7 +289,7 @@ void ao_reset(struct ao *ao)
bool wakeup = false;
bool do_reset = false;
pthread_mutex_lock(&p->lock);
mp_mutex_lock(&p->lock);
TA_FREEP(&p->pending);
mp_async_queue_reset(p->queue);
@ -313,7 +312,7 @@ void ao_reset(struct ao *ao)
p->hw_paused = false;
p->end_time_ns = 0;
pthread_mutex_unlock(&p->lock);
mp_mutex_unlock(&p->lock);
if (do_reset)
ao->driver->reset(ao);
@ -331,7 +330,7 @@ void ao_start(struct ao *ao)
struct buffer_state *p = ao->buffer_state;
bool do_start = false;
pthread_mutex_lock(&p->lock);
mp_mutex_lock(&p->lock);
p->playing = true;
@ -340,7 +339,7 @@ void ao_start(struct ao *ao)
do_start = true;
}
pthread_mutex_unlock(&p->lock);
mp_mutex_unlock(&p->lock);
// Pull AOs might call ao_read_data() so do this outside the lock.
if (do_start)
@ -360,7 +359,7 @@ void ao_set_paused(struct ao *ao, bool paused, bool eof)
if (eof && paused && ao_is_playing(ao))
ao_drain(ao);
pthread_mutex_lock(&p->lock);
mp_mutex_lock(&p->lock);
if ((p->playing || !ao->driver->write) && !p->paused && paused) {
if (p->streaming && !ao->stream_silence) {
@ -395,7 +394,7 @@ void ao_set_paused(struct ao *ao, bool paused, bool eof)
}
p->paused = paused;
pthread_mutex_unlock(&p->lock);
mp_mutex_unlock(&p->lock);
if (do_reset)
ao->driver->reset(ao);
@ -414,9 +413,9 @@ bool ao_is_playing(struct ao *ao)
{
struct buffer_state *p = ao->buffer_state;
pthread_mutex_lock(&p->lock);
mp_mutex_lock(&p->lock);
bool playing = p->playing;
pthread_mutex_unlock(&p->lock);
mp_mutex_unlock(&p->lock);
return playing;
}
@ -426,30 +425,31 @@ void ao_drain(struct ao *ao)
{
struct buffer_state *p = ao->buffer_state;
pthread_mutex_lock(&p->lock);
mp_mutex_lock(&p->lock);
while (!p->paused && p->playing) {
pthread_mutex_unlock(&p->lock);
mp_mutex_unlock(&p->lock);
double delay = ao_get_delay(ao);
pthread_mutex_lock(&p->lock);
mp_mutex_lock(&p->lock);
// Limit to buffer + arbitrary ~250ms max. waiting for robustness.
delay += mp_async_queue_get_samples(p->queue) / (double)ao->samplerate;
struct timespec ts = mp_rel_time_to_timespec(MPMAX(delay, 0) + 0.25);
// Wait for EOF signal from AO.
if (pthread_cond_timedwait(&p->wakeup, &p->lock, &ts)) {
if (mp_cond_timedwait(&p->wakeup, &p->lock,
MP_TIME_S_TO_NS(MPMAX(delay, 0) + 0.25)))
{
MP_VERBOSE(ao, "drain timeout\n");
break;
}
if (!p->playing && mp_async_queue_get_samples(p->queue)) {
MP_WARN(ao, "underrun during draining\n");
pthread_mutex_unlock(&p->lock);
mp_mutex_unlock(&p->lock);
ao_start(ao);
pthread_mutex_lock(&p->lock);
mp_mutex_lock(&p->lock);
}
}
pthread_mutex_unlock(&p->lock);
mp_mutex_unlock(&p->lock);
ao_reset(ao);
}
@ -465,12 +465,12 @@ void ao_uninit(struct ao *ao)
struct buffer_state *p = ao->buffer_state;
if (p && p->thread_valid) {
pthread_mutex_lock(&p->pt_lock);
mp_mutex_lock(&p->pt_lock);
p->terminate = true;
pthread_cond_broadcast(&p->pt_wakeup);
pthread_mutex_unlock(&p->pt_lock);
mp_cond_broadcast(&p->pt_wakeup);
mp_mutex_unlock(&p->pt_lock);
pthread_join(p->thread, NULL);
mp_thread_join(p->thread);
p->thread_valid = false;
}
@ -484,11 +484,11 @@ void ao_uninit(struct ao *ao)
talloc_free(p->convert_buffer);
talloc_free(p->temp_buf);
pthread_cond_destroy(&p->wakeup);
pthread_mutex_destroy(&p->lock);
mp_cond_destroy(&p->wakeup);
mp_mutex_destroy(&p->lock);
pthread_cond_destroy(&p->pt_wakeup);
pthread_mutex_destroy(&p->pt_lock);
mp_cond_destroy(&p->pt_wakeup);
mp_mutex_destroy(&p->pt_lock);
}
talloc_free(ao);
@ -509,11 +509,11 @@ bool init_buffer_post(struct ao *ao)
assert(ao->driver->get_state);
}
pthread_mutex_init(&p->lock, NULL);
pthread_cond_init(&p->wakeup, NULL);
mp_mutex_init(&p->lock);
mp_cond_init(&p->wakeup);
pthread_mutex_init(&p->pt_lock, NULL);
pthread_cond_init(&p->pt_wakeup, NULL);
mp_mutex_init(&p->pt_lock);
mp_cond_init(&p->pt_wakeup);
p->queue = mp_async_queue_create();
p->filter_root = mp_filter_create_root(ao->global);
@ -532,7 +532,7 @@ bool init_buffer_post(struct ao *ao)
mp_filter_graph_set_wakeup_cb(p->filter_root, wakeup_filters, ao);
p->thread_valid = true;
if (pthread_create(&p->thread, NULL, playthread, ao)) {
if (mp_thread_create(&p->thread, playthread, ao)) {
p->thread_valid = false;
return false;
}
@ -651,17 +651,17 @@ eof:
}
ao->wakeup_cb(ao->wakeup_ctx);
// For ao_drain().
pthread_cond_broadcast(&p->wakeup);
mp_cond_broadcast(&p->wakeup);
return true;
}
static void *playthread(void *arg)
static MP_THREAD_VOID playthread(void *arg)
{
struct ao *ao = arg;
struct buffer_state *p = ao->buffer_state;
mpthread_set_name("ao");
mp_thread_set_name("ao");
while (1) {
pthread_mutex_lock(&p->lock);
mp_mutex_lock(&p->lock);
bool retry = false;
if (!ao->driver->initially_blocked || p->initial_unblocked)
@ -677,32 +677,31 @@ static void *playthread(void *arg)
timeout = ao->device_buffer / (double)ao->samplerate * 0.25;
}
pthread_mutex_unlock(&p->lock);
mp_mutex_unlock(&p->lock);
pthread_mutex_lock(&p->pt_lock);
mp_mutex_lock(&p->pt_lock);
if (p->terminate) {
pthread_mutex_unlock(&p->pt_lock);
mp_mutex_unlock(&p->pt_lock);
break;
}
if (!p->need_wakeup && !retry) {
MP_STATS(ao, "start audio wait");
struct timespec ts = mp_rel_time_to_timespec(timeout);
pthread_cond_timedwait(&p->pt_wakeup, &p->pt_lock, &ts);
mp_cond_timedwait(&p->pt_wakeup, &p->pt_lock, MP_TIME_S_TO_NS(timeout));
MP_STATS(ao, "end audio wait");
}
p->need_wakeup = false;
pthread_mutex_unlock(&p->pt_lock);
mp_mutex_unlock(&p->pt_lock);
}
return NULL;
MP_THREAD_RETURN();
}
void ao_unblock(struct ao *ao)
{
if (ao->driver->write) {
struct buffer_state *p = ao->buffer_state;
pthread_mutex_lock(&p->lock);
mp_mutex_lock(&p->lock);
p->initial_unblocked = true;
pthread_mutex_unlock(&p->lock);
mp_mutex_unlock(&p->lock);
ao_wakeup_playthread(ao);
}
}

View File

@ -18,7 +18,6 @@
#ifndef MP_AO_INTERNAL_H_
#define MP_AO_INTERNAL_H_
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

View File

@ -22,13 +22,13 @@
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <pthread.h>
#include "av_log.h"
#include "config.h"
#include "common/common.h"
#include "common/global.h"
#include "common/msg.h"
#include "config.h"
#include "osdep/threads.h"
#include <libavutil/avutil.h>
#include <libavutil/log.h>
@ -51,7 +51,7 @@
// Needed because the av_log callback does not provide a library-safe message
// callback.
static pthread_mutex_t log_lock = PTHREAD_MUTEX_INITIALIZER;
static mp_static_mutex log_lock = MP_STATIC_MUTEX_INITIALIZER;
static struct mpv_global *log_mpv_instance;
static struct mp_log *log_root, *log_decaudio, *log_decvideo, *log_demuxer;
static bool log_print_prefix = true;
@ -113,10 +113,10 @@ static void mp_msg_av_log_callback(void *ptr, int level, const char *fmt,
int mp_level = av_log_level_to_mp_level(level);
// Note: mp_log is thread-safe, but destruction of the log instances is not.
pthread_mutex_lock(&log_lock);
mp_mutex_lock(&log_lock);
if (!log_mpv_instance) {
pthread_mutex_unlock(&log_lock);
mp_mutex_unlock(&log_lock);
// Fallback to stderr
vfprintf(stderr, fmt, vl);
return;
@ -138,12 +138,12 @@ static void mp_msg_av_log_callback(void *ptr, int level, const char *fmt,
mp_msg(log, mp_level, "%s", buffer);
}
pthread_mutex_unlock(&log_lock);
mp_mutex_unlock(&log_lock);
}
void init_libav(struct mpv_global *global)
{
pthread_mutex_lock(&log_lock);
mp_mutex_lock(&log_lock);
if (!log_mpv_instance) {
log_mpv_instance = global;
log_root = mp_log_new(NULL, global->log, "ffmpeg");
@ -152,7 +152,7 @@ void init_libav(struct mpv_global *global)
log_demuxer = mp_log_new(log_root, log_root, "demuxer");
av_log_set_callback(mp_msg_av_log_callback);
}
pthread_mutex_unlock(&log_lock);
mp_mutex_unlock(&log_lock);
avformat_network_init();
@ -163,13 +163,13 @@ void init_libav(struct mpv_global *global)
void uninit_libav(struct mpv_global *global)
{
pthread_mutex_lock(&log_lock);
mp_mutex_lock(&log_lock);
if (log_mpv_instance == global) {
av_log_set_callback(av_log_default_callback);
log_mpv_instance = NULL;
talloc_free(log_root);
}
pthread_mutex_unlock(&log_lock);
mp_mutex_unlock(&log_lock);
}
#define V(x) AV_VERSION_MAJOR(x), \

View File

@ -104,7 +104,7 @@ struct encode_lavc_context *encode_lavc_init(struct mpv_global *global)
.priv = talloc_zero(ctx, struct encode_priv),
.log = mp_log_new(ctx, global->log, "encode"),
};
pthread_mutex_init(&ctx->lock, NULL);
mp_mutex_init(&ctx->lock);
struct encode_priv *p = ctx->priv;
p->log = ctx->log;
@ -157,7 +157,7 @@ void encode_lavc_set_metadata(struct encode_lavc_context *ctx,
{
struct encode_priv *p = ctx->priv;
pthread_mutex_lock(&ctx->lock);
mp_mutex_lock(&ctx->lock);
if (ctx->options->copy_metadata) {
p->metadata = mp_tags_dup(ctx, metadata);
@ -184,7 +184,7 @@ void encode_lavc_set_metadata(struct encode_lavc_context *ctx,
}
}
pthread_mutex_unlock(&ctx->lock);
mp_mutex_unlock(&ctx->lock);
}
bool encode_lavc_free(struct encode_lavc_context *ctx)
@ -220,7 +220,7 @@ bool encode_lavc_free(struct encode_lavc_context *ctx)
res = !p->failed;
pthread_mutex_destroy(&ctx->lock);
mp_mutex_destroy(&ctx->lock);
talloc_free(ctx);
return res;
@ -313,7 +313,7 @@ void encode_lavc_expect_stream(struct encode_lavc_context *ctx,
{
struct encode_priv *p = ctx->priv;
pthread_mutex_lock(&ctx->lock);
mp_mutex_lock(&ctx->lock);
enum AVMediaType codec_type = mp_to_av_stream_type(type);
@ -337,7 +337,7 @@ void encode_lavc_expect_stream(struct encode_lavc_context *ctx,
MP_TARRAY_APPEND(p, p->streams, p->num_streams, dst);
done:
pthread_mutex_unlock(&ctx->lock);
mp_mutex_unlock(&ctx->lock);
}
// Signal that you are ready to encode (you provide the codec params etc. too).
@ -351,7 +351,7 @@ static void encode_lavc_add_stream(struct encoder_context *enc,
{
struct encode_priv *p = ctx->priv;
pthread_mutex_lock(&ctx->lock);
mp_mutex_lock(&ctx->lock);
struct mux_stream *dst = find_mux_stream(ctx, info->codecpar->codec_type);
if (!dst) {
@ -387,7 +387,7 @@ static void encode_lavc_add_stream(struct encoder_context *enc,
maybe_init_muxer(ctx);
done:
pthread_mutex_unlock(&ctx->lock);
mp_mutex_unlock(&ctx->lock);
}
// Write a packet. This will take over ownership of `pkt`
@ -398,7 +398,7 @@ static void encode_lavc_add_packet(struct mux_stream *dst, AVPacket *pkt)
assert(dst->st);
pthread_mutex_lock(&ctx->lock);
mp_mutex_lock(&ctx->lock);
if (p->failed)
goto done;
@ -435,7 +435,7 @@ static void encode_lavc_add_packet(struct mux_stream *dst, AVPacket *pkt)
pkt = NULL;
done:
pthread_mutex_unlock(&ctx->lock);
mp_mutex_unlock(&ctx->lock);
if (pkt)
av_packet_unref(pkt);
}
@ -450,9 +450,9 @@ void encode_lavc_discontinuity(struct encode_lavc_context *ctx)
if (!ctx)
return;
pthread_mutex_lock(&ctx->lock);
mp_mutex_lock(&ctx->lock);
ctx->discontinuity_pts_offset = MP_NOPTS_VALUE;
pthread_mutex_unlock(&ctx->lock);
mp_mutex_unlock(&ctx->lock);
}
static void encode_lavc_printoptions(struct mp_log *log, const void *obj,
@ -673,7 +673,7 @@ int encode_lavc_getstatus(struct encode_lavc_context *ctx,
float minutes, megabytes, fps, x;
float f = MPMAX(0.0001, relative_position);
pthread_mutex_lock(&ctx->lock);
mp_mutex_lock(&ctx->lock);
if (p->failed) {
snprintf(buf, bufsize, "(failed)\n");
@ -697,7 +697,7 @@ int encode_lavc_getstatus(struct encode_lavc_context *ctx,
buf[bufsize - 1] = 0;
done:
pthread_mutex_unlock(&ctx->lock);
mp_mutex_unlock(&ctx->lock);
return 0;
}
@ -705,9 +705,9 @@ bool encode_lavc_didfail(struct encode_lavc_context *ctx)
{
if (!ctx)
return false;
pthread_mutex_lock(&ctx->lock);
mp_mutex_lock(&ctx->lock);
bool fail = ctx->priv->failed;
pthread_mutex_unlock(&ctx->lock);
mp_mutex_unlock(&ctx->lock);
return fail;
}

View File

@ -22,7 +22,6 @@
#ifndef MPLAYER_ENCODE_LAVC_H
#define MPLAYER_ENCODE_LAVC_H
#include <pthread.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
@ -33,6 +32,7 @@
#include "common/common.h"
#include "encode.h"
#include "osdep/threads.h"
#include "video/csputils.h"
struct encode_lavc_context {
@ -47,7 +47,7 @@ struct encode_lavc_context {
// All entry points must be guarded with the lock. Functions called by
// the playback core lock this automatically, but ao_lavc.c and vo_lavc.c
// must lock manually before accessing state.
pthread_mutex_t lock;
mp_mutex lock;
// anti discontinuity mode
double next_in_pts;

View File

@ -16,7 +16,6 @@
*/
#include <assert.h>
#include <pthread.h>
#include <stdarg.h>
#include <stdatomic.h>
#include <stdint.h>
@ -55,9 +54,9 @@
struct mp_log_root {
struct mpv_global *global;
pthread_mutex_t lock;
pthread_mutex_t log_file_lock;
pthread_cond_t log_file_wakeup;
mp_mutex lock;
mp_mutex log_file_lock;
mp_cond log_file_wakeup;
// --- protected by lock
char **msg_levels;
bool use_terminal; // make accesses to stderr/stdout
@ -83,7 +82,7 @@ struct mp_log_root {
// --- owner thread only (caller of mp_msg_init() etc.)
char *log_path;
char *stats_path;
pthread_t log_file_thread;
mp_thread log_file_thread;
// --- owner thread only, but frozen while log_file_thread is running
FILE *log_file;
struct mp_log_buffer *log_file_buffer;
@ -104,7 +103,7 @@ struct mp_log {
struct mp_log_buffer {
struct mp_log_root *root;
pthread_mutex_t lock;
mp_mutex lock;
// --- protected by lock
struct mp_log_buffer_entry **entries; // ringbuffer
int capacity; // total space in entries[]
@ -133,7 +132,7 @@ static bool match_mod(const char *name, const char *mod)
static void update_loglevel(struct mp_log *log)
{
struct mp_log_root *root = log->root;
pthread_mutex_lock(&root->lock);
mp_mutex_lock(&root->lock);
log->level = MSGL_STATUS + root->verbose; // default log level
if (root->really_quiet)
log->level = -1;
@ -155,7 +154,7 @@ static void update_loglevel(struct mp_log *log)
log->level = MPMAX(log->level, MSGL_STATS);
log->level = MPMIN(log->level, log->max_level);
atomic_store(&log->reload_counter, atomic_load(&log->root->reload_counter));
pthread_mutex_unlock(&root->lock);
mp_mutex_unlock(&root->lock);
}
// Set (numerically) the maximum level that should still be output for this log
@ -164,9 +163,9 @@ void mp_msg_set_max_level(struct mp_log *log, int lev)
{
if (!log->root)
return;
pthread_mutex_lock(&log->root->lock);
mp_mutex_lock(&log->root->lock);
log->max_level = MPCLAMP(lev, -1, MSGL_MAX);
pthread_mutex_unlock(&log->root->lock);
mp_mutex_unlock(&log->root->lock);
update_loglevel(log);
}
@ -233,9 +232,9 @@ static void flush_status_line(struct mp_log_root *root)
void mp_msg_flush_status_line(struct mp_log *log)
{
if (log->root) {
pthread_mutex_lock(&log->root->lock);
mp_mutex_lock(&log->root->lock);
flush_status_line(log->root);
pthread_mutex_unlock(&log->root->lock);
mp_mutex_unlock(&log->root->lock);
}
}
@ -243,18 +242,18 @@ void mp_msg_set_term_title(struct mp_log *log, const char *title)
{
if (log->root && title) {
// Lock because printf to terminal is not necessarily atomic.
pthread_mutex_lock(&log->root->lock);
mp_mutex_lock(&log->root->lock);
fprintf(stderr, "\e]0;%s\007", title);
pthread_mutex_unlock(&log->root->lock);
mp_mutex_unlock(&log->root->lock);
}
}
bool mp_msg_has_status_line(struct mpv_global *global)
{
struct mp_log_root *root = global->log->root;
pthread_mutex_lock(&root->lock);
mp_mutex_lock(&root->lock);
bool r = root->status_lines > 0;
pthread_mutex_unlock(&root->lock);
mp_mutex_unlock(&root->lock);
return r;
}
@ -352,7 +351,7 @@ static void write_msg_to_buffers(struct mp_log *log, int lev, char *text)
for (int n = 0; n < root->num_buffers; n++) {
struct mp_log_buffer *buffer = root->buffers[n];
bool wakeup = false;
pthread_mutex_lock(&buffer->lock);
mp_mutex_lock(&buffer->lock);
int buffer_level = buffer->level;
if (buffer_level == MP_LOG_BUFFER_MSGL_TERM)
buffer_level = log->terminal_level;
@ -366,16 +365,16 @@ static void write_msg_to_buffers(struct mp_log *log, int lev, char *text)
while (buffer->num_entries == buffer->capacity && !dead) {
// Temporary unlock is OK; buffer->level is immutable, and
// buffer can't go away because the global log lock is held.
pthread_mutex_unlock(&buffer->lock);
pthread_mutex_lock(&root->log_file_lock);
mp_mutex_unlock(&buffer->lock);
mp_mutex_lock(&root->log_file_lock);
if (root->log_file_thread_active) {
pthread_cond_wait(&root->log_file_wakeup,
mp_cond_wait(&root->log_file_wakeup,
&root->log_file_lock);
} else {
dead = true;
}
pthread_mutex_unlock(&root->log_file_lock);
pthread_mutex_lock(&buffer->lock);
mp_mutex_unlock(&root->log_file_lock);
mp_mutex_lock(&buffer->lock);
}
}
if (buffer->num_entries == buffer->capacity) {
@ -395,7 +394,7 @@ static void write_msg_to_buffers(struct mp_log *log, int lev, char *text)
if (buffer->wakeup_cb && !buffer->silent)
wakeup = true;
}
pthread_mutex_unlock(&buffer->lock);
mp_mutex_unlock(&buffer->lock);
if (wakeup)
buffer->wakeup_cb(buffer->wakeup_cb_ctx);
}
@ -415,7 +414,7 @@ void mp_msg_va(struct mp_log *log, int lev, const char *format, va_list va)
struct mp_log_root *root = log->root;
pthread_mutex_lock(&root->lock);
mp_mutex_lock(&root->lock);
root->buffer.len = 0;
@ -461,7 +460,7 @@ void mp_msg_va(struct mp_log *log, int lev, const char *format, va_list va)
}
}
pthread_mutex_unlock(&root->lock);
mp_mutex_unlock(&root->lock);
}
static void destroy_log(void *ptr)
@ -527,9 +526,9 @@ void mp_msg_init(struct mpv_global *global)
.reload_counter = 1,
};
pthread_mutex_init(&root->lock, NULL);
pthread_mutex_init(&root->log_file_lock, NULL);
pthread_cond_init(&root->log_file_wakeup, NULL);
mp_mutex_init(&root->lock);
mp_mutex_init(&root->log_file_lock);
mp_cond_init(&root->log_file_wakeup);
struct mp_log dummy = { .root = root };
struct mp_log *log = mp_log_new(root, &dummy, "");
@ -537,44 +536,44 @@ void mp_msg_init(struct mpv_global *global)
global->log = log;
}
static void *log_file_thread(void *p)
static MP_THREAD_VOID log_file_thread(void *p)
{
struct mp_log_root *root = p;
mpthread_set_name("log");
mp_thread_set_name("log");
pthread_mutex_lock(&root->log_file_lock);
mp_mutex_lock(&root->log_file_lock);
while (root->log_file_thread_active) {
struct mp_log_buffer_entry *e =
mp_msg_log_buffer_read(root->log_file_buffer);
if (e) {
pthread_mutex_unlock(&root->log_file_lock);
mp_mutex_unlock(&root->log_file_lock);
fprintf(root->log_file, "[%8.3f][%c][%s] %s",
mp_time_sec(),
mp_log_levels[e->level][0], e->prefix, e->text);
fflush(root->log_file);
pthread_mutex_lock(&root->log_file_lock);
mp_mutex_lock(&root->log_file_lock);
talloc_free(e);
// Multiple threads might be blocked if the log buffer was full.
pthread_cond_broadcast(&root->log_file_wakeup);
mp_cond_broadcast(&root->log_file_wakeup);
} else {
pthread_cond_wait(&root->log_file_wakeup, &root->log_file_lock);
mp_cond_wait(&root->log_file_wakeup, &root->log_file_lock);
}
}
pthread_mutex_unlock(&root->log_file_lock);
mp_mutex_unlock(&root->log_file_lock);
return NULL;
MP_THREAD_RETURN();
}
static void wakeup_log_file(void *p)
{
struct mp_log_root *root = p;
pthread_mutex_lock(&root->log_file_lock);
pthread_cond_broadcast(&root->log_file_wakeup);
pthread_mutex_unlock(&root->log_file_lock);
mp_mutex_lock(&root->log_file_lock);
mp_cond_broadcast(&root->log_file_wakeup);
mp_mutex_unlock(&root->log_file_lock);
}
// Only to be called from the main thread.
@ -582,16 +581,16 @@ static void terminate_log_file_thread(struct mp_log_root *root)
{
bool wait_terminate = false;
pthread_mutex_lock(&root->log_file_lock);
mp_mutex_lock(&root->log_file_lock);
if (root->log_file_thread_active) {
root->log_file_thread_active = false;
pthread_cond_broadcast(&root->log_file_wakeup);
mp_cond_broadcast(&root->log_file_wakeup);
wait_terminate = true;
}
pthread_mutex_unlock(&root->log_file_lock);
mp_mutex_unlock(&root->log_file_lock);
if (wait_terminate)
pthread_join(root->log_file_thread, NULL);
mp_thread_join(root->log_file_thread);
mp_msg_log_buffer_destroy(root->log_file_buffer);
root->log_file_buffer = NULL;
@ -631,7 +630,7 @@ void mp_msg_update_msglevels(struct mpv_global *global, struct MPOpts *opts)
{
struct mp_log_root *root = global->log->root;
pthread_mutex_lock(&root->lock);
mp_mutex_lock(&root->lock);
root->verbose = opts->verbose;
root->really_quiet = opts->msg_really_quiet;
@ -645,7 +644,7 @@ void mp_msg_update_msglevels(struct mpv_global *global, struct MPOpts *opts)
m_option_type_msglevels.copy(NULL, &root->msg_levels, &opts->msg_levels);
atomic_fetch_add(&root->reload_counter, 1);
pthread_mutex_unlock(&root->lock);
mp_mutex_unlock(&root->lock);
if (check_new_path(global, opts->log_file, &root->log_path)) {
terminate_log_file_thread(root);
@ -655,11 +654,11 @@ void mp_msg_update_msglevels(struct mpv_global *global, struct MPOpts *opts)
// if a logfile is created and the early filebuf still exists,
// flush and destroy the early buffer
pthread_mutex_lock(&root->lock);
mp_mutex_lock(&root->lock);
struct mp_log_buffer *earlybuf = root->early_filebuffer;
if (earlybuf)
root->early_filebuffer = NULL; // but it still logs msgs
pthread_mutex_unlock(&root->lock);
mp_mutex_unlock(&root->lock);
if (earlybuf) {
// flush, destroy before creating the normal logfile buf,
@ -681,7 +680,7 @@ void mp_msg_update_msglevels(struct mpv_global *global, struct MPOpts *opts)
mp_msg_log_buffer_new(global, FILE_BUF, MP_LOG_BUFFER_MSGL_LOGFILE,
wakeup_log_file, root);
root->log_file_thread_active = true;
if (pthread_create(&root->log_file_thread, NULL, log_file_thread,
if (mp_thread_create(&root->log_file_thread, log_file_thread,
root))
{
root->log_file_thread_active = false;
@ -697,7 +696,7 @@ void mp_msg_update_msglevels(struct mpv_global *global, struct MPOpts *opts)
if (check_new_path(global, opts->dump_stats, &root->stats_path)) {
bool open_error = false;
pthread_mutex_lock(&root->lock);
mp_mutex_lock(&root->lock);
if (root->stats_file)
fclose(root->stats_file);
root->stats_file = NULL;
@ -705,7 +704,7 @@ void mp_msg_update_msglevels(struct mpv_global *global, struct MPOpts *opts)
root->stats_file = fopen(root->stats_path, "wb");
open_error = !root->stats_file;
}
pthread_mutex_unlock(&root->lock);
mp_mutex_unlock(&root->lock);
if (open_error) {
mp_err(global->log, "Failed to open stats file '%s'\n",
@ -718,9 +717,9 @@ void mp_msg_force_stderr(struct mpv_global *global, bool force_stderr)
{
struct mp_log_root *root = global->log->root;
pthread_mutex_lock(&root->lock);
mp_mutex_lock(&root->lock);
root->force_stderr = force_stderr;
pthread_mutex_unlock(&root->lock);
mp_mutex_unlock(&root->lock);
}
// Only to be called from the main thread.
@ -743,9 +742,9 @@ void mp_msg_uninit(struct mpv_global *global)
talloc_free(root->stats_path);
talloc_free(root->log_path);
m_option_type_msglevels.free(&root->msg_levels);
pthread_mutex_destroy(&root->lock);
pthread_mutex_destroy(&root->log_file_lock);
pthread_cond_destroy(&root->log_file_wakeup);
mp_mutex_destroy(&root->lock);
mp_mutex_destroy(&root->log_file_lock);
mp_cond_destroy(&root->log_file_wakeup);
talloc_free(root);
global->log = NULL;
}
@ -773,26 +772,26 @@ static void mp_msg_set_early_logging_raw(struct mpv_global *global, bool enable,
int size, int level)
{
struct mp_log_root *root = global->log->root;
pthread_mutex_lock(&root->lock);
mp_mutex_lock(&root->lock);
if (enable != !!*root_logbuf) {
if (enable) {
pthread_mutex_unlock(&root->lock);
mp_mutex_unlock(&root->lock);
struct mp_log_buffer *buf =
mp_msg_log_buffer_new(global, size, level, NULL, NULL);
pthread_mutex_lock(&root->lock);
mp_mutex_lock(&root->lock);
assert(!*root_logbuf); // no concurrent calls to this function
*root_logbuf = buf;
} else {
struct mp_log_buffer *buf = *root_logbuf;
*root_logbuf = NULL;
pthread_mutex_unlock(&root->lock);
mp_mutex_unlock(&root->lock);
mp_msg_log_buffer_destroy(buf);
return;
}
}
pthread_mutex_unlock(&root->lock);
mp_mutex_unlock(&root->lock);
}
void mp_msg_set_early_logging(struct mpv_global *global, bool enable)
@ -814,7 +813,7 @@ struct mp_log_buffer *mp_msg_log_buffer_new(struct mpv_global *global,
{
struct mp_log_root *root = global->log->root;
pthread_mutex_lock(&root->lock);
mp_mutex_lock(&root->lock);
if (level == MP_LOG_BUFFER_MSGL_TERM) {
size = TERM_BUF;
@ -828,7 +827,7 @@ struct mp_log_buffer *mp_msg_log_buffer_new(struct mpv_global *global,
root->early_buffer = NULL;
buffer->wakeup_cb = wakeup_cb;
buffer->wakeup_cb_ctx = wakeup_cb_ctx;
pthread_mutex_unlock(&root->lock);
mp_mutex_unlock(&root->lock);
return buffer;
}
}
@ -845,21 +844,21 @@ struct mp_log_buffer *mp_msg_log_buffer_new(struct mpv_global *global,
.wakeup_cb_ctx = wakeup_cb_ctx,
};
pthread_mutex_init(&buffer->lock, NULL);
mp_mutex_init(&buffer->lock);
MP_TARRAY_APPEND(root, root->buffers, root->num_buffers, buffer);
atomic_fetch_add(&root->reload_counter, 1);
pthread_mutex_unlock(&root->lock);
mp_mutex_unlock(&root->lock);
return buffer;
}
void mp_msg_log_buffer_set_silent(struct mp_log_buffer *buffer, bool silent)
{
pthread_mutex_lock(&buffer->lock);
mp_mutex_lock(&buffer->lock);
buffer->silent = silent;
pthread_mutex_unlock(&buffer->lock);
mp_mutex_unlock(&buffer->lock);
}
void mp_msg_log_buffer_destroy(struct mp_log_buffer *buffer)
@ -869,7 +868,7 @@ void mp_msg_log_buffer_destroy(struct mp_log_buffer *buffer)
struct mp_log_root *root = buffer->root;
pthread_mutex_lock(&root->lock);
mp_mutex_lock(&root->lock);
for (int n = 0; n < root->num_buffers; n++) {
if (root->buffers[n] == buffer) {
@ -885,11 +884,11 @@ found:
while (buffer->num_entries)
talloc_free(log_buffer_read(buffer));
pthread_mutex_destroy(&buffer->lock);
mp_mutex_destroy(&buffer->lock);
talloc_free(buffer);
atomic_fetch_add(&root->reload_counter, 1);
pthread_mutex_unlock(&root->lock);
mp_mutex_unlock(&root->lock);
}
// Return a queued message, or if the buffer is empty, NULL.
@ -898,7 +897,7 @@ struct mp_log_buffer_entry *mp_msg_log_buffer_read(struct mp_log_buffer *buffer)
{
struct mp_log_buffer_entry *res = NULL;
pthread_mutex_lock(&buffer->lock);
mp_mutex_lock(&buffer->lock);
if (!buffer->silent && buffer->num_entries) {
if (buffer->dropped) {
@ -916,7 +915,7 @@ struct mp_log_buffer_entry *mp_msg_log_buffer_read(struct mp_log_buffer *buffer)
}
}
pthread_mutex_unlock(&buffer->lock);
mp_mutex_unlock(&buffer->lock);
return res;
}

View File

@ -9,6 +9,7 @@
#include "misc/node.h"
#include "msg.h"
#include "options/m_option.h"
#include "osdep/threads.h"
#include "osdep/timer.h"
#include "stats.h"
@ -17,7 +18,7 @@ struct stats_base {
atomic_bool active;
pthread_mutex_t lock;
mp_mutex lock;
struct {
struct stats_ctx *head, *tail;
@ -60,27 +61,12 @@ struct stat_entry {
int64_t val_th;
int64_t time_start_ns;
int64_t cpu_start_ns;
pthread_t thread;
mp_thread thread;
};
#define IS_ACTIVE(ctx) \
(atomic_load_explicit(&(ctx)->base->active, memory_order_relaxed))
// Overflows only after I'm dead.
static int64_t get_thread_cpu_time_ns(pthread_t thread)
{
#if defined(_POSIX_TIMERS) && _POSIX_TIMERS > 0 && defined(_POSIX_THREAD_CPUTIME)
clockid_t id;
struct timespec tv;
if (pthread_getcpuclockid(thread, &id) == 0 &&
clock_gettime(id, &tv) == 0)
{
return tv.tv_sec * (1000LL * 1000LL * 1000LL) + tv.tv_nsec;
}
#endif
return 0;
}
static void stats_destroy(void *p)
{
struct stats_base *stats = p;
@ -88,7 +74,7 @@ static void stats_destroy(void *p)
// All entries must have been destroyed before this.
assert(!stats->list.head);
pthread_mutex_destroy(&stats->lock);
mp_mutex_destroy(&stats->lock);
}
void stats_global_init(struct mpv_global *global)
@ -96,7 +82,7 @@ void stats_global_init(struct mpv_global *global)
assert(!global->stats);
struct stats_base *stats = talloc_zero(global, struct stats_base);
ta_set_destructor(stats, stats_destroy);
pthread_mutex_init(&stats->lock, NULL);
mp_mutex_init(&stats->lock);
global->stats = stats;
stats->global = global;
@ -126,7 +112,7 @@ void stats_global_query(struct mpv_global *global, struct mpv_node *out)
struct stats_base *stats = global->stats;
assert(stats);
pthread_mutex_lock(&stats->lock);
mp_mutex_lock(&stats->lock);
atomic_store(&stats->active, true);
@ -195,7 +181,7 @@ void stats_global_query(struct mpv_global *global, struct mpv_node *out)
break;
}
case VAL_THREAD_CPU_TIME: {
int64_t t = get_thread_cpu_time_ns(e->thread);
int64_t t = mp_thread_cpu_time_ns(e->thread);
if (!e->cpu_start_ns)
e->cpu_start_ns = t;
double t_msec = MP_TIME_NS_TO_MS(t - e->cpu_start_ns);
@ -207,17 +193,17 @@ void stats_global_query(struct mpv_global *global, struct mpv_node *out)
}
}
pthread_mutex_unlock(&stats->lock);
mp_mutex_unlock(&stats->lock);
}
static void stats_ctx_destroy(void *p)
{
struct stats_ctx *ctx = p;
pthread_mutex_lock(&ctx->base->lock);
mp_mutex_lock(&ctx->base->lock);
LL_REMOVE(list, &ctx->base->list, ctx);
ctx->base->num_entries = 0; // invalidate
pthread_mutex_unlock(&ctx->base->lock);
mp_mutex_unlock(&ctx->base->lock);
}
struct stats_ctx *stats_ctx_create(void *ta_parent, struct mpv_global *global,
@ -231,10 +217,10 @@ struct stats_ctx *stats_ctx_create(void *ta_parent, struct mpv_global *global,
ctx->prefix = talloc_strdup(ctx, prefix);
ta_set_destructor(ctx, stats_ctx_destroy);
pthread_mutex_lock(&base->lock);
mp_mutex_lock(&base->lock);
LL_APPEND(list, &base->list, ctx);
base->num_entries = 0; // invalidate
pthread_mutex_unlock(&base->lock);
mp_mutex_unlock(&base->lock);
return ctx;
}
@ -263,11 +249,11 @@ static void static_value(struct stats_ctx *ctx, const char *name, double val,
{
if (!IS_ACTIVE(ctx))
return;
pthread_mutex_lock(&ctx->base->lock);
mp_mutex_lock(&ctx->base->lock);
struct stat_entry *e = find_entry(ctx, name);
e->val_d = val;
e->type = type;
pthread_mutex_unlock(&ctx->base->lock);
mp_mutex_unlock(&ctx->base->lock);
}
void stats_value(struct stats_ctx *ctx, const char *name, double val)
@ -285,11 +271,11 @@ void stats_time_start(struct stats_ctx *ctx, const char *name)
MP_STATS(ctx->base->global, "start %s", name);
if (!IS_ACTIVE(ctx))
return;
pthread_mutex_lock(&ctx->base->lock);
mp_mutex_lock(&ctx->base->lock);
struct stat_entry *e = find_entry(ctx, name);
e->cpu_start_ns = get_thread_cpu_time_ns(pthread_self());
e->cpu_start_ns = mp_thread_cpu_time_ns(mp_thread_self());
e->time_start_ns = mp_time_ns();
pthread_mutex_unlock(&ctx->base->lock);
mp_mutex_unlock(&ctx->base->lock);
}
void stats_time_end(struct stats_ctx *ctx, const char *name)
@ -297,36 +283,36 @@ void stats_time_end(struct stats_ctx *ctx, const char *name)
MP_STATS(ctx->base->global, "end %s", name);
if (!IS_ACTIVE(ctx))
return;
pthread_mutex_lock(&ctx->base->lock);
mp_mutex_lock(&ctx->base->lock);
struct stat_entry *e = find_entry(ctx, name);
if (e->time_start_ns) {
e->type = VAL_TIME;
e->val_rt += mp_time_ns() - e->time_start_ns;
e->val_th += get_thread_cpu_time_ns(pthread_self()) - e->cpu_start_ns;
e->val_th += mp_thread_cpu_time_ns(mp_thread_self()) - e->cpu_start_ns;
e->time_start_ns = 0;
}
pthread_mutex_unlock(&ctx->base->lock);
mp_mutex_unlock(&ctx->base->lock);
}
void stats_event(struct stats_ctx *ctx, const char *name)
{
if (!IS_ACTIVE(ctx))
return;
pthread_mutex_lock(&ctx->base->lock);
mp_mutex_lock(&ctx->base->lock);
struct stat_entry *e = find_entry(ctx, name);
e->val_d += 1;
e->type = VAL_INC;
pthread_mutex_unlock(&ctx->base->lock);
mp_mutex_unlock(&ctx->base->lock);
}
static void register_thread(struct stats_ctx *ctx, const char *name,
enum val_type type)
{
pthread_mutex_lock(&ctx->base->lock);
mp_mutex_lock(&ctx->base->lock);
struct stat_entry *e = find_entry(ctx, name);
e->type = type;
e->thread = pthread_self();
pthread_mutex_unlock(&ctx->base->lock);
e->thread = mp_thread_self();
mp_mutex_unlock(&ctx->base->lock);
}
void stats_register_thread_cputime(struct stats_ctx *ctx, const char *name)

View File

@ -30,5 +30,5 @@ void stats_event(struct stats_ctx *ctx, const char *name);
// or stats_unregister_thread() is called, otherwise UB will occur.
void stats_register_thread_cputime(struct stats_ctx *ctx, const char *name);
// Remove reference to pthread_self().
// Remove reference to mp_thread_self().
void stats_unregister_thread(struct stats_ctx *ctx, const char *name);

View File

@ -19,7 +19,6 @@
#include <float.h>
#include <limits.h>
#include <math.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
@ -160,9 +159,9 @@ struct demux_internal {
// The lock protects the packet queues (struct demux_stream),
// and the fields below.
pthread_mutex_t lock;
pthread_cond_t wakeup;
pthread_t thread;
mp_mutex lock;
mp_cond wakeup;
mp_thread thread;
// -- All the following fields are protected by lock.
@ -431,7 +430,7 @@ struct demux_stream {
static void switch_to_fresh_cache_range(struct demux_internal *in);
static void demuxer_sort_chapters(demuxer_t *demuxer);
static void *demux_thread(void *pctx);
static MP_THREAD_VOID demux_thread(void *pctx);
static void update_cache(struct demux_internal *in);
static void add_packet_locked(struct sh_stream *stream, demux_packet_t *dp);
static struct demux_packet *advance_reader_head(struct demux_stream *ds);
@ -857,7 +856,7 @@ static void wakeup_ds(struct demux_stream *ds)
ds->in->wakeup_cb(ds->in->wakeup_cb_ctx);
}
ds->need_wakeup = false;
pthread_cond_signal(&ds->in->wakeup);
mp_cond_signal(&ds->in->wakeup);
}
}
@ -920,9 +919,9 @@ static void update_stream_selection_state(struct demux_internal *in,
void demux_set_ts_offset(struct demuxer *demuxer, double offset)
{
struct demux_internal *in = demuxer->in;
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
in->ts_offset = offset;
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
}
static void add_missing_streams(struct demux_internal *in,
@ -1036,9 +1035,9 @@ void demux_add_sh_stream(struct demuxer *demuxer, struct sh_stream *sh)
{
struct demux_internal *in = demuxer->in;
assert(demuxer == in->d_thread);
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
demux_add_sh_stream_locked(in, sh);
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
}
// Return a stream with the given index. Since streams can only be added during
@ -1048,10 +1047,10 @@ void demux_add_sh_stream(struct demuxer *demuxer, struct sh_stream *sh)
struct sh_stream *demux_get_stream(struct demuxer *demuxer, int index)
{
struct demux_internal *in = demuxer->in;
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
assert(index >= 0 && index < in->num_streams);
struct sh_stream *r = in->streams[index];
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
return r;
}
@ -1059,9 +1058,9 @@ struct sh_stream *demux_get_stream(struct demuxer *demuxer, int index)
int demux_get_num_stream(struct demuxer *demuxer)
{
struct demux_internal *in = demuxer->in;
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
int r = in->num_streams;
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
return r;
}
@ -1100,8 +1099,8 @@ static void demux_dealloc(struct demux_internal *in)
{
for (int n = 0; n < in->num_streams; n++)
talloc_free(in->streams[n]);
pthread_mutex_destroy(&in->lock);
pthread_cond_destroy(&in->wakeup);
mp_mutex_destroy(&in->lock);
mp_cond_destroy(&in->wakeup);
talloc_free(in->d_user);
}
@ -1132,11 +1131,11 @@ struct demux_free_async_state *demux_free_async(struct demuxer *demuxer)
if (!in->threading)
return NULL;
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
in->thread_terminate = true;
in->shutdown_async = true;
pthread_cond_signal(&in->wakeup);
pthread_mutex_unlock(&in->lock);
mp_cond_signal(&in->wakeup);
mp_mutex_unlock(&in->lock);
return (struct demux_free_async_state *)demuxer->in; // lies
}
@ -1160,9 +1159,9 @@ bool demux_free_async_finish(struct demux_free_async_state *state)
{
struct demux_internal *in = (struct demux_internal *)state; // reverse lies
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
bool busy = in->shutdown_async;
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
if (busy)
return false;
@ -1191,7 +1190,7 @@ void demux_start_thread(struct demuxer *demuxer)
if (!in->threading) {
in->threading = true;
if (pthread_create(&in->thread, NULL, demux_thread, in))
if (mp_thread_create(&in->thread, demux_thread, in))
in->threading = false;
}
}
@ -1202,11 +1201,11 @@ void demux_stop_thread(struct demuxer *demuxer)
assert(demuxer == in->d_user);
if (in->threading) {
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
in->thread_terminate = true;
pthread_cond_signal(&in->wakeup);
pthread_mutex_unlock(&in->lock);
pthread_join(in->thread, NULL);
mp_cond_signal(&in->wakeup);
mp_mutex_unlock(&in->lock);
mp_thread_join(in->thread);
in->threading = false;
in->thread_terminate = false;
}
@ -1216,10 +1215,10 @@ void demux_stop_thread(struct demuxer *demuxer)
void demux_set_wakeup_cb(struct demuxer *demuxer, void (*cb)(void *ctx), void *ctx)
{
struct demux_internal *in = demuxer->in;
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
in->wakeup_cb = cb;
in->wakeup_cb_ctx = ctx;
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
}
void demux_start_prefetch(struct demuxer *demuxer)
@ -1227,10 +1226,10 @@ void demux_start_prefetch(struct demuxer *demuxer)
struct demux_internal *in = demuxer->in;
assert(demuxer == in->d_user);
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
in->reading = true;
pthread_cond_signal(&in->wakeup);
pthread_mutex_unlock(&in->lock);
mp_cond_signal(&in->wakeup);
mp_mutex_unlock(&in->lock);
}
const char *stream_type_name(enum stream_type type)
@ -1267,10 +1266,10 @@ void demuxer_feed_caption(struct sh_stream *stream, demux_packet_t *dp)
{
struct demux_internal *in = stream->ds->in;
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
struct sh_stream *sh = demuxer_get_cc_track_locked(stream);
if (!sh) {
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
talloc_free(dp);
return;
}
@ -1280,7 +1279,7 @@ void demuxer_feed_caption(struct sh_stream *stream, demux_packet_t *dp)
dp->dts = MP_ADD_PTS(dp->dts, -in->ts_offset);
dp->stream = sh->index;
add_packet_locked(sh, dp);
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
}
static void error_on_backward_demuxing(struct demux_internal *in)
@ -1317,8 +1316,8 @@ static void perform_backward_seek(struct demux_internal *in)
in->reading = true;
// Don't starve other threads.
pthread_mutex_unlock(&in->lock);
pthread_mutex_lock(&in->lock);
mp_mutex_unlock(&in->lock);
mp_mutex_lock(&in->lock);
}
// For incremental backward demuxing search work.
@ -1552,7 +1551,7 @@ resume_earlier:
ds->reader_head = t;
ds->back_need_recheck = true;
in->back_any_need_recheck = true;
pthread_cond_signal(&in->wakeup);
mp_cond_signal(&in->wakeup);
} else {
ds->back_seek_pos -= in->d_user->opts->back_seek_size;
in->need_back_seek = true;
@ -2253,7 +2252,7 @@ static bool read_packet(struct demux_internal *in)
in->reading = true;
in->after_seek = false;
in->after_seek_to_start = false;
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
struct demuxer *demux = in->d_thread;
struct demux_packet *pkt = NULL;
@ -2262,7 +2261,7 @@ static bool read_packet(struct demux_internal *in)
if (demux->desc->read_packet && !demux_cancel_test(demux))
eof = !demux->desc->read_packet(demux, &pkt);
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
update_cache(in);
if (pkt) {
@ -2278,7 +2277,7 @@ static bool read_packet(struct demux_internal *in)
if (!in->eof) {
if (in->wakeup_cb)
in->wakeup_cb(in->wakeup_cb_ctx);
pthread_cond_signal(&in->wakeup);
mp_cond_signal(&in->wakeup);
MP_VERBOSE(in, "EOF reached.\n");
}
}
@ -2407,12 +2406,12 @@ static void execute_trackswitch(struct demux_internal *in)
{
in->tracks_switched = false;
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
if (in->d_thread->desc->switched_tracks)
in->d_thread->desc->switched_tracks(in->d_thread);
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
}
static void execute_seek(struct demux_internal *in)
@ -2435,7 +2434,7 @@ static void execute_seek(struct demux_internal *in)
if (in->recorder)
mp_recorder_mark_discontinuity(in->recorder);
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
MP_VERBOSE(in, "execute seek (to %f flags %d)\n", pts, flags);
@ -2444,7 +2443,7 @@ static void execute_seek(struct demux_internal *in)
MP_VERBOSE(in, "seek done\n");
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
in->seeking_in_progress = MP_NOPTS_VALUE;
}
@ -2545,26 +2544,25 @@ static bool thread_work(struct demux_internal *in)
return false;
}
static void *demux_thread(void *pctx)
static MP_THREAD_VOID demux_thread(void *pctx)
{
struct demux_internal *in = pctx;
mpthread_set_name("demux");
pthread_mutex_lock(&in->lock);
mp_thread_set_name("demux");
mp_mutex_lock(&in->lock);
stats_register_thread_cputime(in->stats, "thread");
while (!in->thread_terminate) {
if (thread_work(in))
continue;
pthread_cond_signal(&in->wakeup);
struct timespec until = mp_time_ns_to_realtime(in->next_cache_update);
pthread_cond_timedwait(&in->wakeup, &in->lock, &until);
mp_cond_signal(&in->wakeup);
mp_cond_timedwait_until(&in->wakeup, &in->lock, in->next_cache_update);
}
if (in->shutdown_async) {
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
demux_shutdown(in);
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
in->shutdown_async = false;
if (in->wakeup_cb)
in->wakeup_cb(in->wakeup_cb_ctx);
@ -2572,8 +2570,8 @@ static void *demux_thread(void *pctx)
stats_unregister_thread(in->stats, "thread");
pthread_mutex_unlock(&in->lock);
return NULL;
mp_mutex_unlock(&in->lock);
MP_THREAD_RETURN();
}
// Low-level part of dequeueing a packet.
@ -2645,7 +2643,7 @@ static int dequeue_packet(struct demux_stream *ds, double min_pts,
if (!in->reading && !in->eof) {
in->reading = true; // enable demuxer thread prefetching
pthread_cond_signal(&in->wakeup);
mp_cond_signal(&in->wakeup);
}
ds->force_read_until = min_pts;
@ -2784,7 +2782,7 @@ int demux_read_packet_async_until(struct sh_stream *sh, double min_pts,
return -1;
struct demux_internal *in = ds->in;
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
int r = -1;
while (1) {
r = dequeue_packet(ds, min_pts, out_pkt);
@ -2793,7 +2791,7 @@ int demux_read_packet_async_until(struct sh_stream *sh, double min_pts,
// Needs to actually read packets until we got a packet or EOF.
thread_work(in);
}
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
return r;
}
@ -2802,7 +2800,7 @@ int demux_read_packet_async_until(struct sh_stream *sh, double min_pts,
struct demux_packet *demux_read_any_packet(struct demuxer *demuxer)
{
struct demux_internal *in = demuxer->in;
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
assert(!in->threading); // doesn't work with threading
struct demux_packet *out_pkt = NULL;
bool read_more = true;
@ -2820,7 +2818,7 @@ struct demux_packet *demux_read_any_packet(struct demuxer *demuxer)
read_more &= !all_eof;
}
done:
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
return out_pkt;
}
@ -3043,7 +3041,7 @@ void demux_stream_tags_changed(struct demuxer *demuxer, struct sh_stream *sh,
struct demux_stream *ds = sh ? sh->ds : NULL;
assert(!sh || ds); // stream must have been added
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
if (pts == MP_NOPTS_VALUE) {
MP_WARN(in, "Discarding timed metadata without timestamp.\n");
@ -3052,7 +3050,7 @@ void demux_stream_tags_changed(struct demuxer *demuxer, struct sh_stream *sh,
}
talloc_free(tags);
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
}
// This is called by demuxer implementations if demuxer->metadata changed.
@ -3062,9 +3060,9 @@ void demux_metadata_changed(demuxer_t *demuxer)
assert(demuxer == demuxer->in->d_thread); // call from demuxer impl. only
struct demux_internal *in = demuxer->in;
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
add_timed_metadata(in, demuxer->metadata, NULL, MP_NOPTS_VALUE);
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
}
// Called locked, with user demuxer.
@ -3142,7 +3140,7 @@ void demux_update(demuxer_t *demuxer, double pts)
assert(demuxer == demuxer->in->d_user);
struct demux_internal *in = demuxer->in;
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
if (!in->threading)
update_cache(in);
@ -3169,7 +3167,7 @@ void demux_update(demuxer_t *demuxer, double pts)
if (demuxer->events & DEMUX_EVENT_DURATION)
demuxer->duration = in->duration;
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
}
static void demux_init_cuesheet(struct demuxer *demuxer)
@ -3218,13 +3216,13 @@ static void demux_init_ccs(struct demuxer *demuxer, struct demux_opts *opts)
struct demux_internal *in = demuxer->in;
if (!opts->create_ccs)
return;
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
for (int n = 0; n < in->num_streams; n++) {
struct sh_stream *sh = in->streams[n];
if (sh->type == STREAM_VIDEO && !sh->attached_picture)
demuxer_get_cc_track_locked(sh);
}
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
}
// Return whether "heavy" caching on this stream is enabled. By default, this
@ -3234,9 +3232,9 @@ static void demux_init_ccs(struct demuxer *demuxer, struct demux_opts *opts)
bool demux_is_network_cached(demuxer_t *demuxer)
{
struct demux_internal *in = demuxer->in;
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
bool r = in->using_network_cache_opts;
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
return r;
}
@ -3300,8 +3298,8 @@ static struct demuxer *open_given_type(struct mpv_global *global,
.demux_ts = MP_NOPTS_VALUE,
.owns_stream = !params->external_stream,
};
pthread_mutex_init(&in->lock, NULL);
pthread_cond_init(&in->wakeup, NULL);
mp_mutex_init(&in->lock);
mp_cond_init(&in->wakeup);
*in->d_thread = *demuxer;
@ -3507,7 +3505,7 @@ void demux_flush(demuxer_t *demuxer)
struct demux_internal *in = demuxer->in;
assert(demuxer == in->d_user);
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
clear_reader_state(in, true);
for (int n = 0; n < in->num_ranges; n++)
clear_cached_range(in, in->ranges[n]);
@ -3519,7 +3517,7 @@ void demux_flush(demuxer_t *demuxer)
}
in->eof = false;
in->seeking = false;
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
}
// Does some (but not all) things for switching to another range.
@ -3783,15 +3781,15 @@ int demux_seek(demuxer_t *demuxer, double seek_pts, int flags)
struct demux_internal *in = demuxer->in;
assert(demuxer == in->d_user);
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
if (!(flags & SEEK_FACTOR))
seek_pts = MP_ADD_PTS(seek_pts, -in->ts_offset);
int res = queue_seek(in, seek_pts, flags, true);
pthread_cond_signal(&in->wakeup);
pthread_mutex_unlock(&in->lock);
mp_cond_signal(&in->wakeup);
mp_mutex_unlock(&in->lock);
return res;
}
@ -3967,7 +3965,7 @@ void demuxer_select_track(struct demuxer *demuxer, struct sh_stream *stream,
{
struct demux_internal *in = demuxer->in;
struct demux_stream *ds = stream->ds;
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
ref_pts = MP_ADD_PTS(ref_pts, -in->ts_offset);
// don't flush buffers if stream is already selected / unselected
if (ds->selected != selected) {
@ -3982,12 +3980,12 @@ void demuxer_select_track(struct demuxer *demuxer, struct sh_stream *stream,
initiate_refresh_seek(in, ds, ref_pts);
}
if (in->threading) {
pthread_cond_signal(&in->wakeup);
mp_cond_signal(&in->wakeup);
} else {
execute_trackswitch(in);
}
}
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
}
// Execute a refresh seek on the given stream.
@ -3997,7 +3995,7 @@ void demuxer_refresh_track(struct demuxer *demuxer, struct sh_stream *stream,
{
struct demux_internal *in = demuxer->in;
struct demux_stream *ds = stream->ds;
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
ref_pts = MP_ADD_PTS(ref_pts, -in->ts_offset);
if (ds->selected) {
MP_VERBOSE(in, "refresh track %d\n", stream->index);
@ -4007,7 +4005,7 @@ void demuxer_refresh_track(struct demuxer *demuxer, struct sh_stream *stream,
if (!in->after_seek)
initiate_refresh_seek(in, ds, ref_pts);
}
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
}
// This is for demuxer implementations only. demuxer_select_track() sets the
@ -4019,20 +4017,20 @@ bool demux_stream_is_selected(struct sh_stream *stream)
if (!stream)
return false;
bool r = false;
pthread_mutex_lock(&stream->ds->in->lock);
mp_mutex_lock(&stream->ds->in->lock);
r = stream->ds->selected;
pthread_mutex_unlock(&stream->ds->in->lock);
mp_mutex_unlock(&stream->ds->in->lock);
return r;
}
void demux_set_stream_wakeup_cb(struct sh_stream *sh,
void (*cb)(void *ctx), void *ctx)
{
pthread_mutex_lock(&sh->ds->in->lock);
mp_mutex_lock(&sh->ds->in->lock);
sh->ds->wakeup_cb = cb;
sh->ds->wakeup_cb_ctx = ctx;
sh->ds->need_wakeup = true;
pthread_mutex_unlock(&sh->ds->in->lock);
mp_mutex_unlock(&sh->ds->in->lock);
}
int demuxer_add_attachment(demuxer_t *demuxer, char *name, char *type,
@ -4093,14 +4091,14 @@ void demux_block_reading(struct demuxer *demuxer, bool block)
struct demux_internal *in = demuxer->in;
assert(demuxer == in->d_user);
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
in->blocked = block;
for (int n = 0; n < in->num_streams; n++) {
in->streams[n]->ds->need_wakeup = true;
wakeup_ds(in->streams[n]->ds);
}
pthread_cond_signal(&in->wakeup);
pthread_mutex_unlock(&in->lock);
mp_cond_signal(&in->wakeup);
mp_mutex_unlock(&in->lock);
}
static void update_bytes_read(struct demux_internal *in)
@ -4136,7 +4134,7 @@ static void update_cache(struct demux_internal *in)
bool do_update = diff >= MP_TIME_S_TO_NS(1);
// Don't lock while querying the stream.
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
int64_t stream_size = -1;
struct mp_tags *stream_metadata = NULL;
@ -4146,7 +4144,7 @@ static void update_cache(struct demux_internal *in)
stream_control(stream, STREAM_CTRL_GET_METADATA, &stream_metadata);
}
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
update_bytes_read(in);
@ -4313,7 +4311,7 @@ bool demux_cache_dump_set(struct demuxer *demuxer, double start, double end,
bool res = false;
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
start = MP_ADD_PTS(start, -in->ts_offset);
end = MP_ADD_PTS(end, -in->ts_offset);
@ -4334,7 +4332,7 @@ bool demux_cache_dump_set(struct demuxer *demuxer, double start, double end,
dump_cache(in, start, end);
}
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
return res;
}
@ -4343,9 +4341,9 @@ bool demux_cache_dump_set(struct demuxer *demuxer, double start, double end,
int demux_cache_dump_get_status(struct demuxer *demuxer)
{
struct demux_internal *in = demuxer->in;
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
int status = in->dumper_status;
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
return status;
}
@ -4364,7 +4362,7 @@ double demux_probe_cache_dump_target(struct demuxer *demuxer, double pts,
if (pts == MP_NOPTS_VALUE)
return pts;
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
pts = MP_ADD_PTS(pts, -in->ts_offset);
@ -4421,7 +4419,7 @@ double demux_probe_cache_dump_target(struct demuxer *demuxer, double pts,
res = MP_ADD_PTS(res, in->ts_offset);
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
return res;
}
@ -4459,7 +4457,7 @@ void demux_get_bitrate_stats(struct demuxer *demuxer, double *rates)
struct demux_internal *in = demuxer->in;
assert(demuxer == in->d_user);
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
for (int n = 0; n < STREAM_TYPE_COUNT; n++)
rates[n] = -1;
@ -4469,7 +4467,7 @@ void demux_get_bitrate_stats(struct demuxer *demuxer, double *rates)
rates[ds->type] = MPMAX(0, rates[ds->type]) + ds->bitrate;
}
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
}
void demux_get_reader_state(struct demuxer *demuxer, struct demux_reader_state *r)
@ -4477,7 +4475,7 @@ void demux_get_reader_state(struct demuxer *demuxer, struct demux_reader_state *
struct demux_internal *in = demuxer->in;
assert(demuxer == in->d_user);
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
*r = (struct demux_reader_state){
.eof = in->eof,
@ -4524,7 +4522,7 @@ void demux_get_reader_state(struct demuxer *demuxer, struct demux_reader_state *
}
}
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
}
bool demux_cancel_test(struct demuxer *demuxer)

View File

@ -17,7 +17,6 @@
* License along with mpv. If not, see <http://www.gnu.org/licenses/>.
*/
#include <pthread.h>
#include <stdlib.h>
#include <limits.h>
#include <stdbool.h>

View File

@ -1,10 +1,10 @@
#include <limits.h>
#include <pthread.h>
#include <stdatomic.h>
#include "audio/aframe.h"
#include "common/common.h"
#include "common/msg.h"
#include "osdep/threads.h"
#include "f_async_queue.h"
#include "filter_internal.h"
@ -18,7 +18,7 @@ struct mp_async_queue {
struct async_queue {
_Atomic uint64_t refcount;
pthread_mutex_t lock;
mp_mutex lock;
// -- protected by lock
struct mp_async_queue_config cfg;
@ -34,7 +34,7 @@ struct async_queue {
static void reset_queue(struct async_queue *q)
{
pthread_mutex_lock(&q->lock);
mp_mutex_lock(&q->lock);
q->active = q->reading = false;
for (int n = 0; n < q->num_frames; n++)
mp_frame_unref(&q->frames[n]);
@ -46,7 +46,7 @@ static void reset_queue(struct async_queue *q)
if (q->conn[n])
mp_filter_wakeup(q->conn[n]);
}
pthread_mutex_unlock(&q->lock);
mp_mutex_unlock(&q->lock);
}
static void unref_queue(struct async_queue *q)
@ -57,7 +57,7 @@ static void unref_queue(struct async_queue *q)
assert(count >= 0);
if (count == 0) {
reset_queue(q);
pthread_mutex_destroy(&q->lock);
mp_mutex_destroy(&q->lock);
talloc_free(q);
}
}
@ -75,7 +75,7 @@ struct mp_async_queue *mp_async_queue_create(void)
*r->q = (struct async_queue){
.refcount = 1,
};
pthread_mutex_init(&r->q->lock, NULL);
mp_mutex_init(&r->q->lock);
talloc_set_destructor(r, on_free_queue);
mp_async_queue_set_config(r, (struct mp_async_queue_config){0});
return r;
@ -142,12 +142,12 @@ void mp_async_queue_set_config(struct mp_async_queue *queue,
cfg.max_samples = MPMAX(cfg.max_samples, 1);
pthread_mutex_lock(&q->lock);
mp_mutex_lock(&q->lock);
bool recompute = q->cfg.sample_unit != cfg.sample_unit;
q->cfg = cfg;
if (recompute)
recompute_sizes(q);
pthread_mutex_unlock(&q->lock);
mp_mutex_unlock(&q->lock);
}
void mp_async_queue_reset(struct mp_async_queue *queue)
@ -158,18 +158,18 @@ void mp_async_queue_reset(struct mp_async_queue *queue)
bool mp_async_queue_is_active(struct mp_async_queue *queue)
{
struct async_queue *q = queue->q;
pthread_mutex_lock(&q->lock);
mp_mutex_lock(&q->lock);
bool res = q->active;
pthread_mutex_unlock(&q->lock);
mp_mutex_unlock(&q->lock);
return res;
}
bool mp_async_queue_is_full(struct mp_async_queue *queue)
{
struct async_queue *q = queue->q;
pthread_mutex_lock(&q->lock);
mp_mutex_lock(&q->lock);
bool res = is_full(q);
pthread_mutex_unlock(&q->lock);
mp_mutex_unlock(&q->lock);
return res;
}
@ -177,21 +177,21 @@ void mp_async_queue_resume(struct mp_async_queue *queue)
{
struct async_queue *q = queue->q;
pthread_mutex_lock(&q->lock);
mp_mutex_lock(&q->lock);
if (!q->active) {
q->active = true;
// Possibly make the consumer request new frames.
if (q->conn[1])
mp_filter_wakeup(q->conn[1]);
}
pthread_mutex_unlock(&q->lock);
mp_mutex_unlock(&q->lock);
}
void mp_async_queue_resume_reading(struct mp_async_queue *queue)
{
struct async_queue *q = queue->q;
pthread_mutex_lock(&q->lock);
mp_mutex_lock(&q->lock);
if (!q->active || !q->reading) {
q->active = true;
q->reading = true;
@ -201,24 +201,24 @@ void mp_async_queue_resume_reading(struct mp_async_queue *queue)
mp_filter_wakeup(q->conn[n]);
}
}
pthread_mutex_unlock(&q->lock);
mp_mutex_unlock(&q->lock);
}
int64_t mp_async_queue_get_samples(struct mp_async_queue *queue)
{
struct async_queue *q = queue->q;
pthread_mutex_lock(&q->lock);
mp_mutex_lock(&q->lock);
int64_t res = q->samples_size;
pthread_mutex_unlock(&q->lock);
mp_mutex_unlock(&q->lock);
return res;
}
int mp_async_queue_get_frames(struct mp_async_queue *queue)
{
struct async_queue *q = queue->q;
pthread_mutex_lock(&q->lock);
mp_mutex_lock(&q->lock);
int res = q->num_frames;
pthread_mutex_unlock(&q->lock);
mp_mutex_unlock(&q->lock);
return res;
}
@ -232,12 +232,12 @@ static void destroy(struct mp_filter *f)
struct priv *p = f->priv;
struct async_queue *q = p->q;
pthread_mutex_lock(&q->lock);
mp_mutex_lock(&q->lock);
for (int n = 0; n < 2; n++) {
if (q->conn[n] == f)
q->conn[n] = NULL;
}
pthread_mutex_unlock(&q->lock);
mp_mutex_unlock(&q->lock);
unref_queue(q);
}
@ -248,7 +248,7 @@ static void process_in(struct mp_filter *f)
struct async_queue *q = p->q;
assert(q->conn[0] == f);
pthread_mutex_lock(&q->lock);
mp_mutex_lock(&q->lock);
if (!q->reading) {
// mp_async_queue_reset()/reset_queue() is usually called asynchronously,
// so we might have requested a frame earlier, and now can't use it.
@ -274,7 +274,7 @@ static void process_in(struct mp_filter *f)
}
if (p->notify && !q->num_frames)
mp_filter_wakeup(p->notify);
pthread_mutex_unlock(&q->lock);
mp_mutex_unlock(&q->lock);
}
static void process_out(struct mp_filter *f)
@ -286,7 +286,7 @@ static void process_out(struct mp_filter *f)
if (!mp_pin_in_needs_data(f->ppins[0]))
return;
pthread_mutex_lock(&q->lock);
mp_mutex_lock(&q->lock);
if (q->active && !q->reading) {
q->reading = true;
mp_filter_wakeup(q->conn[0]);
@ -301,7 +301,7 @@ static void process_out(struct mp_filter *f)
if (q->conn[0])
mp_filter_wakeup(q->conn[0]);
}
pthread_mutex_unlock(&q->lock);
mp_mutex_unlock(&q->lock);
}
static void reset(struct mp_filter *f)
@ -309,12 +309,12 @@ static void reset(struct mp_filter *f)
struct priv *p = f->priv;
struct async_queue *q = p->q;
pthread_mutex_lock(&q->lock);
mp_mutex_lock(&q->lock);
// If the queue is in reading state, it is logical that it should request
// input immediately.
if (mp_pin_get_dir(f->pins[0]) == MP_PIN_IN && q->reading)
mp_filter_wakeup(f);
pthread_mutex_unlock(&q->lock);
mp_mutex_unlock(&q->lock);
}
// producer
@ -365,11 +365,11 @@ struct mp_filter *mp_async_queue_create_filter(struct mp_filter *parent,
atomic_fetch_add(&q->refcount, 1);
p->q = q;
pthread_mutex_lock(&q->lock);
mp_mutex_lock(&q->lock);
int slot = is_in ? 0 : 1;
assert(!q->conn[slot]); // fails if already connected on this end
q->conn[slot] = f;
pthread_mutex_unlock(&q->lock);
mp_mutex_unlock(&q->lock);
return f;
}

View File

@ -21,7 +21,6 @@
#include <stdbool.h>
#include <math.h>
#include <assert.h>
#include <pthread.h>
#include <libavutil/buffer.h>
#include <libavutil/common.h>
@ -219,9 +218,9 @@ struct priv {
struct mp_async_queue *queue; // decoded frame output queue
struct mp_dispatch_queue *dec_dispatch; // non-NULL if decoding thread used
bool dec_thread_lock; // debugging (esp. for no-thread case)
pthread_t dec_thread;
mp_thread dec_thread;
bool dec_thread_valid;
pthread_mutex_t cache_lock;
mp_mutex cache_lock;
// --- Protected by cache_lock.
char *cur_hwdec;
@ -259,13 +258,13 @@ static int decoder_list_help(struct mp_log *log, const m_option_t *opt,
// thread state. Must run on/locked with decoder thread.
static void update_cached_values(struct priv *p)
{
pthread_mutex_lock(&p->cache_lock);
mp_mutex_lock(&p->cache_lock);
p->cur_hwdec = NULL;
if (p->decoder && p->decoder->control)
p->decoder->control(p->decoder->f, VDCTRL_GET_HWDEC, &p->cur_hwdec);
pthread_mutex_unlock(&p->cache_lock);
mp_mutex_unlock(&p->cache_lock);
}
// Lock the decoder thread. This may synchronously wait until the decoder thread
@ -324,11 +323,11 @@ static void decf_reset(struct mp_filter *f)
p->pts = MP_NOPTS_VALUE;
p->last_format = p->fixed_format = (struct mp_image_params){0};
pthread_mutex_lock(&p->cache_lock);
mp_mutex_lock(&p->cache_lock);
p->pts_reset = false;
p->attempt_framedrops = 0;
p->dropped_frames = 0;
pthread_mutex_unlock(&p->cache_lock);
mp_mutex_unlock(&p->cache_lock);
p->coverart_returned = 0;
@ -347,9 +346,9 @@ int mp_decoder_wrapper_control(struct mp_decoder_wrapper *d,
struct priv *p = d->f->priv;
int res = CONTROL_UNKNOWN;
if (cmd == VDCTRL_GET_HWDEC) {
pthread_mutex_lock(&p->cache_lock);
mp_mutex_lock(&p->cache_lock);
*(char **)arg = p->cur_hwdec;
pthread_mutex_unlock(&p->cache_lock);
mp_mutex_unlock(&p->cache_lock);
} else {
thread_lock(p);
if (p->decoder && p->decoder->control)
@ -415,9 +414,9 @@ static bool reinit_decoder(struct priv *p)
user_list = p->opts->audio_decoders;
fallback = "aac";
pthread_mutex_lock(&p->cache_lock);
mp_mutex_lock(&p->cache_lock);
bool try_spdif = p->try_spdif;
pthread_mutex_unlock(&p->cache_lock);
mp_mutex_unlock(&p->cache_lock);
if (try_spdif && p->codec->codec) {
struct mp_decoder_list *spdif =
@ -450,11 +449,11 @@ static bool reinit_decoder(struct priv *p)
p->decoder = driver->create(p->decf, p->codec, sel->decoder);
if (p->decoder) {
pthread_mutex_lock(&p->cache_lock);
mp_mutex_lock(&p->cache_lock);
const char *d = sel->desc && sel->desc[0] ? sel->desc : sel->decoder;
p->decoder_desc = talloc_strdup(p, d);
MP_VERBOSE(p, "Selected codec: %s\n", p->decoder_desc);
pthread_mutex_unlock(&p->cache_lock);
mp_mutex_unlock(&p->cache_lock);
break;
}
@ -485,25 +484,25 @@ void mp_decoder_wrapper_get_desc(struct mp_decoder_wrapper *d,
char *buf, size_t buf_size)
{
struct priv *p = d->f->priv;
pthread_mutex_lock(&p->cache_lock);
mp_mutex_lock(&p->cache_lock);
snprintf(buf, buf_size, "%s", p->decoder_desc ? p->decoder_desc : "");
pthread_mutex_unlock(&p->cache_lock);
mp_mutex_unlock(&p->cache_lock);
}
void mp_decoder_wrapper_set_frame_drops(struct mp_decoder_wrapper *d, int num)
{
struct priv *p = d->f->priv;
pthread_mutex_lock(&p->cache_lock);
mp_mutex_lock(&p->cache_lock);
p->attempt_framedrops = num;
pthread_mutex_unlock(&p->cache_lock);
mp_mutex_unlock(&p->cache_lock);
}
int mp_decoder_wrapper_get_frames_dropped(struct mp_decoder_wrapper *d)
{
struct priv *p = d->f->priv;
pthread_mutex_lock(&p->cache_lock);
mp_mutex_lock(&p->cache_lock);
int res = p->dropped_frames;
pthread_mutex_unlock(&p->cache_lock);
mp_mutex_unlock(&p->cache_lock);
return res;
}
@ -519,25 +518,25 @@ double mp_decoder_wrapper_get_container_fps(struct mp_decoder_wrapper *d)
void mp_decoder_wrapper_set_spdif_flag(struct mp_decoder_wrapper *d, bool spdif)
{
struct priv *p = d->f->priv;
pthread_mutex_lock(&p->cache_lock);
mp_mutex_lock(&p->cache_lock);
p->try_spdif = spdif;
pthread_mutex_unlock(&p->cache_lock);
mp_mutex_unlock(&p->cache_lock);
}
void mp_decoder_wrapper_set_coverart_flag(struct mp_decoder_wrapper *d, bool c)
{
struct priv *p = d->f->priv;
pthread_mutex_lock(&p->cache_lock);
mp_mutex_lock(&p->cache_lock);
p->attached_picture = c;
pthread_mutex_unlock(&p->cache_lock);
mp_mutex_unlock(&p->cache_lock);
}
bool mp_decoder_wrapper_get_pts_reset(struct mp_decoder_wrapper *d)
{
struct priv *p = d->f->priv;
pthread_mutex_lock(&p->cache_lock);
mp_mutex_lock(&p->cache_lock);
bool res = p->pts_reset;
pthread_mutex_unlock(&p->cache_lock);
mp_mutex_unlock(&p->cache_lock);
return res;
}
@ -800,9 +799,9 @@ static void correct_audio_pts(struct priv *p, struct mp_aframe *aframe)
if (p->pts != MP_NOPTS_VALUE && diff > 0.1) {
MP_WARN(p, "Invalid audio PTS: %f -> %f\n", p->pts, frame_pts);
if (diff >= 5) {
pthread_mutex_lock(&p->cache_lock);
mp_mutex_lock(&p->cache_lock);
p->pts_reset = true;
pthread_mutex_unlock(&p->cache_lock);
mp_mutex_unlock(&p->cache_lock);
}
}
@ -902,10 +901,10 @@ static void feed_packet(struct priv *p)
int framedrop_type = 0;
pthread_mutex_lock(&p->cache_lock);
mp_mutex_lock(&p->cache_lock);
if (p->attempt_framedrops)
framedrop_type = 1;
pthread_mutex_unlock(&p->cache_lock);
mp_mutex_unlock(&p->cache_lock);
if (start_pts != MP_NOPTS_VALUE && packet && p->play_dir > 0 &&
packet->pts < start_pts - .005 && !p->has_broken_packet_pts)
@ -1003,7 +1002,7 @@ static void read_frame(struct priv *p)
if (!frame.type)
return;
pthread_mutex_lock(&p->cache_lock);
mp_mutex_lock(&p->cache_lock);
if (p->attached_picture && frame.type == MP_FRAME_VIDEO)
p->decoded_coverart = frame;
if (p->attempt_framedrops) {
@ -1011,7 +1010,7 @@ static void read_frame(struct priv *p)
p->attempt_framedrops = MPMAX(0, p->attempt_framedrops - dropped);
p->dropped_frames += dropped;
}
pthread_mutex_unlock(&p->cache_lock);
mp_mutex_unlock(&p->cache_lock);
if (p->decoded_coverart.type) {
mp_filter_internal_mark_progress(p->decf);
@ -1098,7 +1097,7 @@ static void decf_process(struct mp_filter *f)
read_frame(p);
}
static void *dec_thread(void *ptr)
static MP_THREAD_VOID dec_thread(void *ptr)
{
struct priv *p = ptr;
@ -1107,7 +1106,7 @@ static void *dec_thread(void *ptr)
case STREAM_VIDEO: t_name = "dec/video"; break;
case STREAM_AUDIO: t_name = "dec/audio"; break;
}
mpthread_set_name(t_name);
mp_thread_set_name(t_name);
while (!p->request_terminate_dec_thread) {
mp_filter_graph_run(p->dec_root_filter);
@ -1115,7 +1114,7 @@ static void *dec_thread(void *ptr)
mp_dispatch_queue_process(p->dec_dispatch, INFINITY);
}
return NULL;
MP_THREAD_RETURN();
}
static void public_f_reset(struct mp_filter *f)
@ -1145,7 +1144,7 @@ static void public_f_destroy(struct mp_filter *f)
p->request_terminate_dec_thread = 1;
mp_dispatch_interrupt(p->dec_dispatch);
thread_unlock(p);
pthread_join(p->dec_thread, NULL);
mp_thread_join(p->dec_thread);
p->dec_thread_valid = false;
}
@ -1153,7 +1152,7 @@ static void public_f_destroy(struct mp_filter *f)
talloc_free(p->dec_root_filter);
talloc_free(p->queue);
pthread_mutex_destroy(&p->cache_lock);
mp_mutex_destroy(&p->cache_lock);
}
static const struct mp_filter_info decf_filter = {
@ -1194,7 +1193,7 @@ struct mp_decoder_wrapper *mp_decoder_wrapper_create(struct mp_filter *parent,
struct priv *p = public_f->priv;
p->public.f = public_f;
pthread_mutex_init(&p->cache_lock, NULL);
mp_mutex_init(&p->cache_lock);
p->opt_cache = m_config_cache_alloc(p, public_f->global, &dec_wrapper_conf);
p->opts = p->opt_cache->opts;
p->header = src;
@ -1264,7 +1263,7 @@ struct mp_decoder_wrapper *mp_decoder_wrapper_create(struct mp_filter *parent,
mp_pin_connect(f_out->pins[0], p->decf->pins[0]);
p->dec_thread_valid = true;
if (pthread_create(&p->dec_thread, NULL, dec_thread, p)) {
if (mp_thread_create(&p->dec_thread, dec_thread, p)) {
p->dec_thread_valid = false;
goto error;
}

View File

@ -1,5 +1,4 @@
#include <math.h>
#include <pthread.h>
#include <stdatomic.h>
#include <libavutil/hwcontext.h>
@ -7,6 +6,7 @@
#include "common/common.h"
#include "common/global.h"
#include "common/msg.h"
#include "osdep/threads.h"
#include "osdep/timer.h"
#include "video/hwdec.h"
#include "video/img_format.h"
@ -90,7 +90,7 @@ struct filter_runner {
// For async notifications only. We don't bother making this fine grained
// across filters.
pthread_mutex_t async_lock;
mp_mutex async_lock;
// Wakeup is pending. Protected by async_lock.
bool async_wakeup_sent;
@ -196,7 +196,7 @@ void mp_filter_internal_mark_progress(struct mp_filter *f)
// sync notifications don't need any locking.
static void flush_async_notifications(struct filter_runner *r)
{
pthread_mutex_lock(&r->async_lock);
mp_mutex_lock(&r->async_lock);
for (int n = 0; n < r->num_async_pending; n++) {
struct mp_filter *f = r->async_pending[n];
add_pending(f);
@ -204,7 +204,7 @@ static void flush_async_notifications(struct filter_runner *r)
}
r->num_async_pending = 0;
r->async_wakeup_sent = false;
pthread_mutex_unlock(&r->async_lock);
mp_mutex_unlock(&r->async_lock);
}
bool mp_filter_graph_run(struct mp_filter *filter)
@ -230,11 +230,11 @@ bool mp_filter_graph_run(struct mp_filter *filter)
if (atomic_exchange_explicit(&r->interrupt_flag, false,
memory_order_acq_rel))
{
pthread_mutex_lock(&r->async_lock);
mp_mutex_lock(&r->async_lock);
if (!r->async_wakeup_sent && r->wakeup_cb)
r->wakeup_cb(r->wakeup_ctx);
r->async_wakeup_sent = true;
pthread_mutex_unlock(&r->async_lock);
mp_mutex_unlock(&r->async_lock);
exit_req = true;
}
@ -703,7 +703,7 @@ struct mp_hwdec_ctx *mp_filter_load_hwdec_device(struct mp_filter *f, int imgfmt
static void filter_wakeup(struct mp_filter *f, bool mark_only)
{
struct filter_runner *r = f->in->runner;
pthread_mutex_lock(&r->async_lock);
mp_mutex_lock(&r->async_lock);
if (!f->in->async_pending) {
f->in->async_pending = true;
// (not using a talloc parent for thread safety reasons)
@ -714,7 +714,7 @@ static void filter_wakeup(struct mp_filter *f, bool mark_only)
r->wakeup_cb(r->wakeup_ctx);
r->async_wakeup_sent = true;
}
pthread_mutex_unlock(&r->async_lock);
mp_mutex_unlock(&r->async_lock);
}
void mp_filter_wakeup(struct mp_filter *f)
@ -784,7 +784,7 @@ static void filter_destructor(void *p)
if (r->root_filter == f) {
assert(!f->in->parent);
pthread_mutex_destroy(&r->async_lock);
mp_mutex_destroy(&r->async_lock);
talloc_free(r->async_pending);
talloc_free(r);
}
@ -816,7 +816,7 @@ struct mp_filter *mp_filter_create_with_params(struct mp_filter_params *params)
.root_filter = f,
.max_run_time = INFINITY,
};
pthread_mutex_init(&f->in->runner->async_lock, NULL);
mp_mutex_init(&f->in->runner->async_lock);
}
if (!f->global)
@ -872,10 +872,10 @@ void mp_filter_graph_set_wakeup_cb(struct mp_filter *root,
{
struct filter_runner *r = root->in->runner;
assert(root == r->root_filter); // user is supposed to call this on root only
pthread_mutex_lock(&r->async_lock);
mp_mutex_lock(&r->async_lock);
r->wakeup_cb = wakeup_cb;
r->wakeup_ctx = ctx;
pthread_mutex_unlock(&r->async_lock);
mp_mutex_unlock(&r->async_lock);
}
static const char *filt_name(struct mp_filter *f)

View File

@ -54,8 +54,8 @@
#include "osdep/macosx_events.h"
#endif
#define input_lock(ictx) pthread_mutex_lock(&ictx->mutex)
#define input_unlock(ictx) pthread_mutex_unlock(&ictx->mutex)
#define input_lock(ictx) mp_mutex_lock(&ictx->mutex)
#define input_unlock(ictx) mp_mutex_unlock(&ictx->mutex)
#define MP_MAX_KEY_DOWN 4
@ -97,7 +97,7 @@ struct wheel_state {
};
struct input_ctx {
pthread_mutex_t mutex;
mp_mutex mutex;
struct mp_log *log;
struct mpv_global *global;
struct m_config_cache *opts_cache;
@ -1322,7 +1322,7 @@ struct input_ctx *mp_input_init(struct mpv_global *global,
ictx->opts = ictx->opts_cache->opts;
mpthread_mutex_init_recursive(&ictx->mutex);
mp_mutex_init_type(&ictx->mutex, MP_MUTEX_RECURSIVE);
// Setup default section, so that it does nothing.
mp_input_enable_section(ictx, NULL, MP_INPUT_ALLOW_VO_DRAGGING |
@ -1414,7 +1414,7 @@ void mp_input_uninit(struct input_ctx *ictx)
close_input_sources(ictx);
clear_queue(&ictx->cmd_queue);
talloc_free(ictx->current_down_cmd);
pthread_mutex_destroy(&ictx->mutex);
mp_mutex_destroy(&ictx->mutex);
talloc_free(ictx);
}
@ -1534,7 +1534,7 @@ struct mpv_node mp_input_get_bindings(struct input_ctx *ictx)
}
struct mp_input_src_internal {
pthread_t thread;
mp_thread thread;
bool thread_running;
bool init_done;
@ -1596,7 +1596,7 @@ static void mp_input_src_kill(struct mp_input_src *src)
if (src->cancel)
src->cancel(src);
if (src->in->thread_running)
pthread_join(src->in->thread, NULL);
mp_thread_join(src->in->thread);
if (src->uninit)
src->uninit(src);
talloc_free(src);
@ -1610,19 +1610,19 @@ void mp_input_src_init_done(struct mp_input_src *src)
{
assert(!src->in->init_done);
assert(src->in->thread_running);
assert(pthread_equal(src->in->thread, pthread_self()));
assert(mp_thread_equal(src->in->thread, mp_thread_self()));
src->in->init_done = true;
mp_rendezvous(&src->in->init_done, 0);
}
static void *input_src_thread(void *ptr)
static MP_THREAD_VOID input_src_thread(void *ptr)
{
void **args = ptr;
struct mp_input_src *src = args[0];
void (*loop_fn)(struct mp_input_src *src, void *ctx) = args[1];
void *ctx = args[2];
mpthread_set_name("input");
mp_thread_set_name("input");
src->in->thread_running = true;
@ -1631,7 +1631,7 @@ static void *input_src_thread(void *ptr)
if (!src->in->init_done)
mp_rendezvous(&src->in->init_done, -1);
return NULL;
MP_THREAD_RETURN();
}
int mp_input_add_thread_src(struct input_ctx *ictx, void *ctx,
@ -1642,7 +1642,7 @@ int mp_input_add_thread_src(struct input_ctx *ictx, void *ctx,
return -1;
void *args[] = {src, loop_fn, ctx};
if (pthread_create(&src->in->thread, NULL, input_src_thread, args)) {
if (mp_thread_create(&src->in->thread, input_src_thread, args)) {
mp_input_src_kill(src);
return -1;
}

View File

@ -15,7 +15,6 @@
* License along with mpv. If not, see <http://www.gnu.org/licenses/>.
*/
#include <pthread.h>
#include <errno.h>
#include <unistd.h>
#include <limits.h>
@ -48,7 +47,7 @@ struct mp_ipc_ctx {
struct mp_client_api *client_api;
const char *path;
pthread_t thread;
mp_thread thread;
int death_pipe[2];
};
@ -91,9 +90,9 @@ static int ipc_write_str(struct client_arg *client, const char *buf)
return 0;
}
static void *client_thread(void *p)
static MP_THREAD_VOID client_thread(void *p)
{
pthread_detach(pthread_self());
pthread_detach(mp_thread_self());
// We don't use MSG_NOSIGNAL because the moldy fruit OS doesn't support it.
struct sigaction sa = { .sa_handler = SIG_IGN, .sa_flags = SA_RESTART };
@ -106,7 +105,7 @@ static void *client_thread(void *p)
bstr client_msg = { talloc_strdup(NULL, ""), 0 };
char *tname = talloc_asprintf(NULL, "ipc/%s", arg->client_name);
mpthread_set_name(tname);
mp_thread_set_name(tname);
talloc_free(tname);
int pipe_fd = mpv_get_wakeup_pipe(arg->client);
@ -219,7 +218,7 @@ done:
} else {
mpv_destroy(h);
}
return NULL;
MP_THREAD_RETURN();
}
static bool ipc_start_client(struct mp_ipc_ctx *ctx, struct client_arg *client,
@ -232,8 +231,8 @@ static bool ipc_start_client(struct mp_ipc_ctx *ctx, struct client_arg *client,
client->log = mp_client_get_log(client->client);
pthread_t client_thr;
if (pthread_create(&client_thr, NULL, client_thread, client))
mp_thread client_thr;
if (mp_thread_create(&client_thr, client_thread, client))
goto err;
return true;
@ -295,7 +294,7 @@ bool mp_ipc_start_anon_client(struct mp_ipc_ctx *ctx, struct mpv_handle *h,
return true;
}
static void *ipc_thread(void *p)
static MP_THREAD_VOID ipc_thread(void *p)
{
int rc;
@ -304,7 +303,7 @@ static void *ipc_thread(void *p)
struct mp_ipc_ctx *arg = p;
mpthread_set_name("ipc/socket");
mp_thread_set_name("ipc/socket");
MP_VERBOSE(arg, "Starting IPC master\n");
@ -379,7 +378,7 @@ done:
if (ipc_fd >= 0)
close(ipc_fd);
return NULL;
MP_THREAD_RETURN();
}
struct mp_ipc_ctx *mp_init_ipc(struct mp_client_api *client_api,
@ -418,7 +417,7 @@ struct mp_ipc_ctx *mp_init_ipc(struct mp_client_api *client_api,
if (mp_make_wakeup_pipe(arg->death_pipe) < 0)
goto out;
if (pthread_create(&arg->thread, NULL, ipc_thread, arg))
if (mp_thread_create(&arg->thread, ipc_thread, arg))
goto out;
return arg;
@ -438,7 +437,7 @@ void mp_uninit_ipc(struct mp_ipc_ctx *arg)
return;
(void)write(arg->death_pipe[1], &(char){0}, 1);
pthread_join(arg->thread, NULL);
mp_thread_join(arg->thread);
close(arg->death_pipe[0]);
close(arg->death_pipe[1]);

View File

@ -36,7 +36,7 @@ struct mp_ipc_ctx {
struct mp_client_api *client_api;
const wchar_t *path;
pthread_t thread;
mp_thread thread;
HANDLE death_event;
};
@ -198,9 +198,9 @@ static void report_read_error(struct client_arg *arg, DWORD error)
}
}
static void *client_thread(void *p)
static MP_THREAD_VOID client_thread(void *p)
{
pthread_detach(pthread_self());
pthread_detach(mp_thread_self());
struct client_arg *arg = p;
char buf[4096];
@ -211,7 +211,7 @@ static void *client_thread(void *p)
DWORD r;
char *tname = talloc_asprintf(NULL, "ipc/%s", arg->client_name);
mpthread_set_name(tname);
mp_thread_set_name(tname);
talloc_free(tname);
arg->write_ol.hEvent = CreateEventW(NULL, TRUE, TRUE, NULL);
@ -307,7 +307,7 @@ done:
CloseHandle(arg->client_h);
mpv_destroy(arg->client);
talloc_free(arg);
return NULL;
MP_THREAD_RETURN();
}
static void ipc_start_client(struct mp_ipc_ctx *ctx, struct client_arg *client)
@ -315,8 +315,8 @@ static void ipc_start_client(struct mp_ipc_ctx *ctx, struct client_arg *client)
client->client = mp_new_client(ctx->client_api, client->client_name),
client->log = mp_client_get_log(client->client);
pthread_t client_thr;
if (pthread_create(&client_thr, NULL, client_thread, client)) {
mp_thread client_thr;
if (mp_thread_create(&client_thr, client_thread, client)) {
mpv_destroy(client->client);
CloseHandle(client->client_h);
talloc_free(client);
@ -341,7 +341,7 @@ bool mp_ipc_start_anon_client(struct mp_ipc_ctx *ctx, struct mpv_handle *h,
return false;
}
static void *ipc_thread(void *p)
static MP_THREAD_VOID ipc_thread(void *p)
{
// Use PIPE_TYPE_MESSAGE | PIPE_READMODE_BYTE so message framing is
// maintained for message-mode clients, but byte-mode clients can still
@ -358,7 +358,7 @@ static void *ipc_thread(void *p)
HANDLE client = INVALID_HANDLE_VALUE;
int client_num = 0;
mpthread_set_name("ipc/named-pipe");
mp_thread_set_name("ipc/named-pipe");
MP_VERBOSE(arg, "Starting IPC master\n");
SECURITY_ATTRIBUTES sa = {
@ -450,7 +450,7 @@ done:
CloseHandle(server);
if (ol.hEvent)
CloseHandle(ol.hEvent);
return NULL;
MP_THREAD_RETURN();
}
struct mp_ipc_ctx *mp_init_ipc(struct mp_client_api *client_api,
@ -482,7 +482,7 @@ struct mp_ipc_ctx *mp_init_ipc(struct mp_client_api *client_api,
if (!(arg->death_event = CreateEventW(NULL, TRUE, FALSE, NULL)))
goto out;
if (pthread_create(&arg->thread, NULL, ipc_thread, arg))
if (mp_thread_create(&arg->thread, ipc_thread, arg))
goto out;
talloc_free(opts);
@ -502,7 +502,7 @@ void mp_uninit_ipc(struct mp_ipc_ctx *arg)
return;
SetEvent(arg->death_event);
pthread_join(arg->thread, NULL);
mp_thread_join(arg->thread);
CloseHandle(arg->death_event);
talloc_free(arg);

View File

@ -15,13 +15,15 @@
* License along with mpv. If not, see <http://www.gnu.org/licenses/>.
*/
#include <SDL.h>
#include <stdbool.h>
#include <pthread.h>
#include <SDL.h>
#include "common/common.h"
#include "common/msg.h"
#include "input.h"
#include "input/keycodes.h"
#include "osdep/threads.h"
struct gamepad_priv {
SDL_GameController *controller;
@ -34,7 +36,7 @@ static void initialize_events(void)
gamepad_cancel_wakeup = SDL_RegisterEvents(1);
}
static pthread_once_t events_initialized = PTHREAD_ONCE_INIT;
static mp_once events_initialized = MP_STATIC_ONCE_INITIALIZER;
#define INVALID_KEY -1
@ -212,7 +214,7 @@ static void read_gamepad_thread(struct mp_input_src *src, void *param)
return;
}
pthread_once(&events_initialized, initialize_events);
mp_exec_once(&events_initialized, initialize_events);
if (gamepad_cancel_wakeup == (Uint32)-1) {
MP_ERR(src, "Can't register SDL custom events\n");

View File

@ -103,7 +103,7 @@ extern "C" {
* In addition, you were required to call all mpv_render*() API functions
* from the same thread on which mpv_render_context_create() was originally
* run (for the same the mpv_render_context). Not honoring it led to UB
* (deadlocks, use of invalid pthread_t handles), even if you moved your GL
* (deadlocks, use of invalid mp_thread handles), even if you moved your GL
* context to a different thread correctly.
* These problems were addressed in API version 1.105 (mpv 0.30.0).
*

View File

@ -26,8 +26,8 @@
struct mp_dispatch_queue {
struct mp_dispatch_item *head, *tail;
pthread_mutex_t lock;
pthread_cond_t cond;
mp_mutex lock;
mp_cond cond;
void (*wakeup_fn)(void *wakeup_ctx);
void *wakeup_ctx;
void (*onlock_fn)(void *onlock_ctx);
@ -39,7 +39,7 @@ struct mp_dispatch_queue {
// The target thread is in mp_dispatch_queue_process() (and either idling,
// locked, or running a dispatch callback).
bool in_process;
pthread_t in_process_thread;
mp_thread in_process_thread;
// The target thread is in mp_dispatch_queue_process(), and currently
// something has exclusive access to it (e.g. running a dispatch callback,
// or a different thread got it with mp_dispatch_lock()).
@ -48,7 +48,7 @@ struct mp_dispatch_queue {
size_t lock_requests;
// locked==true is due to a mp_dispatch_lock() call (for debugging).
bool locked_explicit;
pthread_t locked_explicit_thread;
mp_thread locked_explicit_thread;
};
struct mp_dispatch_item {
@ -67,8 +67,8 @@ static void queue_dtor(void *p)
assert(!queue->in_process);
assert(!queue->lock_requests);
assert(!queue->locked);
pthread_cond_destroy(&queue->cond);
pthread_mutex_destroy(&queue->lock);
mp_cond_destroy(&queue->cond);
mp_mutex_destroy(&queue->lock);
}
// A dispatch queue lets other threads run callbacks in a target thread.
@ -76,7 +76,7 @@ static void queue_dtor(void *p)
// Free the dispatch queue with talloc_free(). At the time of destruction,
// the queue must be empty. The easiest way to guarantee this is to
// terminate all potential senders, then call mp_dispatch_run() with a
// function that e.g. makes the target thread exit, then pthread_join() the
// function that e.g. makes the target thread exit, then mp_thread_join() the
// target thread, and finally destroy the queue. Another way is calling
// mp_dispatch_queue_process() after terminating all potential senders, and
// then destroying the queue.
@ -85,8 +85,8 @@ struct mp_dispatch_queue *mp_dispatch_create(void *ta_parent)
struct mp_dispatch_queue *queue = talloc_ptrtype(ta_parent, queue);
*queue = (struct mp_dispatch_queue){0};
talloc_set_destructor(queue, queue_dtor);
pthread_mutex_init(&queue->lock, NULL);
pthread_cond_init(&queue->cond, NULL);
mp_mutex_init(&queue->lock);
mp_cond_init(&queue->cond);
return queue;
}
@ -126,14 +126,14 @@ void mp_dispatch_set_onlock_fn(struct mp_dispatch_queue *queue,
static void mp_dispatch_append(struct mp_dispatch_queue *queue,
struct mp_dispatch_item *item)
{
pthread_mutex_lock(&queue->lock);
mp_mutex_lock(&queue->lock);
if (item->mergeable) {
for (struct mp_dispatch_item *cur = queue->head; cur; cur = cur->next) {
if (cur->mergeable && cur->fn == item->fn &&
cur->fn_data == item->fn_data)
{
talloc_free(item);
pthread_mutex_unlock(&queue->lock);
mp_mutex_unlock(&queue->lock);
return;
}
}
@ -148,12 +148,12 @@ static void mp_dispatch_append(struct mp_dispatch_queue *queue,
// Wake up the main thread; note that other threads might wait on this
// condition for reasons, so broadcast the condition.
pthread_cond_broadcast(&queue->cond);
mp_cond_broadcast(&queue->cond);
// No wakeup callback -> assume mp_dispatch_queue_process() needs to be
// interrupted instead.
if (!queue->wakeup_fn)
queue->interrupted = true;
pthread_mutex_unlock(&queue->lock);
mp_mutex_unlock(&queue->lock);
if (queue->wakeup_fn)
queue->wakeup_fn(queue->wakeup_ctx);
@ -218,7 +218,7 @@ void mp_dispatch_enqueue_notify(struct mp_dispatch_queue *queue,
void mp_dispatch_cancel_fn(struct mp_dispatch_queue *queue,
mp_dispatch_fn fn, void *fn_data)
{
pthread_mutex_lock(&queue->lock);
mp_mutex_lock(&queue->lock);
struct mp_dispatch_item **pcur = &queue->head;
queue->tail = NULL;
while (*pcur) {
@ -231,7 +231,7 @@ void mp_dispatch_cancel_fn(struct mp_dispatch_queue *queue,
pcur = &cur->next;
}
}
pthread_mutex_unlock(&queue->lock);
mp_mutex_unlock(&queue->lock);
}
// Run fn(fn_data) on the target thread synchronously. This function enqueues
@ -247,10 +247,10 @@ void mp_dispatch_run(struct mp_dispatch_queue *queue,
};
mp_dispatch_append(queue, &item);
pthread_mutex_lock(&queue->lock);
mp_mutex_lock(&queue->lock);
while (!item.completed)
pthread_cond_wait(&queue->cond, &queue->lock);
pthread_mutex_unlock(&queue->lock);
mp_cond_wait(&queue->cond, &queue->lock);
mp_mutex_unlock(&queue->lock);
}
// Process any outstanding dispatch items in the queue. This also handles
@ -271,18 +271,18 @@ void mp_dispatch_run(struct mp_dispatch_queue *queue,
// no enqueued callback can call the lock/unlock functions).
void mp_dispatch_queue_process(struct mp_dispatch_queue *queue, double timeout)
{
pthread_mutex_lock(&queue->lock);
mp_mutex_lock(&queue->lock);
queue->wait = timeout > 0 ? mp_time_ns_add(mp_time_ns(), timeout) : 0;
assert(!queue->in_process); // recursion not allowed
queue->in_process = true;
queue->in_process_thread = pthread_self();
queue->in_process_thread = mp_thread_self();
// Wake up thread which called mp_dispatch_lock().
if (queue->lock_requests)
pthread_cond_broadcast(&queue->cond);
mp_cond_broadcast(&queue->cond);
while (1) {
if (queue->lock_requests) {
// Block due to something having called mp_dispatch_lock().
pthread_cond_wait(&queue->cond, &queue->lock);
mp_cond_wait(&queue->cond, &queue->lock);
} else if (queue->head) {
struct mp_dispatch_item *item = queue->head;
queue->head = item->next;
@ -295,23 +295,22 @@ void mp_dispatch_queue_process(struct mp_dispatch_queue *queue, double timeout)
// from mp_dispatch_lock(), which is done by locked=true.
assert(!queue->locked);
queue->locked = true;
pthread_mutex_unlock(&queue->lock);
mp_mutex_unlock(&queue->lock);
item->fn(item->fn_data);
pthread_mutex_lock(&queue->lock);
mp_mutex_lock(&queue->lock);
assert(queue->locked);
queue->locked = false;
// Wakeup mp_dispatch_run(), also mp_dispatch_lock().
pthread_cond_broadcast(&queue->cond);
mp_cond_broadcast(&queue->cond);
if (item->asynchronous) {
talloc_free(item);
} else {
item->completed = true;
}
} else if (queue->wait > 0 && !queue->interrupted) {
struct timespec ts = mp_time_ns_to_realtime(queue->wait);
if (pthread_cond_timedwait(&queue->cond, &queue->lock, &ts))
if (mp_cond_timedwait_until(&queue->cond, &queue->lock, queue->wait))
queue->wait = 0;
} else {
break;
@ -320,7 +319,7 @@ void mp_dispatch_queue_process(struct mp_dispatch_queue *queue, double timeout)
assert(!queue->locked);
queue->in_process = false;
queue->interrupted = false;
pthread_mutex_unlock(&queue->lock);
mp_mutex_unlock(&queue->lock);
}
// If the queue is inside of mp_dispatch_queue_process(), make it return as
@ -331,10 +330,10 @@ void mp_dispatch_queue_process(struct mp_dispatch_queue *queue, double timeout)
// wakeup the main thread from another thread in a race free way).
void mp_dispatch_interrupt(struct mp_dispatch_queue *queue)
{
pthread_mutex_lock(&queue->lock);
mp_mutex_lock(&queue->lock);
queue->interrupted = true;
pthread_cond_broadcast(&queue->cond);
pthread_mutex_unlock(&queue->lock);
mp_cond_broadcast(&queue->cond);
mp_mutex_unlock(&queue->lock);
}
// If a mp_dispatch_queue_process() call is in progress, then adjust the maximum
@ -347,12 +346,12 @@ void mp_dispatch_interrupt(struct mp_dispatch_queue *queue)
// to wait in external APIs.
void mp_dispatch_adjust_timeout(struct mp_dispatch_queue *queue, int64_t until)
{
pthread_mutex_lock(&queue->lock);
mp_mutex_lock(&queue->lock);
if (queue->in_process && queue->wait > until) {
queue->wait = until;
pthread_cond_broadcast(&queue->cond);
mp_cond_broadcast(&queue->cond);
}
pthread_mutex_unlock(&queue->lock);
mp_mutex_unlock(&queue->lock);
}
// Grant exclusive access to the target thread's state. While this is active,
@ -364,13 +363,13 @@ void mp_dispatch_adjust_timeout(struct mp_dispatch_queue *queue, int64_t until)
// already holding the dispatch lock.
void mp_dispatch_lock(struct mp_dispatch_queue *queue)
{
pthread_mutex_lock(&queue->lock);
mp_mutex_lock(&queue->lock);
// Must not be called recursively from dispatched callbacks.
if (queue->in_process)
assert(!pthread_equal(queue->in_process_thread, pthread_self()));
assert(!mp_thread_equal(queue->in_process_thread, mp_thread_self()));
// Must not be called recursively at all.
if (queue->locked_explicit)
assert(!pthread_equal(queue->locked_explicit_thread, pthread_self()));
assert(!mp_thread_equal(queue->locked_explicit_thread, mp_thread_self()));
queue->lock_requests += 1;
// And now wait until the target thread gets "trapped" within the
// mp_dispatch_queue_process() call, which will mean we get exclusive
@ -378,41 +377,41 @@ void mp_dispatch_lock(struct mp_dispatch_queue *queue)
if (queue->onlock_fn)
queue->onlock_fn(queue->onlock_ctx);
while (!queue->in_process) {
pthread_mutex_unlock(&queue->lock);
mp_mutex_unlock(&queue->lock);
if (queue->wakeup_fn)
queue->wakeup_fn(queue->wakeup_ctx);
pthread_mutex_lock(&queue->lock);
mp_mutex_lock(&queue->lock);
if (queue->in_process)
break;
pthread_cond_wait(&queue->cond, &queue->lock);
mp_cond_wait(&queue->cond, &queue->lock);
}
// Wait until we can get the lock.
while (!queue->in_process || queue->locked)
pthread_cond_wait(&queue->cond, &queue->lock);
mp_cond_wait(&queue->cond, &queue->lock);
// "Lock".
assert(queue->lock_requests);
assert(!queue->locked);
assert(!queue->locked_explicit);
queue->locked = true;
queue->locked_explicit = true;
queue->locked_explicit_thread = pthread_self();
pthread_mutex_unlock(&queue->lock);
queue->locked_explicit_thread = mp_thread_self();
mp_mutex_unlock(&queue->lock);
}
// Undo mp_dispatch_lock().
void mp_dispatch_unlock(struct mp_dispatch_queue *queue)
{
pthread_mutex_lock(&queue->lock);
mp_mutex_lock(&queue->lock);
assert(queue->locked);
// Must be called after a mp_dispatch_lock(), from the same thread.
assert(queue->locked_explicit);
assert(pthread_equal(queue->locked_explicit_thread, pthread_self()));
assert(mp_thread_equal(queue->locked_explicit_thread, mp_thread_self()));
// "Unlock".
queue->locked = false;
queue->locked_explicit = false;
queue->lock_requests -= 1;
// Wakeup mp_dispatch_queue_process(), and maybe other mp_dispatch_lock()s.
// (Would be nice to wake up only 1 other locker if lock_requests>0.)
pthread_cond_broadcast(&queue->cond);
pthread_mutex_unlock(&queue->lock);
mp_cond_broadcast(&queue->cond);
mp_mutex_unlock(&queue->lock);
}

View File

@ -22,15 +22,15 @@
#include <libavcodec/jni.h>
#include <libavutil/mem.h>
#include <libavutil/bprint.h>
#include <pthread.h>
#include <stdlib.h>
#include "jni.h"
#include "osdep/threads.h"
static JavaVM *java_vm;
static pthread_key_t current_env;
static pthread_once_t once = PTHREAD_ONCE_INIT;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static mp_once once = MP_STATIC_ONCE_INITIALIZER;
static mp_static_mutex lock = MP_STATIC_MUTEX_INITIALIZER;
static void jni_detach_env(void *data)
{
@ -49,7 +49,7 @@ JNIEnv *mp_jni_get_env(struct mp_log *log)
int ret = 0;
JNIEnv *env = NULL;
pthread_mutex_lock(&lock);
mp_mutex_lock(&lock);
if (java_vm == NULL) {
java_vm = av_jni_get_java_vm(NULL);
}
@ -59,7 +59,7 @@ JNIEnv *mp_jni_get_env(struct mp_log *log)
goto done;
}
pthread_once(&once, jni_create_pthread_key);
mp_exec_once(&once, jni_create_pthread_key);
if ((env = pthread_getspecific(current_env)) != NULL) {
goto done;
@ -86,7 +86,7 @@ JNIEnv *mp_jni_get_env(struct mp_log *log)
}
done:
pthread_mutex_unlock(&lock);
mp_mutex_unlock(&lock);
return env;
}

View File

@ -19,12 +19,12 @@
*/
#include <stdint.h>
#include <pthread.h>
#include "osdep/threads.h"
#include "random.h"
static uint64_t state[4];
static pthread_mutex_t state_mutex = PTHREAD_MUTEX_INITIALIZER;
static mp_static_mutex state_mutex = MP_STATIC_MUTEX_INITIALIZER;
static inline uint64_t rotl_u64(const uint64_t x, const int k)
{
@ -41,18 +41,18 @@ static inline uint64_t splitmix64(uint64_t *const x)
void mp_rand_seed(uint64_t seed)
{
pthread_mutex_lock(&state_mutex);
mp_mutex_lock(&state_mutex);
state[0] = seed;
for (int i = 1; i < 4; i++)
state[i] = splitmix64(&seed);
pthread_mutex_unlock(&state_mutex);
mp_mutex_unlock(&state_mutex);
}
uint64_t mp_rand_next(void)
{
uint64_t result, t;
pthread_mutex_lock(&state_mutex);
mp_mutex_lock(&state_mutex);
result = rotl_u64(state[1] * 5, 7) * 9;
t = state[1] << 17;
@ -64,7 +64,7 @@ uint64_t mp_rand_next(void)
state[2] ^= t;
state[3] = rotl_u64(state[3], 45);
pthread_mutex_unlock(&state_mutex);
mp_mutex_unlock(&state_mutex);
return result;
}

View File

@ -1,9 +1,10 @@
#include <pthread.h>
#include "rendezvous.h"
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wakeup = PTHREAD_COND_INITIALIZER;
#include "osdep/threads.h"
static mp_static_mutex lock = MP_STATIC_MUTEX_INITIALIZER;
static mp_cond wakeup = MP_STATIC_COND_INITIALIZER;
static struct waiter *waiters;
@ -31,7 +32,7 @@ struct waiter {
intptr_t mp_rendezvous(void *tag, intptr_t value)
{
struct waiter wait = { .tag = tag, .value = &value };
pthread_mutex_lock(&lock);
mp_mutex_lock(&lock);
struct waiter **prev = &waiters;
while (*prev) {
if ((*prev)->tag == tag) {
@ -40,15 +41,15 @@ intptr_t mp_rendezvous(void *tag, intptr_t value)
value = tmp;
(*prev)->value = NULL; // signals completion
*prev = (*prev)->next; // unlink
pthread_cond_broadcast(&wakeup);
mp_cond_broadcast(&wakeup);
goto done;
}
prev = &(*prev)->next;
}
*prev = &wait;
while (wait.value)
pthread_cond_wait(&wakeup, &lock);
mp_cond_wait(&wakeup, &lock);
done:
pthread_mutex_unlock(&lock);
mp_mutex_unlock(&lock);
return value;
}

View File

@ -13,8 +13,6 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <pthread.h>
#include "common/common.h"
#include "osdep/threads.h"
#include "osdep/timer.h"
@ -33,12 +31,12 @@ struct work {
struct mp_thread_pool {
int min_threads, max_threads;
pthread_mutex_t lock;
pthread_cond_t wakeup;
mp_mutex lock;
mp_cond wakeup;
// --- the following fields are protected by lock
pthread_t *threads;
mp_thread *threads;
int num_threads;
// Number of threads which have taken up work and are still processing it.
@ -50,15 +48,15 @@ struct mp_thread_pool {
int num_work;
};
static void *worker_thread(void *arg)
static MP_THREAD_VOID worker_thread(void *arg)
{
struct mp_thread_pool *pool = arg;
mpthread_set_name("worker");
mp_thread_set_name("worker");
pthread_mutex_lock(&pool->lock);
mp_mutex_lock(&pool->lock);
struct timespec ts = {0};
int64_t destroy_deadline = 0;
bool got_timeout = false;
while (1) {
struct work work = {0};
@ -72,25 +70,25 @@ static void *worker_thread(void *arg)
break;
if (pool->num_threads > pool->min_threads) {
if (!ts.tv_sec && !ts.tv_nsec)
ts = mp_rel_time_to_timespec(DESTROY_TIMEOUT);
if (pthread_cond_timedwait(&pool->wakeup, &pool->lock, &ts))
if (!destroy_deadline)
destroy_deadline = mp_time_ns_add(mp_time_ns(), DESTROY_TIMEOUT);
if (mp_cond_timedwait_until(&pool->wakeup, &pool->lock, destroy_deadline))
got_timeout = pool->num_threads > pool->min_threads;
} else {
pthread_cond_wait(&pool->wakeup, &pool->lock);
mp_cond_wait(&pool->wakeup, &pool->lock);
}
continue;
}
pool->busy_threads += 1;
pthread_mutex_unlock(&pool->lock);
mp_mutex_unlock(&pool->lock);
work.fn(work.fn_ctx);
pthread_mutex_lock(&pool->lock);
mp_mutex_lock(&pool->lock);
pool->busy_threads -= 1;
ts = (struct timespec){0};
destroy_deadline = 0;
got_timeout = false;
}
@ -98,18 +96,18 @@ static void *worker_thread(void *arg)
// timeout, and nobody is waiting for us. We have to remove ourselves.
if (!pool->terminate) {
for (int n = 0; n < pool->num_threads; n++) {
if (pthread_equal(pool->threads[n], pthread_self())) {
pthread_detach(pthread_self());
if (mp_thread_equal(pool->threads[n], mp_thread_self())) {
pthread_detach(mp_thread_self());
MP_TARRAY_REMOVE_AT(pool->threads, pool->num_threads, n);
pthread_mutex_unlock(&pool->lock);
return NULL;
mp_mutex_unlock(&pool->lock);
MP_THREAD_RETURN();
}
}
MP_ASSERT_UNREACHABLE();
}
pthread_mutex_unlock(&pool->lock);
return NULL;
mp_mutex_unlock(&pool->lock);
MP_THREAD_RETURN();
}
static void thread_pool_dtor(void *ctx)
@ -117,33 +115,33 @@ static void thread_pool_dtor(void *ctx)
struct mp_thread_pool *pool = ctx;
pthread_mutex_lock(&pool->lock);
mp_mutex_lock(&pool->lock);
pool->terminate = true;
pthread_cond_broadcast(&pool->wakeup);
mp_cond_broadcast(&pool->wakeup);
pthread_t *threads = pool->threads;
mp_thread *threads = pool->threads;
int num_threads = pool->num_threads;
pool->threads = NULL;
pool->num_threads = 0;
pthread_mutex_unlock(&pool->lock);
mp_mutex_unlock(&pool->lock);
for (int n = 0; n < num_threads; n++)
pthread_join(threads[n], NULL);
mp_thread_join(threads[n]);
assert(pool->num_work == 0);
assert(pool->num_threads == 0);
pthread_cond_destroy(&pool->wakeup);
pthread_mutex_destroy(&pool->lock);
mp_cond_destroy(&pool->wakeup);
mp_mutex_destroy(&pool->lock);
}
static bool add_thread(struct mp_thread_pool *pool)
{
pthread_t thread;
mp_thread thread;
if (pthread_create(&thread, NULL, worker_thread, pool) != 0)
if (mp_thread_create(&thread, worker_thread, pool) != 0)
return false;
MP_TARRAY_APPEND(pool, pool->threads, pool->num_threads, thread);
@ -160,17 +158,17 @@ struct mp_thread_pool *mp_thread_pool_create(void *ta_parent, int init_threads,
struct mp_thread_pool *pool = talloc_zero(ta_parent, struct mp_thread_pool);
talloc_set_destructor(pool, thread_pool_dtor);
pthread_mutex_init(&pool->lock, NULL);
pthread_cond_init(&pool->wakeup, NULL);
mp_mutex_init(&pool->lock);
mp_cond_init(&pool->wakeup);
pool->min_threads = min_threads;
pool->max_threads = max_threads;
pthread_mutex_lock(&pool->lock);
mp_mutex_lock(&pool->lock);
for (int n = 0; n < init_threads; n++)
add_thread(pool);
bool ok = pool->num_threads >= init_threads;
pthread_mutex_unlock(&pool->lock);
mp_mutex_unlock(&pool->lock);
if (!ok)
TA_FREEP(&pool);
@ -185,7 +183,7 @@ static bool thread_pool_add(struct mp_thread_pool *pool, void (*fn)(void *ctx),
assert(fn);
pthread_mutex_lock(&pool->lock);
mp_mutex_lock(&pool->lock);
struct work work = {fn, fn_ctx};
// If there are not enough threads to process all at once, but we can
@ -203,10 +201,10 @@ static bool thread_pool_add(struct mp_thread_pool *pool, void (*fn)(void *ctx),
if (ok) {
MP_TARRAY_INSERT_AT(pool, pool->work, pool->num_work, 0, work);
pthread_cond_signal(&pool->wakeup);
mp_cond_signal(&pool->wakeup);
}
pthread_mutex_unlock(&pool->lock);
mp_mutex_unlock(&pool->lock);
return ok;
}

View File

@ -35,10 +35,10 @@
uintptr_t mp_waiter_wait(struct mp_waiter *waiter)
{
pthread_mutex_lock(&waiter->lock);
mp_mutex_lock(&waiter->lock);
while (!waiter->done)
pthread_cond_wait(&waiter->wakeup, &waiter->lock);
pthread_mutex_unlock(&waiter->lock);
mp_cond_wait(&waiter->wakeup, &waiter->lock);
mp_mutex_unlock(&waiter->lock);
uintptr_t ret = waiter->value;
@ -50,8 +50,8 @@ uintptr_t mp_waiter_wait(struct mp_waiter *waiter)
// following functions will do nearly nothing. This is true for Windows
// and Linux. But some lesser OSes still might allocate kernel objects
// when initializing mutexes, so destroy them here.
pthread_mutex_destroy(&waiter->lock);
pthread_cond_destroy(&waiter->wakeup);
mp_mutex_destroy(&waiter->lock);
mp_cond_destroy(&waiter->wakeup);
memset(waiter, 0xCA, sizeof(*waiter)); // for debugging
@ -60,25 +60,25 @@ uintptr_t mp_waiter_wait(struct mp_waiter *waiter)
void mp_waiter_wakeup(struct mp_waiter *waiter, uintptr_t value)
{
pthread_mutex_lock(&waiter->lock);
mp_mutex_lock(&waiter->lock);
assert(!waiter->done);
waiter->done = true;
waiter->value = value;
pthread_cond_signal(&waiter->wakeup);
pthread_mutex_unlock(&waiter->lock);
mp_cond_signal(&waiter->wakeup);
mp_mutex_unlock(&waiter->lock);
}
bool mp_waiter_poll(struct mp_waiter *waiter)
{
pthread_mutex_lock(&waiter->lock);
mp_mutex_lock(&waiter->lock);
bool r = waiter->done;
pthread_mutex_unlock(&waiter->lock);
mp_mutex_unlock(&waiter->lock);
return r;
}
struct mp_cancel {
pthread_mutex_t lock;
pthread_cond_t wakeup;
mp_mutex lock;
mp_cond wakeup;
// Semaphore state and "mirrors".
atomic_bool triggered;
@ -117,8 +117,8 @@ static void cancel_destroy(void *p)
CloseHandle(c->win32_event);
#endif
pthread_mutex_destroy(&c->lock);
pthread_cond_destroy(&c->wakeup);
mp_mutex_destroy(&c->lock);
mp_cond_destroy(&c->wakeup);
}
struct mp_cancel *mp_cancel_new(void *talloc_ctx)
@ -129,8 +129,8 @@ struct mp_cancel *mp_cancel_new(void *talloc_ctx)
.triggered = false,
.wakeup_pipe = {-1, -1},
};
pthread_mutex_init(&c->lock, NULL);
pthread_cond_init(&c->wakeup, NULL);
mp_mutex_init(&c->lock);
mp_cond_init(&c->wakeup);
return c;
}
@ -138,7 +138,7 @@ static void trigger_locked(struct mp_cancel *c)
{
atomic_store(&c->triggered, true);
pthread_cond_broadcast(&c->wakeup); // condition bound to c->triggered
mp_cond_broadcast(&c->wakeup); // condition bound to c->triggered
if (c->cb)
c->cb(c->cb_ctx);
@ -157,14 +157,14 @@ static void trigger_locked(struct mp_cancel *c)
void mp_cancel_trigger(struct mp_cancel *c)
{
pthread_mutex_lock(&c->lock);
mp_mutex_lock(&c->lock);
trigger_locked(c);
pthread_mutex_unlock(&c->lock);
mp_mutex_unlock(&c->lock);
}
void mp_cancel_reset(struct mp_cancel *c)
{
pthread_mutex_lock(&c->lock);
mp_mutex_lock(&c->lock);
atomic_store(&c->triggered, false);
@ -182,7 +182,7 @@ void mp_cancel_reset(struct mp_cancel *c)
ResetEvent(c->win32_event);
#endif
pthread_mutex_unlock(&c->lock);
mp_mutex_unlock(&c->lock);
}
bool mp_cancel_test(struct mp_cancel *c)
@ -192,13 +192,13 @@ bool mp_cancel_test(struct mp_cancel *c)
bool mp_cancel_wait(struct mp_cancel *c, double timeout)
{
struct timespec ts = mp_rel_time_to_timespec(timeout);
pthread_mutex_lock(&c->lock);
int64_t wait_until = mp_time_ns_add(mp_time_ns(), timeout);
mp_mutex_lock(&c->lock);
while (!mp_cancel_test(c)) {
if (pthread_cond_timedwait(&c->wakeup, &c->lock, &ts))
if (mp_cond_timedwait_until(&c->wakeup, &c->lock, wait_until))
break;
}
pthread_mutex_unlock(&c->lock);
mp_mutex_unlock(&c->lock);
return mp_cancel_test(c);
}
@ -213,11 +213,11 @@ static void retrigger_locked(struct mp_cancel *c)
void mp_cancel_set_cb(struct mp_cancel *c, void (*cb)(void *ctx), void *ctx)
{
pthread_mutex_lock(&c->lock);
mp_mutex_lock(&c->lock);
c->cb = cb;
c->cb_ctx = ctx;
retrigger_locked(c);
pthread_mutex_unlock(&c->lock);
mp_mutex_unlock(&c->lock);
}
void mp_cancel_set_parent(struct mp_cancel *slave, struct mp_cancel *parent)
@ -228,22 +228,22 @@ void mp_cancel_set_parent(struct mp_cancel *slave, struct mp_cancel *parent)
if (slave->parent == parent)
return;
if (slave->parent) {
pthread_mutex_lock(&slave->parent->lock);
mp_mutex_lock(&slave->parent->lock);
LL_REMOVE(siblings, &slave->parent->slaves, slave);
pthread_mutex_unlock(&slave->parent->lock);
mp_mutex_unlock(&slave->parent->lock);
}
slave->parent = parent;
if (slave->parent) {
pthread_mutex_lock(&slave->parent->lock);
mp_mutex_lock(&slave->parent->lock);
LL_APPEND(siblings, &slave->parent->slaves, slave);
retrigger_locked(slave->parent);
pthread_mutex_unlock(&slave->parent->lock);
mp_mutex_unlock(&slave->parent->lock);
}
}
int mp_cancel_get_fd(struct mp_cancel *c)
{
pthread_mutex_lock(&c->lock);
mp_mutex_lock(&c->lock);
if (c->wakeup_pipe[0] < 0) {
#if defined(__GNUC__) && !defined(__clang__)
# pragma GCC diagnostic push
@ -255,7 +255,7 @@ int mp_cancel_get_fd(struct mp_cancel *c)
#endif
retrigger_locked(c);
}
pthread_mutex_unlock(&c->lock);
mp_mutex_unlock(&c->lock);
return c->wakeup_pipe[0];
@ -264,12 +264,12 @@ int mp_cancel_get_fd(struct mp_cancel *c)
#ifdef __MINGW32__
void *mp_cancel_get_event(struct mp_cancel *c)
{
pthread_mutex_lock(&c->lock);
mp_mutex_lock(&c->lock);
if (!c->win32_event) {
c->win32_event = CreateEventW(NULL, TRUE, FALSE, NULL);
retrigger_locked(c);
}
pthread_mutex_unlock(&c->lock);
mp_mutex_unlock(&c->lock);
return c->win32_event;
}

View File

@ -2,22 +2,23 @@
#include <stdint.h>
#include <stdbool.h>
#include <pthread.h>
#include "osdep/threads.h"
// This is basically a single-shot semaphore, intended as light-weight solution
// for just making a thread wait for another thread.
struct mp_waiter {
// All fields are considered private. Use MP_WAITER_INITIALIZER to init.
pthread_mutex_t lock;
pthread_cond_t wakeup;
mp_mutex lock;
mp_cond wakeup;
bool done;
uintptr_t value;
};
// Initialize a mp_waiter object for use with mp_waiter_*().
#define MP_WAITER_INITIALIZER { \
.lock = PTHREAD_MUTEX_INITIALIZER, \
.wakeup = PTHREAD_COND_INITIALIZER, \
.lock = MP_STATIC_MUTEX_INITIALIZER, \
.wakeup = MP_STATIC_COND_INITIALIZER, \
}
// Block until some other thread calls mp_waiter_wakeup(). The function returns

View File

@ -17,7 +17,6 @@
#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
@ -25,17 +24,18 @@
#include <string.h>
#include <strings.h>
#include "m_config_core.h"
#include "options/m_option.h"
#include "common/common.h"
#include "common/global.h"
#include "common/msg.h"
#include "common/msg_control.h"
#include "common/msg.h"
#include "m_config_core.h"
#include "misc/dispatch.h"
#include "options/m_option.h"
#include "osdep/threads.h"
// For use with m_config_cache.
struct m_config_shadow {
pthread_mutex_t lock;
mp_mutex lock;
// Incremented on every option change.
_Atomic uint64_t ts;
// -- immutable after init
@ -417,14 +417,14 @@ static void shadow_destroy(void *p)
assert(shadow->num_listeners == 0);
talloc_free(shadow->data);
pthread_mutex_destroy(&shadow->lock);
mp_mutex_destroy(&shadow->lock);
}
struct m_config_shadow *m_config_shadow_new(const struct m_sub_options *root)
{
struct m_config_shadow *shadow = talloc_zero(NULL, struct m_config_shadow);
talloc_set_destructor(shadow, shadow_destroy);
pthread_mutex_init(&shadow->lock, NULL);
mp_mutex_init(&shadow->lock);
add_sub_group(shadow, NULL, -1, -1, root);
@ -566,9 +566,9 @@ struct m_config_cache *m_config_cache_from_shadow(void *ta_parent,
in->shadow = shadow;
in->src = shadow->data;
pthread_mutex_lock(&shadow->lock);
mp_mutex_lock(&shadow->lock);
in->data = allocate_option_data(cache, shadow, group_index, in->src);
pthread_mutex_unlock(&shadow->lock);
mp_mutex_unlock(&shadow->lock);
cache->opts = in->data->gdata[0].udata;
@ -675,7 +675,7 @@ bool m_config_cache_update(struct m_config_cache *cache)
if (!cache_check_update(cache))
return false;
pthread_mutex_lock(&shadow->lock);
mp_mutex_lock(&shadow->lock);
bool res = false;
while (1) {
void *p;
@ -684,7 +684,7 @@ bool m_config_cache_update(struct m_config_cache *cache)
break;
res = true;
}
pthread_mutex_unlock(&shadow->lock);
mp_mutex_unlock(&shadow->lock);
return res;
}
@ -697,9 +697,9 @@ bool m_config_cache_get_next_changed(struct m_config_cache *cache, void **opt)
if (!cache_check_update(cache) && in->upd_group < 0)
return false;
pthread_mutex_lock(&shadow->lock);
mp_mutex_lock(&shadow->lock);
update_next_option(cache, opt);
pthread_mutex_unlock(&shadow->lock);
mp_mutex_unlock(&shadow->lock);
return !!*opt;
}
@ -744,7 +744,7 @@ bool m_config_cache_write_opt(struct m_config_cache *cache, void *ptr)
struct m_config_group *g = &shadow->groups[group_idx];
const struct m_option *opt = &g->group->opts[opt_idx];
pthread_mutex_lock(&shadow->lock);
mp_mutex_lock(&shadow->lock);
struct m_group_data *gdst = m_config_gdata(in->data, group_idx);
struct m_group_data *gsrc = m_config_gdata(in->src, group_idx);
@ -763,7 +763,7 @@ bool m_config_cache_write_opt(struct m_config_cache *cache, void *ptr)
}
}
pthread_mutex_unlock(&shadow->lock);
mp_mutex_unlock(&shadow->lock);
return changed;
}
@ -774,7 +774,7 @@ void m_config_cache_set_wakeup_cb(struct m_config_cache *cache,
struct config_cache *in = cache->internal;
struct m_config_shadow *shadow = in->shadow;
pthread_mutex_lock(&shadow->lock);
mp_mutex_lock(&shadow->lock);
if (in->in_list) {
for (int n = 0; n < shadow->num_listeners; n++) {
if (shadow->listeners[n] == in) {
@ -796,7 +796,7 @@ void m_config_cache_set_wakeup_cb(struct m_config_cache *cache,
in->wakeup_cb = cb;
in->wakeup_cb_ctx = cb_ctx;
}
pthread_mutex_unlock(&shadow->lock);
mp_mutex_unlock(&shadow->lock);
}
static void dispatch_notify(void *p)

View File

@ -18,7 +18,6 @@
#include <assert.h>
#include <errno.h>
#include <float.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
@ -28,15 +27,16 @@
#include "libmpv/client.h"
#include "m_config.h"
#include "m_config_frontend.h"
#include "options/m_option.h"
#include "common/common.h"
#include "common/global.h"
#include "common/msg.h"
#include "common/msg_control.h"
#include "common/msg.h"
#include "m_config_frontend.h"
#include "m_config.h"
#include "misc/dispatch.h"
#include "misc/node.h"
#include "options/m_option.h"
#include "osdep/threads.h"
extern const char mp_help_text[];

View File

@ -146,7 +146,7 @@ char *mp_to_utf8(void *talloc_ctx, const wchar_t *s)
#include <io.h>
#include <fcntl.h>
#include <pthread.h>
#include "osdep/threads.h"
static void set_errno_from_lasterror(void)
{
@ -670,8 +670,8 @@ static void init_getenv(void)
char *mp_getenv(const char *name)
{
static pthread_once_t once_init_getenv = PTHREAD_ONCE_INIT;
pthread_once(&once_init_getenv, init_getenv);
static mp_once once_init_getenv = MP_STATIC_ONCE_INITIALIZER;
mp_exec_once(&once_init_getenv, init_getenv);
// Copied from musl, http://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
// Copyright © 2005-2013 Rich Felker, standard MIT license
int i;
@ -738,8 +738,8 @@ void *mp_dlsym(void *handle, const char *symbol)
char *mp_dlerror(void)
{
static pthread_once_t once_init_dlerror = PTHREAD_ONCE_INIT;
pthread_once(&once_init_dlerror, mp_dl_init);
static mp_once once_init_dlerror = MP_STATIC_ONCE_INITIALIZER;
mp_exec_once(&once_init_dlerror, mp_dl_init);
mp_dl_free();
if (mp_dl_result.errcode == 0)

View File

@ -16,7 +16,6 @@
*/
#include <stdio.h>
#include <pthread.h>
#include "config.h"
#include "mpv_talloc.h"
@ -88,7 +87,7 @@ const struct m_sub_options macos_conf = {
// running in libmpv mode, and cocoa_main() was never called.
static bool application_instantiated;
static pthread_t playback_thread_id;
static mp_thread playback_thread_id;
@interface Application ()
{
@ -273,9 +272,9 @@ static void cocoa_run_runloop(void)
[pool drain];
}
static void *playback_thread(void *ctx_obj)
static MP_THREAD_VOID playback_thread(void *ctx_obj)
{
mpthread_set_name("core/playback");
mp_thread_set_name("core/playback");
@autoreleasepool {
struct playback_thread_ctx *ctx = (struct playback_thread_ctx*) ctx_obj;
int r = mpv_main(*ctx->argc, *ctx->argv);
@ -364,7 +363,7 @@ int cocoa_main(int argc, char *argv[])
init_cocoa_application(false);
}
pthread_create(&playback_thread_id, NULL, playback_thread, &ctx);
mp_thread_create(&playback_thread_id, playback_thread, &ctx);
[[EventsResponder sharedInstance] waitForInputContext];
cocoa_run_runloop();
@ -373,7 +372,7 @@ int cocoa_main(int argc, char *argv[])
fprintf(stderr, "There was either a problem "
"initializing Cocoa or the Runloop was stopped unexpectedly. "
"Please report this issues to a developer.\n");
pthread_join(playback_thread_id, NULL);
mp_thread_join(playback_thread_id);
return 1;
}
}

View File

@ -16,14 +16,14 @@
*/
#include <string.h>
#include <pthread.h>
#include "options/path.h"
#include "osdep/threads.h"
#include "path.h"
#include "config.h"
static pthread_once_t path_init_once = PTHREAD_ONCE_INIT;
static mp_once path_init_once = MP_STATIC_ONCE_INITIALIZER;
static char mpv_home[512];
static char old_home[512];
@ -62,7 +62,7 @@ static void path_init(void)
const char *mp_get_platform_path_darwin(void *talloc_ctx, const char *type)
{
pthread_once(&path_init_once, path_init);
mp_exec_once(&path_init_once, path_init);
if (strcmp(type, "home") == 0)
return mpv_home;
if (strcmp(type, "old_home") == 0)

View File

@ -16,14 +16,14 @@
*/
#include <string.h>
#include <pthread.h>
#include "options/path.h"
#include "osdep/threads.h"
#include "path.h"
#include "config.h"
static pthread_once_t path_init_once = PTHREAD_ONCE_INIT;
static mp_once path_init_once = MP_STATIC_ONCE_INITIALIZER;
#define CONF_MAX 512
static char mpv_home[CONF_MAX];
@ -83,7 +83,7 @@ static void path_init(void)
const char *mp_get_platform_path_unix(void *talloc_ctx, const char *type)
{
pthread_once(&path_init_once, path_init);
mp_exec_once(&path_init_once, path_init);
if (strcmp(type, "home") == 0)
return mpv_home;
if (strcmp(type, "old_home") == 0)

View File

@ -18,15 +18,15 @@
#include <windows.h>
#include <shlobj.h>
#include <knownfolders.h>
#include <pthread.h>
#include "osdep/path.h"
#include "osdep/io.h"
#include "options/path.h"
#include "osdep/io.h"
#include "osdep/path.h"
#include "osdep/threads.h"
// Warning: do not use PATH_MAX. Cygwin messed it up.
static pthread_once_t path_init_once = PTHREAD_ONCE_INIT;
static mp_once path_init_once = MP_STATIC_ONCE_INITIALIZER;
static char *portable_path;
@ -88,7 +88,7 @@ static void path_init(void)
const char *mp_get_platform_path_win(void *talloc_ctx, const char *type)
{
pthread_once(&path_init_once, path_init);
mp_exec_once(&path_init_once, path_init);
if (portable_path) {
if (strcmp(type, "home") == 0)
return portable_path;

View File

@ -14,13 +14,13 @@
#define MP_SEMAPHORE_EMULATION
#include <pthread.h>
#include "osdep/threads.h"
#define MP_SEM_VALUE_MAX 4096
typedef struct {
int wakeup_pipe[2];
pthread_mutex_t lock;
mp_mutex lock;
// protected by lock
unsigned int count;
} mp_sem_t;

View File

@ -40,7 +40,7 @@ int mp_sem_init(mp_sem_t *sem, int pshared, unsigned int value)
if (mp_make_wakeup_pipe(sem->wakeup_pipe) < 0)
return -1;
sem->count = 0;
pthread_mutex_init(&sem->lock, NULL);
mp_mutex_init(&sem->lock);
return 0;
}
@ -52,7 +52,7 @@ int mp_sem_wait(mp_sem_t *sem)
int mp_sem_trywait(mp_sem_t *sem)
{
int r = -1;
pthread_mutex_lock(&sem->lock);
mp_mutex_lock(&sem->lock);
if (sem->count == 0) {
char buf[1024];
ssize_t s = read(sem->wakeup_pipe[0], buf, sizeof(buf));
@ -63,7 +63,7 @@ int mp_sem_trywait(mp_sem_t *sem)
sem->count -= 1;
r = 0;
}
pthread_mutex_unlock(&sem->lock);
mp_mutex_unlock(&sem->lock);
if (r < 0)
errno = EAGAIN;
return r;
@ -110,7 +110,7 @@ int mp_sem_destroy(mp_sem_t *sem)
{
close(sem->wakeup_pipe[0]);
close(sem->wakeup_pipe[1]);
pthread_mutex_destroy(&sem->lock);
mp_mutex_destroy(&sem->lock);
return 0;
}

View File

@ -16,7 +16,6 @@
*/
#include <poll.h>
#include <pthread.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>

View File

@ -15,8 +15,6 @@
* License along with mpv. If not, see <http://www.gnu.org/licenses/>.
*/
#include <pthread.h>
#include "common/common.h"
#include "common/msg.h"
#include "common/msg_control.h"

View File

@ -23,7 +23,6 @@
#include <signal.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <pthread.h>
#include <assert.h>
#include <termios.h>
@ -349,7 +348,7 @@ static void getch2_poll(void)
do_deactivate_getch2();
}
static pthread_t input_thread;
static mp_thread input_thread;
static struct input_ctx *input_ctx;
static int death_pipe[2] = {-1, -1};
enum { PIPE_STOP, PIPE_CONT };
@ -406,9 +405,9 @@ static void quit_request_sighandler(int signum)
errno = saved_errno;
}
static void *terminal_thread(void *ptr)
static MP_THREAD_VOID terminal_thread(void *ptr)
{
mpthread_set_name("terminal/input");
mp_thread_set_name("terminal/input");
bool stdin_ok = read_terminal; // if false, we still wait for SIGTERM
while (1) {
getch2_poll();
@ -461,7 +460,7 @@ static void *terminal_thread(void *ptr)
if (cmd)
mp_input_queue_cmd(input_ctx, cmd);
}
return NULL;
MP_THREAD_RETURN();
}
void terminal_setup_getch(struct input_ctx *ictx)
@ -483,7 +482,7 @@ void terminal_setup_getch(struct input_ctx *ictx)
input_ctx = ictx;
if (pthread_create(&input_thread, NULL, terminal_thread, NULL)) {
if (mp_thread_create(&input_thread, terminal_thread, NULL)) {
input_ctx = NULL;
close_sig_pipes();
close_tty();
@ -511,7 +510,7 @@ void terminal_uninit(void)
if (input_ctx) {
(void)write(death_pipe[1], &(char){0}, 1);
pthread_join(input_thread, NULL);
mp_thread_join(input_thread);
close_sig_pipes();
input_ctx = NULL;
}

View File

@ -24,7 +24,6 @@
#include <string.h>
#include <windows.h>
#include <io.h>
#include <pthread.h>
#include <assert.h>
#include "common/common.h"
#include "input/keycodes.h"
@ -92,7 +91,7 @@ static const unsigned char ansi2win32bg[8] = {
static bool running;
static HANDLE death;
static pthread_t input_thread;
static mp_thread input_thread;
static struct input_ctx *input_ctx;
void terminal_get_size(int *w, int *h)
@ -159,9 +158,9 @@ static void read_input(HANDLE in)
}
}
static void *input_thread_fn(void *ptr)
static MP_THREAD_VOID input_thread_fn(void *ptr)
{
mpthread_set_name("terminal/input");
mp_thread_set_name("terminal/input");
HANDLE in = ptr;
HANDLE stuff[2] = {in, death};
while (1) {
@ -170,7 +169,7 @@ static void *input_thread_fn(void *ptr)
break;
read_input(in);
}
return NULL;
MP_THREAD_RETURN();
}
void terminal_setup_getch(struct input_ctx *ictx)
@ -184,7 +183,7 @@ void terminal_setup_getch(struct input_ctx *ictx)
death = CreateEventW(NULL, TRUE, FALSE, NULL);
if (!death)
return;
if (pthread_create(&input_thread, NULL, input_thread_fn, in)) {
if (mp_thread_create(&input_thread, input_thread_fn, in)) {
CloseHandle(death);
return;
}
@ -196,7 +195,7 @@ void terminal_uninit(void)
{
if (running) {
SetEvent(death);
pthread_join(input_thread, NULL);
mp_thread_join(input_thread);
input_ctx = NULL;
running = false;
}

View File

@ -26,10 +26,11 @@
#include "common/common.h"
#include "common/msg.h"
#include "misc/random.h"
#include "threads.h"
#include "timer.h"
static uint64_t raw_time_offset;
static pthread_once_t timer_init_once = PTHREAD_ONCE_INIT;
static mp_once timer_init_once = MP_STATIC_ONCE_INITIALIZER;
static void do_timer_init(void)
{
@ -41,7 +42,7 @@ static void do_timer_init(void)
void mp_time_init(void)
{
pthread_once(&timer_init_once, do_timer_init);
mp_exec_once(&timer_init_once, do_timer_init);
}
int64_t mp_time_ns(void)

View File

@ -16,7 +16,7 @@
#ifndef MP_WRAP_SEMAPHORE_H_
#define MP_WRAP_SEMAPHORE_H_
#include <pthread.h>
#include "osdep/threads.h"
// See pthread.h for rationale.
#define sem_init m_sem_init
@ -29,8 +29,8 @@
#define SEM_VALUE_MAX 100
typedef struct {
pthread_mutex_t lock;
pthread_cond_t wakeup;
mp_mutex lock;
mp_cond wakeup;
unsigned int value;
} sem_t;

View File

@ -64,7 +64,7 @@
struct mp_client_api {
struct MPContext *mpctx;
pthread_mutex_t lock;
mp_mutex lock;
// -- protected by lock
@ -118,10 +118,10 @@ struct mpv_handle {
struct mpv_event_property cur_property_event;
struct observe_property *cur_property;
pthread_mutex_t lock;
mp_mutex lock;
pthread_mutex_t wakeup_lock;
pthread_cond_t wakeup;
mp_mutex wakeup_lock;
mp_cond wakeup;
// -- protected by wakeup_lock
bool need_wakeup;
@ -185,7 +185,7 @@ void mp_clients_init(struct MPContext *mpctx)
.mpctx = mpctx,
};
mpctx->global->client_api = mpctx->clients;
pthread_mutex_init(&mpctx->clients->lock, NULL);
mp_mutex_init(&mpctx->clients->lock);
}
void mp_clients_destroy(struct MPContext *mpctx)
@ -201,7 +201,7 @@ void mp_clients_destroy(struct MPContext *mpctx)
abort();
}
pthread_mutex_destroy(&mpctx->clients->lock);
mp_mutex_destroy(&mpctx->clients->lock);
talloc_free(mpctx->clients);
mpctx->clients = NULL;
}
@ -211,14 +211,14 @@ void mp_clients_destroy(struct MPContext *mpctx)
bool mp_clients_all_initialized(struct MPContext *mpctx)
{
bool all_ok = true;
pthread_mutex_lock(&mpctx->clients->lock);
mp_mutex_lock(&mpctx->clients->lock);
for (int n = 0; n < mpctx->clients->num_clients; n++) {
struct mpv_handle *ctx = mpctx->clients->clients[n];
pthread_mutex_lock(&ctx->lock);
mp_mutex_lock(&ctx->lock);
all_ok &= ctx->fuzzy_initialized;
pthread_mutex_unlock(&ctx->lock);
mp_mutex_unlock(&ctx->lock);
}
pthread_mutex_unlock(&mpctx->clients->lock);
mp_mutex_unlock(&mpctx->clients->lock);
return all_ok;
}
@ -253,15 +253,15 @@ static struct mpv_handle *find_client(struct mp_client_api *clients,
bool mp_client_id_exists(struct MPContext *mpctx, int64_t id)
{
pthread_mutex_lock(&mpctx->clients->lock);
mp_mutex_lock(&mpctx->clients->lock);
bool r = find_client_id(mpctx->clients, id);
pthread_mutex_unlock(&mpctx->clients->lock);
mp_mutex_unlock(&mpctx->clients->lock);
return r;
}
struct mpv_handle *mp_new_client(struct mp_client_api *clients, const char *name)
{
pthread_mutex_lock(&clients->lock);
mp_mutex_lock(&clients->lock);
char nname[MAX_CLIENT_NAME];
for (int n = 1; n < 1000; n++) {
@ -278,7 +278,7 @@ struct mpv_handle *mp_new_client(struct mp_client_api *clients, const char *name
}
if (!nname[0] || clients->shutting_down) {
pthread_mutex_unlock(&clients->lock);
mp_mutex_unlock(&clients->lock);
return NULL;
}
@ -296,9 +296,9 @@ struct mpv_handle *mp_new_client(struct mp_client_api *clients, const char *name
.event_mask = (1ULL << INTERNAL_EVENT_BASE) - 1, // exclude internal events
.wakeup_pipe = {-1, -1},
};
pthread_mutex_init(&client->lock, NULL);
pthread_mutex_init(&client->wakeup_lock, NULL);
pthread_cond_init(&client->wakeup, NULL);
mp_mutex_init(&client->lock);
mp_mutex_init(&client->wakeup_lock);
mp_cond_init(&client->wakeup);
snprintf(client->name, sizeof(client->name), "%s", nname);
@ -308,7 +308,7 @@ struct mpv_handle *mp_new_client(struct mp_client_api *clients, const char *name
if (clients->num_clients == 1 && !clients->mpctx->is_cli)
client->fuzzy_initialized = true;
pthread_mutex_unlock(&clients->lock);
mp_mutex_unlock(&clients->lock);
mpv_request_event(client, MPV_EVENT_TICK, 0);
@ -317,9 +317,9 @@ struct mpv_handle *mp_new_client(struct mp_client_api *clients, const char *name
void mp_client_set_weak(struct mpv_handle *ctx)
{
pthread_mutex_lock(&ctx->lock);
mp_mutex_lock(&ctx->lock);
ctx->is_weak = true;
pthread_mutex_unlock(&ctx->lock);
mp_mutex_unlock(&ctx->lock);
}
const char *mpv_client_name(mpv_handle *ctx)
@ -344,43 +344,41 @@ struct mpv_global *mp_client_get_global(struct mpv_handle *ctx)
static void wakeup_client(struct mpv_handle *ctx)
{
pthread_mutex_lock(&ctx->wakeup_lock);
mp_mutex_lock(&ctx->wakeup_lock);
if (!ctx->need_wakeup) {
ctx->need_wakeup = true;
pthread_cond_broadcast(&ctx->wakeup);
mp_cond_broadcast(&ctx->wakeup);
if (ctx->wakeup_cb)
ctx->wakeup_cb(ctx->wakeup_cb_ctx);
if (ctx->wakeup_pipe[0] != -1)
(void)write(ctx->wakeup_pipe[1], &(char){0}, 1);
}
pthread_mutex_unlock(&ctx->wakeup_lock);
mp_mutex_unlock(&ctx->wakeup_lock);
}
// Note: the caller has to deal with sporadic wakeups.
static int wait_wakeup(struct mpv_handle *ctx, int64_t end)
{
int r = 0;
pthread_mutex_unlock(&ctx->lock);
pthread_mutex_lock(&ctx->wakeup_lock);
if (!ctx->need_wakeup) {
struct timespec ts = mp_time_ns_to_realtime(end);
r = pthread_cond_timedwait(&ctx->wakeup, &ctx->wakeup_lock, &ts);
}
mp_mutex_unlock(&ctx->lock);
mp_mutex_lock(&ctx->wakeup_lock);
if (!ctx->need_wakeup)
r = mp_cond_timedwait_until(&ctx->wakeup, &ctx->wakeup_lock, end);
if (r == 0)
ctx->need_wakeup = false;
pthread_mutex_unlock(&ctx->wakeup_lock);
pthread_mutex_lock(&ctx->lock);
mp_mutex_unlock(&ctx->wakeup_lock);
mp_mutex_lock(&ctx->lock);
return r;
}
void mpv_set_wakeup_callback(mpv_handle *ctx, void (*cb)(void *d), void *d)
{
pthread_mutex_lock(&ctx->wakeup_lock);
mp_mutex_lock(&ctx->wakeup_lock);
ctx->wakeup_cb = cb;
ctx->wakeup_cb_ctx = d;
if (ctx->wakeup_cb)
ctx->wakeup_cb(ctx->wakeup_cb_ctx);
pthread_mutex_unlock(&ctx->wakeup_lock);
mp_mutex_unlock(&ctx->wakeup_lock);
}
static void lock_core(mpv_handle *ctx)
@ -395,10 +393,10 @@ static void unlock_core(mpv_handle *ctx)
void mpv_wait_async_requests(mpv_handle *ctx)
{
pthread_mutex_lock(&ctx->lock);
mp_mutex_lock(&ctx->lock);
while (ctx->reserved_events || ctx->async_counter)
wait_wakeup(ctx, INT64_MAX);
pthread_mutex_unlock(&ctx->lock);
mp_mutex_unlock(&ctx->lock);
}
// Send abort signal to all matching work items.
@ -407,7 +405,7 @@ void mpv_wait_async_requests(mpv_handle *ctx)
static void abort_async(struct MPContext *mpctx, mpv_handle *ctx,
int type, uint64_t id)
{
pthread_mutex_lock(&mpctx->abort_lock);
mp_mutex_lock(&mpctx->abort_lock);
// Destroy all => ensure any newly appearing work is aborted immediately.
if (ctx == NULL)
@ -422,12 +420,12 @@ static void abort_async(struct MPContext *mpctx, mpv_handle *ctx,
}
}
pthread_mutex_unlock(&mpctx->abort_lock);
mp_mutex_unlock(&mpctx->abort_lock);
}
static void get_thread(void *ptr)
{
*(pthread_t *)ptr = pthread_self();
*(mp_thread *)ptr = mp_thread_self();
}
static void mp_destroy_client(mpv_handle *ctx, bool terminate)
@ -443,7 +441,7 @@ static void mp_destroy_client(mpv_handle *ctx, bool terminate)
if (terminate)
mpv_command(ctx, (const char*[]){"quit", NULL});
pthread_mutex_lock(&ctx->lock);
mp_mutex_lock(&ctx->lock);
ctx->destroying = true;
@ -455,7 +453,7 @@ static void mp_destroy_client(mpv_handle *ctx, bool terminate)
prop_unref(ctx->cur_property);
ctx->cur_property = NULL;
pthread_mutex_unlock(&ctx->lock);
mp_mutex_unlock(&ctx->lock);
abort_async(mpctx, ctx, 0, 0);
@ -467,7 +465,7 @@ static void mp_destroy_client(mpv_handle *ctx, bool terminate)
osd_set_external_remove_owner(mpctx->osd, ctx);
mp_input_remove_sections_by_owner(mpctx->input, ctx->name);
pthread_mutex_lock(&clients->lock);
mp_mutex_lock(&clients->lock);
for (int n = 0; n < clients->num_clients; n++) {
if (clients->clients[n] == ctx) {
@ -479,9 +477,9 @@ static void mp_destroy_client(mpv_handle *ctx, bool terminate)
ctx->num_events--;
}
mp_msg_log_buffer_destroy(ctx->messages);
pthread_cond_destroy(&ctx->wakeup);
pthread_mutex_destroy(&ctx->wakeup_lock);
pthread_mutex_destroy(&ctx->lock);
mp_cond_destroy(&ctx->wakeup);
mp_mutex_destroy(&ctx->wakeup_lock);
mp_mutex_destroy(&ctx->lock);
if (ctx->wakeup_pipe[0] != -1) {
close(ctx->wakeup_pipe[0]);
close(ctx->wakeup_pipe[1]);
@ -513,7 +511,7 @@ static void mp_destroy_client(mpv_handle *ctx, bool terminate)
// mp_hook_test_completion() also relies on this a bit.
mp_wakeup_core(mpctx);
pthread_mutex_unlock(&clients->lock);
mp_mutex_unlock(&clients->lock);
// Note that even if num_clients==0, having set have_terminator keeps mpctx
// and the core thread alive.
@ -524,17 +522,17 @@ static void mp_destroy_client(mpv_handle *ctx, bool terminate)
mpctx->stop_play = PT_QUIT;
mp_dispatch_unlock(mpctx->dispatch);
pthread_t playthread;
mp_thread playthread;
mp_dispatch_run(mpctx->dispatch, get_thread, &playthread);
// Ask the core thread to stop.
pthread_mutex_lock(&clients->lock);
mp_mutex_lock(&clients->lock);
clients->terminate_core_thread = true;
pthread_mutex_unlock(&clients->lock);
mp_mutex_unlock(&clients->lock);
mp_wakeup_core(mpctx);
// Blocking wait for all clients and core thread to terminate.
pthread_join(playthread, NULL);
mp_thread_join(playthread);
mp_destroy(mpctx);
}
@ -559,7 +557,7 @@ void mp_shutdown_clients(struct MPContext *mpctx)
// Forcefully abort async work after 2 seconds of waiting.
double abort_time = mp_time_sec() + 2;
pthread_mutex_lock(&clients->lock);
mp_mutex_lock(&clients->lock);
// Prevent that new clients can appear.
clients->shutting_down = true;
@ -568,7 +566,7 @@ void mp_shutdown_clients(struct MPContext *mpctx)
while (clients->num_clients || mpctx->outstanding_async ||
!(mpctx->is_cli || clients->terminate_core_thread))
{
pthread_mutex_unlock(&clients->lock);
mp_mutex_unlock(&clients->lock);
double left = abort_time - mp_time_sec();
if (left >= 0) {
@ -583,26 +581,26 @@ void mp_shutdown_clients(struct MPContext *mpctx)
mp_client_broadcast_event(mpctx, MPV_EVENT_SHUTDOWN, NULL);
mp_wait_events(mpctx);
pthread_mutex_lock(&clients->lock);
mp_mutex_lock(&clients->lock);
}
pthread_mutex_unlock(&clients->lock);
mp_mutex_unlock(&clients->lock);
}
bool mp_is_shutting_down(struct MPContext *mpctx)
{
struct mp_client_api *clients = mpctx->clients;
pthread_mutex_lock(&clients->lock);
mp_mutex_lock(&clients->lock);
bool res = clients->shutting_down;
pthread_mutex_unlock(&clients->lock);
mp_mutex_unlock(&clients->lock);
return res;
}
static void *core_thread(void *p)
static MP_THREAD_VOID core_thread(void *p)
{
struct MPContext *mpctx = p;
mpthread_set_name("core");
mp_thread_set_name("core");
while (!mpctx->initialized && mpctx->stop_play != PT_QUIT)
mp_idle(mpctx);
@ -615,7 +613,7 @@ static void *core_thread(void *p)
// the last mpv_handle.
mp_shutdown_clients(mpctx);
return NULL;
MP_THREAD_RETURN();
}
mpv_handle *mpv_create(void)
@ -632,8 +630,8 @@ mpv_handle *mpv_create(void)
return NULL;
}
pthread_t thread;
if (pthread_create(&thread, NULL, core_thread, mpctx) != 0) {
mp_thread thread;
if (mp_thread_create(&thread, core_thread, mpctx) != 0) {
ctx->clients->have_terminator = true; // avoid blocking
mpv_terminate_destroy(ctx);
mp_destroy(mpctx);
@ -706,13 +704,13 @@ static void dup_event_data(struct mpv_event *ev)
static int reserve_reply(struct mpv_handle *ctx)
{
int res = MPV_ERROR_EVENT_QUEUE_FULL;
pthread_mutex_lock(&ctx->lock);
mp_mutex_lock(&ctx->lock);
if (ctx->reserved_events + ctx->num_events < ctx->max_events && !ctx->choked)
{
ctx->reserved_events++;
res = 0;
}
pthread_mutex_unlock(&ctx->lock);
mp_mutex_unlock(&ctx->lock);
return res;
}
@ -732,7 +730,7 @@ static int append_event(struct mpv_handle *ctx, struct mpv_event event, bool cop
static int send_event(struct mpv_handle *ctx, struct mpv_event *event, bool copy)
{
pthread_mutex_lock(&ctx->lock);
mp_mutex_lock(&ctx->lock);
uint64_t mask = 1ULL << event->event_id;
if (ctx->property_event_masks & mask)
notify_property_events(ctx, event->event_id);
@ -748,7 +746,7 @@ static int send_event(struct mpv_handle *ctx, struct mpv_event *event, bool copy
ctx->choked = true;
}
}
pthread_mutex_unlock(&ctx->lock);
mp_mutex_unlock(&ctx->lock);
return r;
}
@ -758,20 +756,20 @@ static void send_reply(struct mpv_handle *ctx, uint64_t userdata,
struct mpv_event *event)
{
event->reply_userdata = userdata;
pthread_mutex_lock(&ctx->lock);
mp_mutex_lock(&ctx->lock);
// If this fails, reserve_reply() probably wasn't called.
assert(ctx->reserved_events > 0);
ctx->reserved_events--;
if (append_event(ctx, *event, false) < 0)
MP_ASSERT_UNREACHABLE();
pthread_mutex_unlock(&ctx->lock);
mp_mutex_unlock(&ctx->lock);
}
void mp_client_broadcast_event(struct MPContext *mpctx, int event, void *data)
{
struct mp_client_api *clients = mpctx->clients;
pthread_mutex_lock(&clients->lock);
mp_mutex_lock(&clients->lock);
for (int n = 0; n < clients->num_clients; n++) {
struct mpv_event event_data = {
@ -781,7 +779,7 @@ void mp_client_broadcast_event(struct MPContext *mpctx, int event, void *data)
send_event(clients->clients[n], &event_data, true);
}
pthread_mutex_unlock(&clients->lock);
mp_mutex_unlock(&clients->lock);
}
// Like mp_client_broadcast_event(), but can be called from any thread.
@ -814,7 +812,7 @@ int mp_client_send_event(struct MPContext *mpctx, const char *client_name,
.reply_userdata = reply_userdata,
};
pthread_mutex_lock(&clients->lock);
mp_mutex_lock(&clients->lock);
struct mpv_handle *ctx = find_client(clients, client_name);
if (ctx) {
@ -824,7 +822,7 @@ int mp_client_send_event(struct MPContext *mpctx, const char *client_name,
talloc_free(data);
}
pthread_mutex_unlock(&clients->lock);
mp_mutex_unlock(&clients->lock);
return r;
}
@ -858,7 +856,7 @@ int mpv_request_event(mpv_handle *ctx, mpv_event_id event, int enable)
if (event == MPV_EVENT_SHUTDOWN && !enable)
return MPV_ERROR_INVALID_PARAMETER;
assert(event < (int)INTERNAL_EVENT_BASE); // excluded above; they have no name
pthread_mutex_lock(&ctx->lock);
mp_mutex_lock(&ctx->lock);
uint64_t bit = 1ULL << event;
ctx->event_mask = enable ? ctx->event_mask | bit : ctx->event_mask & ~bit;
if (enable && event < MP_ARRAY_SIZE(deprecated_events) &&
@ -867,7 +865,7 @@ int mpv_request_event(mpv_handle *ctx, mpv_event_id event, int enable)
MP_WARN(ctx, "The '%s' event is deprecated and will be removed.\n",
mpv_event_name(event));
}
pthread_mutex_unlock(&ctx->lock);
mp_mutex_unlock(&ctx->lock);
return 0;
}
@ -896,7 +894,7 @@ mpv_event *mpv_wait_event(mpv_handle *ctx, double timeout)
{
mpv_event *event = ctx->cur_event;
pthread_mutex_lock(&ctx->lock);
mp_mutex_lock(&ctx->lock);
if (!ctx->fuzzy_initialized)
mp_wakeup_core(ctx->clients->mpctx);
@ -954,17 +952,17 @@ mpv_event *mpv_wait_event(mpv_handle *ctx, double timeout)
}
ctx->queued_wakeup = false;
pthread_mutex_unlock(&ctx->lock);
mp_mutex_unlock(&ctx->lock);
return event;
}
void mpv_wakeup(mpv_handle *ctx)
{
pthread_mutex_lock(&ctx->lock);
mp_mutex_lock(&ctx->lock);
ctx->queued_wakeup = true;
wakeup_client(ctx);
pthread_mutex_unlock(&ctx->lock);
mp_mutex_unlock(&ctx->lock);
}
// map client API types to internal types
@ -1546,7 +1544,7 @@ int mpv_observe_property(mpv_handle *ctx, uint64_t userdata,
if (format == MPV_FORMAT_OSD_STRING)
return MPV_ERROR_PROPERTY_FORMAT;
pthread_mutex_lock(&ctx->lock);
mp_mutex_lock(&ctx->lock);
assert(!ctx->destroying);
struct observe_property *prop = talloc_ptrtype(ctx, prop);
talloc_set_destructor(prop, property_free);
@ -1569,14 +1567,14 @@ int mpv_observe_property(mpv_handle *ctx, uint64_t userdata,
ctx->new_property_events = true;
ctx->cur_property_index = 0;
ctx->has_pending_properties = true;
pthread_mutex_unlock(&ctx->lock);
mp_mutex_unlock(&ctx->lock);
mp_wakeup_core(ctx->mpctx);
return 0;
}
int mpv_unobserve_property(mpv_handle *ctx, uint64_t userdata)
{
pthread_mutex_lock(&ctx->lock);
mp_mutex_lock(&ctx->lock);
int count = 0;
for (int n = ctx->num_properties - 1; n >= 0; n--) {
struct observe_property *prop = ctx->properties[n];
@ -1590,7 +1588,7 @@ int mpv_unobserve_property(mpv_handle *ctx, uint64_t userdata)
count++;
}
}
pthread_mutex_unlock(&ctx->lock);
mp_mutex_unlock(&ctx->lock);
return count;
}
@ -1625,11 +1623,11 @@ void mp_client_property_change(struct MPContext *mpctx, const char *name)
int id = mp_get_property_id(mpctx, name);
bool any_pending = false;
pthread_mutex_lock(&clients->lock);
mp_mutex_lock(&clients->lock);
for (int n = 0; n < clients->num_clients; n++) {
struct mpv_handle *client = clients->clients[n];
pthread_mutex_lock(&client->lock);
mp_mutex_lock(&client->lock);
for (int i = 0; i < client->num_properties; i++) {
if (client->properties[i]->id == id &&
property_shared_prefix(name, client->properties[i]->name)) {
@ -1638,10 +1636,10 @@ void mp_client_property_change(struct MPContext *mpctx, const char *name)
any_pending = true;
}
}
pthread_mutex_unlock(&client->lock);
mp_mutex_unlock(&client->lock);
}
pthread_mutex_unlock(&clients->lock);
mp_mutex_unlock(&clients->lock);
// If we're inside mp_dispatch_queue_process(), this will cause the playloop
// to be re-run (to get mp_client_send_property_changes() called). If we're
@ -1698,9 +1696,9 @@ static void send_client_property_changes(struct mpv_handle *ctx)
// or similar things are involved).
prop->refcount += 1; // keep prop alive (esp. prop->name)
ctx->async_counter += 1; // keep ctx alive
pthread_mutex_unlock(&ctx->lock);
mp_mutex_unlock(&ctx->lock);
getproperty_fn(&req);
pthread_mutex_lock(&ctx->lock);
mp_mutex_lock(&ctx->lock);
ctx->async_counter -= 1;
prop_unref(prop);
@ -1757,22 +1755,22 @@ void mp_client_send_property_changes(struct MPContext *mpctx)
{
struct mp_client_api *clients = mpctx->clients;
pthread_mutex_lock(&clients->lock);
mp_mutex_lock(&clients->lock);
uint64_t cur_ts = clients->clients_list_change_ts;
for (int n = 0; n < clients->num_clients; n++) {
struct mpv_handle *ctx = clients->clients[n];
pthread_mutex_lock(&ctx->lock);
mp_mutex_lock(&ctx->lock);
if (!ctx->has_pending_properties || ctx->destroying) {
pthread_mutex_unlock(&ctx->lock);
mp_mutex_unlock(&ctx->lock);
continue;
}
// Keep ctx->lock locked (unlock order does not matter).
pthread_mutex_unlock(&clients->lock);
mp_mutex_unlock(&clients->lock);
send_client_property_changes(ctx);
pthread_mutex_unlock(&ctx->lock);
pthread_mutex_lock(&clients->lock);
mp_mutex_unlock(&ctx->lock);
mp_mutex_lock(&clients->lock);
if (cur_ts != clients->clients_list_change_ts) {
// List changed; need to start over. Do it in the next iteration.
mp_wakeup_core(mpctx);
@ -1780,7 +1778,7 @@ void mp_client_send_property_changes(struct MPContext *mpctx)
}
}
pthread_mutex_unlock(&clients->lock);
mp_mutex_unlock(&clients->lock);
}
// Set ctx->cur_event to a generated property change event, if there is any
@ -1887,7 +1885,7 @@ int mpv_request_log_messages(mpv_handle *ctx, const char *min_level)
if (level < 0 && strcmp(min_level, "no") != 0)
return MPV_ERROR_INVALID_PARAMETER;
pthread_mutex_lock(&ctx->lock);
mp_mutex_lock(&ctx->lock);
if (level < 0 || level != ctx->messages_level) {
mp_msg_log_buffer_destroy(ctx->messages);
ctx->messages = NULL;
@ -1902,7 +1900,7 @@ int mpv_request_log_messages(mpv_handle *ctx, const char *min_level)
mp_msg_log_buffer_set_silent(ctx->messages, silent);
}
wakeup_client(ctx);
pthread_mutex_unlock(&ctx->lock);
mp_mutex_unlock(&ctx->lock);
return 0;
}
@ -1934,13 +1932,13 @@ static bool gen_log_message_event(struct mpv_handle *ctx)
int mpv_get_wakeup_pipe(mpv_handle *ctx)
{
pthread_mutex_lock(&ctx->wakeup_lock);
mp_mutex_lock(&ctx->wakeup_lock);
if (ctx->wakeup_pipe[0] == -1) {
if (mp_make_wakeup_pipe(ctx->wakeup_pipe) >= 0)
(void)write(ctx->wakeup_pipe[1], &(char){0}, 1);
}
int fd = ctx->wakeup_pipe[0];
pthread_mutex_unlock(&ctx->wakeup_lock);
mp_mutex_unlock(&ctx->wakeup_lock);
return fd;
}
@ -2168,14 +2166,14 @@ bool mp_set_main_render_context(struct mp_client_api *client_api,
{
assert(ctx);
pthread_mutex_lock(&client_api->lock);
mp_mutex_lock(&client_api->lock);
bool is_set = !!client_api->render_context;
bool is_same = client_api->render_context == ctx;
// Can set if it doesn't remove another existing ctx.
bool res = is_same || !is_set;
if (res)
client_api->render_context = active ? ctx : NULL;
pthread_mutex_unlock(&client_api->lock);
mp_mutex_unlock(&client_api->lock);
return res;
}
@ -2184,10 +2182,10 @@ struct mpv_render_context *
mp_client_api_acquire_render_context(struct mp_client_api *ca)
{
struct mpv_render_context *res = NULL;
pthread_mutex_lock(&ca->lock);
mp_mutex_lock(&ca->lock);
if (ca->render_context && mp_render_context_acquire(ca->render_context))
res = ca->render_context;
pthread_mutex_unlock(&ca->lock);
mp_mutex_unlock(&ca->lock);
return res;
}
@ -2207,7 +2205,7 @@ int mpv_stream_cb_add_ro(mpv_handle *ctx, const char *protocol, void *user_data,
struct mp_client_api *clients = ctx->clients;
int r = 0;
pthread_mutex_lock(&clients->lock);
mp_mutex_lock(&clients->lock);
for (int n = 0; n < clients->num_custom_protocols; n++) {
struct mp_custom_protocol *proto = &clients->custom_protocols[n];
if (strcmp(proto->protocol, protocol) == 0) {
@ -2226,7 +2224,7 @@ int mpv_stream_cb_add_ro(mpv_handle *ctx, const char *protocol, void *user_data,
MP_TARRAY_APPEND(clients, clients->custom_protocols,
clients->num_custom_protocols, proto);
}
pthread_mutex_unlock(&clients->lock);
mp_mutex_unlock(&clients->lock);
return r;
}
@ -2235,7 +2233,7 @@ bool mp_streamcb_lookup(struct mpv_global *g, const char *protocol,
{
struct mp_client_api *clients = g->client_api;
bool found = false;
pthread_mutex_lock(&clients->lock);
mp_mutex_lock(&clients->lock);
for (int n = 0; n < clients->num_custom_protocols; n++) {
struct mp_custom_protocol *proto = &clients->custom_protocols[n];
if (strcmp(proto->protocol, protocol) == 0) {
@ -2245,6 +2243,6 @@ bool mp_streamcb_lookup(struct mpv_global *g, const char *protocol,
break;
}
}
pthread_mutex_unlock(&clients->lock);
mp_mutex_unlock(&clients->lock);
return found;
}

View File

@ -41,6 +41,7 @@
#include "common/stats.h"
#include "filters/f_decoder_wrapper.h"
#include "command.h"
#include "osdep/threads.h"
#include "osdep/timer.h"
#include "common/common.h"
#include "input/input.h"
@ -4847,7 +4848,7 @@ struct cmd_list_ctx {
struct mp_cmd_ctx *parent;
bool current_valid;
pthread_t current;
mp_thread current;
bool completed_recursive;
// list of sub commands yet to run
@ -4861,7 +4862,7 @@ static void on_cmd_list_sub_completion(struct mp_cmd_ctx *cmd)
{
struct cmd_list_ctx *list = cmd->on_completion_priv;
if (list->current_valid && pthread_equal(list->current, pthread_self())) {
if (list->current_valid && mp_thread_equal(list->current, mp_thread_self())) {
list->completed_recursive = true;
} else {
continue_cmd_list(list);
@ -4884,7 +4885,7 @@ static void continue_cmd_list(struct cmd_list_ctx *list)
list->completed_recursive = false;
list->current_valid = true;
list->current = pthread_self();
list->current = mp_thread_self();
run_command(list->mpctx, sub, NULL, on_cmd_list_sub_completion, list);
@ -5882,10 +5883,10 @@ static void cmd_subprocess(void *p)
fdctx[1].capture = cmd->args[3].v.b;
fdctx[2].capture = cmd->args[4].v.b;
pthread_mutex_lock(&mpctx->abort_lock);
mp_mutex_lock(&mpctx->abort_lock);
cmd->abort->coupled_to_playback = playback_only;
mp_abort_recheck_locked(mpctx, cmd->abort);
pthread_mutex_unlock(&mpctx->abort_lock);
mp_mutex_unlock(&mpctx->abort_lock);
mp_core_unlock(mpctx);

View File

@ -18,18 +18,18 @@
#ifndef MPLAYER_MP_CORE_H
#define MPLAYER_MP_CORE_H
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include "libmpv/client.h"
#include "common/common.h"
#include "filters/filter.h"
#include "filters/f_output_chain.h"
#include "options/options.h"
#include "sub/osd.h"
#include "audio/aframe.h"
#include "common/common.h"
#include "filters/f_output_chain.h"
#include "filters/filter.h"
#include "options/options.h"
#include "osdep/threads.h"
#include "sub/osd.h"
#include "video/mp_image.h"
#include "video/out/vo.h"
@ -430,7 +430,7 @@ typedef struct MPContext {
int64_t builtin_script_ids[5];
pthread_mutex_t abort_lock;
mp_mutex abort_lock;
// --- The following fields are protected by abort_lock
struct mp_abort_entry **abort_list;
@ -438,7 +438,7 @@ typedef struct MPContext {
bool abort_all; // during final termination
// --- Owned by MPContext
pthread_t open_thread;
mp_thread open_thread;
bool open_active; // open_thread is a valid thread handle, all setup
atomic_bool open_done;
// --- All fields below are immutable while open_active is true.

View File

@ -74,7 +74,7 @@ void mp_abort_playback_async(struct MPContext *mpctx)
{
mp_cancel_trigger(mpctx->playback_abort);
pthread_mutex_lock(&mpctx->abort_lock);
mp_mutex_lock(&mpctx->abort_lock);
for (int n = 0; n < mpctx->num_abort_list; n++) {
struct mp_abort_entry *abort = mpctx->abort_list[n];
@ -82,25 +82,25 @@ void mp_abort_playback_async(struct MPContext *mpctx)
mp_abort_trigger_locked(mpctx, abort);
}
pthread_mutex_unlock(&mpctx->abort_lock);
mp_mutex_unlock(&mpctx->abort_lock);
}
// Add it to the global list, and allocate required data structures.
void mp_abort_add(struct MPContext *mpctx, struct mp_abort_entry *abort)
{
pthread_mutex_lock(&mpctx->abort_lock);
mp_mutex_lock(&mpctx->abort_lock);
assert(!abort->cancel);
abort->cancel = mp_cancel_new(NULL);
MP_TARRAY_APPEND(NULL, mpctx->abort_list, mpctx->num_abort_list, abort);
mp_abort_recheck_locked(mpctx, abort);
pthread_mutex_unlock(&mpctx->abort_lock);
mp_mutex_unlock(&mpctx->abort_lock);
}
// Remove Add it to the global list, and free/clear required data structures.
// Does not deallocate the abort value itself.
void mp_abort_remove(struct MPContext *mpctx, struct mp_abort_entry *abort)
{
pthread_mutex_lock(&mpctx->abort_lock);
mp_mutex_lock(&mpctx->abort_lock);
for (int n = 0; n < mpctx->num_abort_list; n++) {
if (mpctx->abort_list[n] == abort) {
MP_TARRAY_REMOVE_AT(mpctx->abort_list, mpctx->num_abort_list, n);
@ -110,7 +110,7 @@ void mp_abort_remove(struct MPContext *mpctx, struct mp_abort_entry *abort)
}
}
assert(!abort); // should have been in the list
pthread_mutex_unlock(&mpctx->abort_lock);
mp_mutex_unlock(&mpctx->abort_lock);
}
// Verify whether the abort needs to be signaled after changing certain fields
@ -1155,11 +1155,11 @@ static void load_per_file_options(m_config_t *conf,
}
}
static void *open_demux_thread(void *ctx)
static MP_THREAD_VOID open_demux_thread(void *ctx)
{
struct MPContext *mpctx = ctx;
mpthread_set_name("opener");
mp_thread_set_name("opener");
struct demuxer_params p = {
.force_format = mpctx->open_format,
@ -1197,7 +1197,7 @@ static void *open_demux_thread(void *ctx)
atomic_store(&mpctx->open_done, true);
mp_wakeup_core(mpctx);
return NULL;
MP_THREAD_RETURN();
}
static void cancel_open(struct MPContext *mpctx)
@ -1206,7 +1206,7 @@ static void cancel_open(struct MPContext *mpctx)
mp_cancel_trigger(mpctx->open_cancel);
if (mpctx->open_active)
pthread_join(mpctx->open_thread, NULL);
mp_thread_join(mpctx->open_thread);
mpctx->open_active = false;
if (mpctx->open_res_demuxer)
@ -1237,7 +1237,7 @@ static void start_open(struct MPContext *mpctx, char *url, int url_flags,
mpctx->open_url_flags = url_flags;
mpctx->open_for_prefetch = for_prefetch && mpctx->opts->demuxer_thread;
if (pthread_create(&mpctx->open_thread, NULL, open_demux_thread, mpctx)) {
if (mp_thread_create(&mpctx->open_thread, open_demux_thread, mpctx)) {
cancel_open(mpctx);
return;
}

View File

@ -21,7 +21,6 @@
#include <math.h>
#include <assert.h>
#include <string.h>
#include <pthread.h>
#include <locale.h>
#include "config.h"
@ -100,16 +99,16 @@ const char mp_help_text[] =
" --h=<string> print options which contain the given string in their name\n"
"\n";
static pthread_mutex_t terminal_owner_lock = PTHREAD_MUTEX_INITIALIZER;
static mp_static_mutex terminal_owner_lock = MP_STATIC_MUTEX_INITIALIZER;
static struct MPContext *terminal_owner;
static bool cas_terminal_owner(struct MPContext *old, struct MPContext *new)
{
pthread_mutex_lock(&terminal_owner_lock);
mp_mutex_lock(&terminal_owner_lock);
bool r = terminal_owner == old;
if (r)
terminal_owner = new;
pthread_mutex_unlock(&terminal_owner_lock);
mp_mutex_unlock(&terminal_owner_lock);
return r;
}
@ -197,7 +196,7 @@ void mp_destroy(struct MPContext *mpctx)
mp_msg_uninit(mpctx->global);
assert(!mpctx->num_abort_list);
talloc_free(mpctx->abort_list);
pthread_mutex_destroy(&mpctx->abort_lock);
mp_mutex_destroy(&mpctx->abort_lock);
talloc_free(mpctx->mconfig); // destroy before dispatch
talloc_free(mpctx);
}
@ -269,7 +268,7 @@ struct MPContext *mp_create(void)
.play_dir = 1,
};
pthread_mutex_init(&mpctx->abort_lock, NULL);
mp_mutex_init(&mpctx->abort_lock);
mpctx->global = talloc_zero(mpctx, struct mpv_global);
@ -420,7 +419,7 @@ int mp_initialize(struct MPContext *mpctx, char **options)
int mpv_main(int argc, char *argv[])
{
mpthread_set_name("mpv");
mp_thread_set_name("mpv");
struct MPContext *mpctx = mp_create();
if (!mpctx)
return 1;

View File

@ -20,7 +20,6 @@
#include <sys/types.h>
#include <dirent.h>
#include <math.h>
#include <pthread.h>
#include <assert.h>
#include <unistd.h>
@ -87,7 +86,7 @@ static void run_script(struct mp_script_args *arg)
{
char *name = talloc_asprintf(NULL, "%s/%s", arg->backend->name,
mpv_client_name(arg->client));
mpthread_set_name(name);
mp_thread_set_name(name);
talloc_free(name);
if (arg->backend->load(arg) < 0)
@ -97,14 +96,14 @@ static void run_script(struct mp_script_args *arg)
talloc_free(arg);
}
static void *script_thread(void *p)
static MP_THREAD_VOID script_thread(void *p)
{
pthread_detach(pthread_self());
pthread_detach(mp_thread_self());
struct mp_script_args *arg = p;
run_script(arg);
return NULL;
MP_THREAD_RETURN();
}
static int64_t mp_load_script(struct MPContext *mpctx, const char *fname)
@ -193,8 +192,8 @@ static int64_t mp_load_script(struct MPContext *mpctx, const char *fname)
if (backend->no_thread) {
run_script(arg);
} else {
pthread_t thread;
if (pthread_create(&thread, NULL, script_thread, arg)) {
mp_thread thread;
if (mp_thread_create(&thread, script_thread, arg)) {
mpv_destroy(arg->client);
talloc_free(arg);
return -1;

View File

@ -41,7 +41,6 @@
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <pthread.h>
#include "osdep/io.h"
#include "misc/ctype.h"
@ -53,6 +52,7 @@
#include "options/m_option.h"
#include "options/options.h"
#include "options/path.h"
#include "osdep/threads.h"
#include "dvbin.h"
#include "dvb_tune.h"
@ -66,7 +66,7 @@
#define OPT_BASE_STRUCT dvb_opts_t
static dvb_state_t *global_dvb_state = NULL;
static pthread_mutex_t global_dvb_state_lock = PTHREAD_MUTEX_INITIALIZER;
static mp_static_mutex global_dvb_state_lock = MP_STATIC_MUTEX_INITIALIZER;
const struct m_sub_options stream_dvb_conf = {
.opts = (const m_option_t[]) {
@ -821,9 +821,9 @@ void dvbin_close(stream_t *stream)
if (state->switching_channel && state->is_on) {
// Prevent state destruction, reset channel-switch.
state->switching_channel = false;
pthread_mutex_lock(&global_dvb_state_lock);
mp_mutex_lock(&global_dvb_state_lock);
global_dvb_state->stream_used = false;
pthread_mutex_unlock(&global_dvb_state_lock);
mp_mutex_unlock(&global_dvb_state_lock);
return;
}
@ -839,9 +839,9 @@ void dvbin_close(stream_t *stream)
state->cur_adapter = -1;
state->cur_frontend = -1;
pthread_mutex_lock(&global_dvb_state_lock);
mp_mutex_lock(&global_dvb_state_lock);
TA_FREEP(&global_dvb_state);
pthread_mutex_unlock(&global_dvb_state_lock);
mp_mutex_unlock(&global_dvb_state_lock);
}
static int dvb_streaming_start(stream_t *stream, char *progname)
@ -918,10 +918,10 @@ static int dvb_open(stream_t *stream)
{
dvb_priv_t *priv = NULL;
pthread_mutex_lock(&global_dvb_state_lock);
mp_mutex_lock(&global_dvb_state_lock);
if (global_dvb_state && global_dvb_state->stream_used) {
MP_ERR(stream, "DVB stream already in use, only one DVB stream can exist at a time!\n");
pthread_mutex_unlock(&global_dvb_state_lock);
mp_mutex_unlock(&global_dvb_state_lock);
goto err_out;
}
@ -937,17 +937,17 @@ static int dvb_open(stream_t *stream)
priv->log = stream->log;
if (!state) {
MP_ERR(stream, "DVB configuration is empty\n");
pthread_mutex_unlock(&global_dvb_state_lock);
mp_mutex_unlock(&global_dvb_state_lock);
goto err_out;
}
if (!dvb_parse_path(stream)) {
pthread_mutex_unlock(&global_dvb_state_lock);
mp_mutex_unlock(&global_dvb_state_lock);
goto err_out;
}
state->stream_used = true;
pthread_mutex_unlock(&global_dvb_state_lock);
mp_mutex_unlock(&global_dvb_state_lock);
if (!state->is_on) {
// State could be already initialized, for example, we just did a channel switch.

View File

@ -15,8 +15,6 @@
* License along with mpv. If not, see <http://www.gnu.org/licenses/>.
*/
#include <pthread.h>
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
#include <libavutil/opt.h>
@ -457,4 +455,3 @@ const stream_info_t stream_info_ffmpeg_unsafe = {
.stream_origin = STREAM_ORIGIN_UNSAFE,
.can_write = true,
};

View File

@ -20,7 +20,6 @@
#include <string.h>
#include <math.h>
#include <assert.h>
#include <pthread.h>
#include "demux/demux.h"
#include "sd.h"
@ -43,7 +42,7 @@ static const struct sd_functions *const sd_list[] = {
};
struct dec_sub {
pthread_mutex_t lock;
mp_mutex lock;
struct mp_log *log;
struct mpv_global *global;
@ -126,7 +125,7 @@ void sub_destroy(struct dec_sub *sub)
sub->sd->driver->uninit(sub->sd);
}
talloc_free(sub->sd);
pthread_mutex_destroy(&sub->lock);
mp_mutex_destroy(&sub->lock);
talloc_free(sub);
}
@ -182,7 +181,7 @@ struct dec_sub *sub_create(struct mpv_global *global, struct track *track,
.end = MP_NOPTS_VALUE,
};
sub->opts = sub->opts_cache->opts;
mpthread_mutex_init_recursive(&sub->lock);
mp_mutex_init_type(&sub->lock, MP_MUTEX_RECURSIVE);
sub->sd = init_decoder(sub);
if (sub->sd) {
@ -227,15 +226,15 @@ static void update_segment(struct dec_sub *sub)
bool sub_can_preload(struct dec_sub *sub)
{
bool r;
pthread_mutex_lock(&sub->lock);
mp_mutex_lock(&sub->lock);
r = sub->sd->driver->accept_packets_in_advance && !sub->preload_attempted;
pthread_mutex_unlock(&sub->lock);
mp_mutex_unlock(&sub->lock);
return r;
}
void sub_preload(struct dec_sub *sub)
{
pthread_mutex_lock(&sub->lock);
mp_mutex_lock(&sub->lock);
struct mp_dispatch_queue *demux_waiter = mp_dispatch_create(NULL);
demux_set_stream_wakeup_cb(sub->sh, wakeup_demux, demux_waiter);
@ -258,7 +257,7 @@ void sub_preload(struct dec_sub *sub)
demux_set_stream_wakeup_cb(sub->sh, NULL, NULL);
talloc_free(demux_waiter);
pthread_mutex_unlock(&sub->lock);
mp_mutex_unlock(&sub->lock);
}
static bool is_new_segment(struct dec_sub *sub, struct demux_packet *p)
@ -273,7 +272,7 @@ static bool is_new_segment(struct dec_sub *sub, struct demux_packet *p)
bool sub_read_packets(struct dec_sub *sub, double video_pts, bool force)
{
bool r = true;
pthread_mutex_lock(&sub->lock);
mp_mutex_lock(&sub->lock);
video_pts = pts_to_subtitle(sub, video_pts);
while (1) {
bool read_more = true;
@ -331,7 +330,7 @@ bool sub_read_packets(struct dec_sub *sub, double video_pts, bool force)
if (!(sub->preload_attempted && sub->sd->preload_ok))
sub->sd->driver->decode(sub->sd, pkt);
}
pthread_mutex_unlock(&sub->lock);
mp_mutex_unlock(&sub->lock);
return r;
}
@ -339,19 +338,19 @@ bool sub_read_packets(struct dec_sub *sub, double video_pts, bool force)
// Used with UPDATE_SUB_HARD and UPDATE_SUB_FILT.
void sub_redecode_cached_packets(struct dec_sub *sub)
{
pthread_mutex_lock(&sub->lock);
mp_mutex_lock(&sub->lock);
if (sub->cached_pkts[0])
sub->sd->driver->decode(sub->sd, sub->cached_pkts[0]);
if (sub->cached_pkts[1])
sub->sd->driver->decode(sub->sd, sub->cached_pkts[1]);
pthread_mutex_unlock(&sub->lock);
mp_mutex_unlock(&sub->lock);
}
// Unref sub_bitmaps.rc to free the result. May return NULL.
struct sub_bitmaps *sub_get_bitmaps(struct dec_sub *sub, struct mp_osd_res dim,
int format, double pts)
{
pthread_mutex_lock(&sub->lock);
mp_mutex_lock(&sub->lock);
pts = pts_to_subtitle(sub, pts);
@ -364,14 +363,14 @@ struct sub_bitmaps *sub_get_bitmaps(struct dec_sub *sub, struct mp_osd_res dim,
sub->sd->driver->get_bitmaps)
res = sub->sd->driver->get_bitmaps(sub->sd, dim, format, pts);
pthread_mutex_unlock(&sub->lock);
mp_mutex_unlock(&sub->lock);
return res;
}
// The returned string is talloc'ed.
char *sub_get_text(struct dec_sub *sub, double pts, enum sd_text_type type)
{
pthread_mutex_lock(&sub->lock);
mp_mutex_lock(&sub->lock);
char *text = NULL;
pts = pts_to_subtitle(sub, pts);
@ -381,7 +380,7 @@ char *sub_get_text(struct dec_sub *sub, double pts, enum sd_text_type type)
if (sub->sd->driver->get_text)
text = sub->sd->driver->get_text(sub->sd, pts, type);
pthread_mutex_unlock(&sub->lock);
mp_mutex_unlock(&sub->lock);
return text;
}
@ -396,7 +395,7 @@ char *sub_ass_get_extradata(struct dec_sub *sub)
struct sd_times sub_get_times(struct dec_sub *sub, double pts)
{
pthread_mutex_lock(&sub->lock);
mp_mutex_lock(&sub->lock);
struct sd_times res = { .start = MP_NOPTS_VALUE, .end = MP_NOPTS_VALUE };
pts = pts_to_subtitle(sub, pts);
@ -407,13 +406,13 @@ struct sd_times sub_get_times(struct dec_sub *sub, double pts)
if (sub->sd->driver->get_times)
res = sub->sd->driver->get_times(sub->sd, pts);
pthread_mutex_unlock(&sub->lock);
mp_mutex_unlock(&sub->lock);
return res;
}
void sub_reset(struct dec_sub *sub)
{
pthread_mutex_lock(&sub->lock);
mp_mutex_lock(&sub->lock);
if (sub->sd->driver->reset)
sub->sd->driver->reset(sub->sd);
sub->last_pkt_pts = MP_NOPTS_VALUE;
@ -421,21 +420,21 @@ void sub_reset(struct dec_sub *sub)
TA_FREEP(&sub->cached_pkts[0]);
TA_FREEP(&sub->cached_pkts[1]);
TA_FREEP(&sub->new_segment);
pthread_mutex_unlock(&sub->lock);
mp_mutex_unlock(&sub->lock);
}
void sub_select(struct dec_sub *sub, bool selected)
{
pthread_mutex_lock(&sub->lock);
mp_mutex_lock(&sub->lock);
if (sub->sd->driver->select)
sub->sd->driver->select(sub->sd, selected);
pthread_mutex_unlock(&sub->lock);
mp_mutex_unlock(&sub->lock);
}
int sub_control(struct dec_sub *sub, enum sd_ctrl cmd, void *arg)
{
int r = CONTROL_UNKNOWN;
pthread_mutex_lock(&sub->lock);
mp_mutex_lock(&sub->lock);
bool propagate = false;
switch (cmd) {
case SD_CTRL_SET_VIDEO_DEF_FPS:
@ -470,22 +469,22 @@ int sub_control(struct dec_sub *sub, enum sd_ctrl cmd, void *arg)
}
if (propagate && sub->sd->driver->control)
r = sub->sd->driver->control(sub->sd, cmd, arg);
pthread_mutex_unlock(&sub->lock);
mp_mutex_unlock(&sub->lock);
return r;
}
void sub_set_recorder_sink(struct dec_sub *sub, struct mp_recorder_sink *sink)
{
pthread_mutex_lock(&sub->lock);
mp_mutex_lock(&sub->lock);
sub->recorder_sink = sink;
pthread_mutex_unlock(&sub->lock);
mp_mutex_unlock(&sub->lock);
}
void sub_set_play_dir(struct dec_sub *sub, int dir)
{
pthread_mutex_lock(&sub->lock);
mp_mutex_lock(&sub->lock);
sub->play_dir = dir;
pthread_mutex_unlock(&sub->lock);
mp_mutex_unlock(&sub->lock);
}
bool sub_is_primary_visible(struct dec_sub *sub)

View File

@ -129,7 +129,7 @@ struct osd_state *osd_create(struct mpv_global *global)
.force_video_pts = MP_NOPTS_VALUE,
.stats = stats_ctx_create(osd, global, "osd"),
};
pthread_mutex_init(&osd->lock, NULL);
mp_mutex_init(&osd->lock);
osd->opts = osd->opts_cache->opts;
for (int n = 0; n < MAX_OSD_PARTS; n++) {
@ -156,13 +156,13 @@ void osd_free(struct osd_state *osd)
return;
osd_destroy_backend(osd);
talloc_free(osd->objs[OSDTYPE_EXTERNAL2]->external2);
pthread_mutex_destroy(&osd->lock);
mp_mutex_destroy(&osd->lock);
talloc_free(osd);
}
void osd_set_text(struct osd_state *osd, const char *text)
{
pthread_mutex_lock(&osd->lock);
mp_mutex_lock(&osd->lock);
struct osd_object *osd_obj = osd->objs[OSDTYPE_OSD];
if (!text)
text = "";
@ -172,32 +172,32 @@ void osd_set_text(struct osd_state *osd, const char *text)
osd_obj->osd_changed = true;
osd->want_redraw_notification = true;
}
pthread_mutex_unlock(&osd->lock);
mp_mutex_unlock(&osd->lock);
}
void osd_set_sub(struct osd_state *osd, int index, struct dec_sub *dec_sub)
{
pthread_mutex_lock(&osd->lock);
mp_mutex_lock(&osd->lock);
if (index >= 0 && index < 2) {
struct osd_object *obj = osd->objs[OSDTYPE_SUB + index];
obj->sub = dec_sub;
obj->vo_change_id += 1;
}
osd->want_redraw_notification = true;
pthread_mutex_unlock(&osd->lock);
mp_mutex_unlock(&osd->lock);
}
bool osd_get_render_subs_in_filter(struct osd_state *osd)
{
pthread_mutex_lock(&osd->lock);
mp_mutex_lock(&osd->lock);
bool r = osd->render_subs_in_filter;
pthread_mutex_unlock(&osd->lock);
mp_mutex_unlock(&osd->lock);
return r;
}
void osd_set_render_subs_in_filter(struct osd_state *osd, bool s)
{
pthread_mutex_lock(&osd->lock);
mp_mutex_lock(&osd->lock);
if (osd->render_subs_in_filter != s) {
osd->render_subs_in_filter = s;
@ -207,7 +207,7 @@ void osd_set_render_subs_in_filter(struct osd_state *osd, bool s)
for (int n = 0; n < MAX_OSD_PARTS; n++)
osd->objs[n]->vo_change_id = change_id + 1;
}
pthread_mutex_unlock(&osd->lock);
mp_mutex_unlock(&osd->lock);
}
void osd_set_force_video_pts(struct osd_state *osd, double video_pts)
@ -222,7 +222,7 @@ double osd_get_force_video_pts(struct osd_state *osd)
void osd_set_progbar(struct osd_state *osd, struct osd_progbar_state *s)
{
pthread_mutex_lock(&osd->lock);
mp_mutex_lock(&osd->lock);
struct osd_object *osd_obj = osd->objs[OSDTYPE_OSD];
osd_obj->progbar_state.type = s->type;
osd_obj->progbar_state.value = s->value;
@ -234,18 +234,18 @@ void osd_set_progbar(struct osd_state *osd, struct osd_progbar_state *s)
}
osd_obj->osd_changed = true;
osd->want_redraw_notification = true;
pthread_mutex_unlock(&osd->lock);
mp_mutex_unlock(&osd->lock);
}
void osd_set_external2(struct osd_state *osd, struct sub_bitmaps *imgs)
{
pthread_mutex_lock(&osd->lock);
mp_mutex_lock(&osd->lock);
struct osd_object *obj = osd->objs[OSDTYPE_EXTERNAL2];
talloc_free(obj->external2);
obj->external2 = sub_bitmaps_copy(NULL, imgs);
obj->vo_change_id += 1;
osd->want_redraw_notification = true;
pthread_mutex_unlock(&osd->lock);
mp_mutex_unlock(&osd->lock);
}
static void check_obj_resize(struct osd_state *osd, struct mp_osd_res res,
@ -268,11 +268,11 @@ static void check_obj_resize(struct osd_state *osd, struct mp_osd_res res,
// Unnecessary for anything else.
void osd_resize(struct osd_state *osd, struct mp_osd_res res)
{
pthread_mutex_lock(&osd->lock);
mp_mutex_lock(&osd->lock);
int types[] = {OSDTYPE_OSD, OSDTYPE_EXTERNAL, OSDTYPE_EXTERNAL2, -1};
for (int n = 0; types[n] >= 0; n++)
check_obj_resize(osd, res, osd->objs[types[n]]);
pthread_mutex_unlock(&osd->lock);
mp_mutex_unlock(&osd->lock);
}
static struct sub_bitmaps *render_object(struct osd_state *osd,
@ -326,7 +326,7 @@ struct sub_bitmap_list *osd_render(struct osd_state *osd, struct mp_osd_res res,
double video_pts, int draw_flags,
const bool formats[SUBBITMAP_COUNT])
{
pthread_mutex_lock(&osd->lock);
mp_mutex_lock(&osd->lock);
struct sub_bitmap_list *list = talloc_zero(NULL, struct sub_bitmap_list);
list->change_id = 1;
@ -383,7 +383,7 @@ struct sub_bitmap_list *osd_render(struct osd_state *osd, struct mp_osd_res res,
if (!(draw_flags & OSD_DRAW_SUB_FILTER))
osd->want_redraw_notification = false;
pthread_mutex_unlock(&osd->lock);
mp_mutex_unlock(&osd->lock);
return list;
}
@ -433,7 +433,7 @@ void osd_draw_on_image_p(struct osd_state *osd, struct mp_osd_res res,
return; // on OOM, skip
// Need to lock for the dumb osd->draw_cache thing.
pthread_mutex_lock(&osd->lock);
mp_mutex_lock(&osd->lock);
if (!osd->draw_cache)
osd->draw_cache = mp_draw_sub_alloc(osd, osd->global);
@ -446,7 +446,7 @@ void osd_draw_on_image_p(struct osd_state *osd, struct mp_osd_res res,
stats_time_end(osd->stats, "draw-bmp");
pthread_mutex_unlock(&osd->lock);
mp_mutex_unlock(&osd->lock);
talloc_free(list);
}
@ -466,29 +466,29 @@ struct mp_osd_res osd_res_from_image_params(const struct mp_image_params *p)
// Typically called to react to OSD style changes.
void osd_changed(struct osd_state *osd)
{
pthread_mutex_lock(&osd->lock);
mp_mutex_lock(&osd->lock);
osd->objs[OSDTYPE_OSD]->osd_changed = true;
osd->want_redraw_notification = true;
// Done here for a lack of a better place.
m_config_cache_update(osd->opts_cache);
pthread_mutex_unlock(&osd->lock);
mp_mutex_unlock(&osd->lock);
}
bool osd_query_and_reset_want_redraw(struct osd_state *osd)
{
pthread_mutex_lock(&osd->lock);
mp_mutex_lock(&osd->lock);
bool r = osd->want_redraw_notification;
osd->want_redraw_notification = false;
pthread_mutex_unlock(&osd->lock);
mp_mutex_unlock(&osd->lock);
return r;
}
struct mp_osd_res osd_get_vo_res(struct osd_state *osd)
{
pthread_mutex_lock(&osd->lock);
mp_mutex_lock(&osd->lock);
// Any OSDTYPE is fine; but it mustn't be a subtitle one (can have lower res.)
struct mp_osd_res res = osd->objs[OSDTYPE_OSD]->vo_res;
pthread_mutex_unlock(&osd->lock);
mp_mutex_unlock(&osd->lock);
return res;
}

View File

@ -268,12 +268,12 @@ static void update_osd_text(struct osd_state *osd, struct osd_object *obj)
void osd_get_text_size(struct osd_state *osd, int *out_screen_h, int *out_font_h)
{
pthread_mutex_lock(&osd->lock);
mp_mutex_lock(&osd->lock);
struct osd_object *obj = osd->objs[OSDTYPE_OSD];
ASS_Style *style = prepare_osd_ass(osd, obj);
*out_screen_h = obj->ass.track->PlayResY - style->MarginV;
*out_font_h = style->FontSize;
pthread_mutex_unlock(&osd->lock);
mp_mutex_unlock(&osd->lock);
}
// align: -1 .. +1
@ -534,7 +534,7 @@ static int cmp_zorder(const void *pa, const void *pb)
void osd_set_external(struct osd_state *osd, struct osd_external_ass *ov)
{
pthread_mutex_lock(&osd->lock);
mp_mutex_lock(&osd->lock);
struct osd_object *obj = osd->objs[OSDTYPE_EXTERNAL];
bool zorder_changed = false;
int index = -1;
@ -616,12 +616,12 @@ void osd_set_external(struct osd_state *osd, struct osd_external_ass *ov)
}
done:
pthread_mutex_unlock(&osd->lock);
mp_mutex_unlock(&osd->lock);
}
void osd_set_external_remove_owner(struct osd_state *osd, void *owner)
{
pthread_mutex_lock(&osd->lock);
mp_mutex_lock(&osd->lock);
struct osd_object *obj = osd->objs[OSDTYPE_EXTERNAL];
for (int n = obj->num_externals - 1; n >= 0; n--) {
struct osd_external *e = obj->externals[n];
@ -632,7 +632,7 @@ void osd_set_external_remove_owner(struct osd_state *osd, void *owner)
osd->want_redraw_notification = true;
}
}
pthread_mutex_unlock(&osd->lock);
mp_mutex_unlock(&osd->lock);
}
static void append_ass(struct ass_state *ass, struct mp_osd_res *res,

View File

@ -1,10 +1,10 @@
#ifndef MP_OSD_STATE_H_
#define MP_OSD_STATE_H_
#include <pthread.h>
#include <stdatomic.h>
#include "osd.h"
#include "osdep/threads.h"
enum mp_osdtype {
OSDTYPE_SUB,
@ -66,7 +66,7 @@ struct osd_external {
};
struct osd_state {
pthread_mutex_t lock;
mp_mutex lock;
struct osd_object *objs[MAX_OSD_PARTS];

20
ta/ta.c
View File

@ -265,9 +265,9 @@ void ta_set_destructor(void *ptr, void (*destructor)(void *))
#if TA_MEMORY_DEBUGGING
#include <pthread.h>
#include "osdep/threads.h"
static pthread_mutex_t ta_dbg_mutex = PTHREAD_MUTEX_INITIALIZER;
static mp_static_mutex ta_dbg_mutex = MP_STATIC_MUTEX_INITIALIZER;
static bool enable_leak_check; // pretty much constant
static struct ta_header leak_node;
static char allocation_is_string;
@ -276,12 +276,12 @@ static void ta_dbg_add(struct ta_header *h)
{
h->canary = CANARY;
if (enable_leak_check) {
pthread_mutex_lock(&ta_dbg_mutex);
mp_mutex_lock(&ta_dbg_mutex);
h->leak_next = &leak_node;
h->leak_prev = leak_node.leak_prev;
leak_node.leak_prev->leak_next = h;
leak_node.leak_prev = h;
pthread_mutex_unlock(&ta_dbg_mutex);
mp_mutex_unlock(&ta_dbg_mutex);
}
}
@ -300,10 +300,10 @@ static void ta_dbg_remove(struct ta_header *h)
{
ta_dbg_check_header(h);
if (h->leak_next) { // assume checking for !=NULL invariant ok without lock
pthread_mutex_lock(&ta_dbg_mutex);
mp_mutex_lock(&ta_dbg_mutex);
h->leak_next->leak_prev = h->leak_prev;
h->leak_prev->leak_next = h->leak_next;
pthread_mutex_unlock(&ta_dbg_mutex);
mp_mutex_unlock(&ta_dbg_mutex);
h->leak_next = h->leak_prev = NULL;
}
h->canary = 0;
@ -319,7 +319,7 @@ static size_t get_children_size(struct ta_header *h)
static void print_leak_report(void)
{
pthread_mutex_lock(&ta_dbg_mutex);
mp_mutex_lock(&ta_dbg_mutex);
if (leak_node.leak_next && leak_node.leak_next != &leak_node) {
size_t size = 0;
size_t num_blocks = 0;
@ -354,19 +354,19 @@ static void print_leak_report(void)
}
fprintf(stderr, "%zu bytes in %zu blocks.\n", size, num_blocks);
}
pthread_mutex_unlock(&ta_dbg_mutex);
mp_mutex_unlock(&ta_dbg_mutex);
}
void ta_enable_leak_report(void)
{
pthread_mutex_lock(&ta_dbg_mutex);
mp_mutex_lock(&ta_dbg_mutex);
enable_leak_check = true;
if (!leak_node.leak_prev && !leak_node.leak_next) {
leak_node.leak_prev = &leak_node;
leak_node.leak_next = &leak_node;
atexit(print_leak_report);
}
pthread_mutex_unlock(&ta_dbg_mutex);
mp_mutex_unlock(&ta_dbg_mutex);
}
/* Set a (static) string that will be printed if the memory allocation in ptr

View File

@ -15,8 +15,6 @@
* License along with mpv. If not, see <http://www.gnu.org/licenses/>.
*/
#include <pthread.h>
#include "config.h"
#include <libavcodec/avcodec.h>
@ -28,20 +26,21 @@
#include <libavutil/hwcontext_dxva2.h>
#endif
#include "common/common.h"
#include "common/av_common.h"
#include "common/common.h"
#include "osdep/threads.h"
#include "osdep/windows_utils.h"
#include "video/fmt-conversion.h"
#include "video/hwdec.h"
#include "video/mp_image.h"
#include "video/mp_image_pool.h"
#include "osdep/windows_utils.h"
#include "video/mp_image.h"
#include "d3d.h"
HMODULE d3d11_dll, d3d9_dll, dxva2_dll;
PFN_D3D11_CREATE_DEVICE d3d11_D3D11CreateDevice;
static pthread_once_t d3d_load_once = PTHREAD_ONCE_INIT;
static mp_once d3d_load_once = MP_STATIC_ONCE_INITIALIZER;
#if !HAVE_UWP
static void d3d_do_load(void)
@ -65,7 +64,7 @@ static void d3d_do_load(void)
void d3d_load_dlls(void)
{
pthread_once(&d3d_load_once, d3d_do_load);
mp_exec_once(&d3d_load_once, d3d_do_load);
}
// Test if Direct3D11 can be used by us. Basically, this prevents trying to use

View File

@ -18,7 +18,6 @@
#include <float.h>
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <assert.h>
#include <stdbool.h>
@ -35,6 +34,7 @@
#include "common/msg.h"
#include "options/m_config.h"
#include "options/options.h"
#include "osdep/threads.h"
#include "misc/bstr.h"
#include "common/av_common.h"
#include "common/codecs.h"
@ -217,7 +217,7 @@ typedef struct lavc_ctx {
AVBufferRef *cached_hw_frames_ctx;
// --- The following fields are protected by dr_lock.
pthread_mutex_t dr_lock;
mp_mutex dr_lock;
bool dr_failed;
struct mp_image_pool *dr_pool;
int dr_imgfmt, dr_w, dr_h, dr_stride_align;
@ -1009,7 +1009,7 @@ static int get_buffer2_direct(AVCodecContext *avctx, AVFrame *pic, int flags)
struct mp_filter *vd = avctx->opaque;
vd_ffmpeg_ctx *p = vd->priv;
pthread_mutex_lock(&p->dr_lock);
mp_mutex_lock(&p->dr_lock);
int w = pic->width;
int h = pic->height;
@ -1081,7 +1081,7 @@ static int get_buffer2_direct(AVCodecContext *avctx, AVFrame *pic, int flags)
}
talloc_free(img);
pthread_mutex_unlock(&p->dr_lock);
mp_mutex_unlock(&p->dr_lock);
return 0;
@ -1089,7 +1089,7 @@ fallback:
if (!p->dr_failed)
MP_VERBOSE(p, "DR failed - disabling.\n");
p->dr_failed = true;
pthread_mutex_unlock(&p->dr_lock);
mp_mutex_unlock(&p->dr_lock);
return avcodec_default_get_buffer2(avctx, pic, flags);
}
@ -1392,7 +1392,7 @@ static void destroy(struct mp_filter *vd)
uninit_avctx(vd);
pthread_mutex_destroy(&ctx->dr_lock);
mp_mutex_destroy(&ctx->dr_lock);
}
static const struct mp_filter_info vd_lavc_filter = {
@ -1428,7 +1428,7 @@ static struct mp_decoder *create(struct mp_filter *parent,
ctx->public.f = vd;
ctx->public.control = control;
pthread_mutex_init(&ctx->dr_lock, NULL);
mp_mutex_init(&ctx->dr_lock);
// hwdec/DR
struct mp_stream_info *info = mp_filter_find_stream_info(vd);

View File

@ -19,7 +19,6 @@
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include <pthread.h>
#include <limits.h>
#include <assert.h>
@ -30,13 +29,14 @@
#include <libavutil/cpu.h>
#include "common/msg.h"
#include "options/m_option.h"
#include "options/path.h"
#include "filters/f_autoconvert.h"
#include "filters/f_utils.h"
#include "filters/filter.h"
#include "filters/filter_internal.h"
#include "filters/filter.h"
#include "filters/user_filters.h"
#include "options/m_option.h"
#include "options/path.h"
#include "osdep/threads.h"
#include "video/img_format.h"
#include "video/mp_image.h"
#include "video/sws_utils.h"
@ -70,8 +70,8 @@ struct priv {
// Format for which VS is currently configured.
struct mp_image_params fmt_in;
pthread_mutex_t lock;
pthread_cond_t wakeup;
mp_mutex lock;
mp_cond wakeup;
// --- the following members are all protected by lock
struct mp_image **buffered; // oldest image first
@ -291,7 +291,7 @@ static void VS_CC vs_frame_done(void *userData, const VSFrameRef *f, int n,
p->vsapi->freeFrame(f);
}
pthread_mutex_lock(&p->lock);
mp_mutex_lock(&p->lock);
// If these assertions fail, n is an unrequested frame (or filtered twice).
assert(n >= p->out_frameno && n < p->out_frameno + p->max_requests);
@ -308,8 +308,8 @@ static void VS_CC vs_frame_done(void *userData, const VSFrameRef *f, int n,
}
}
p->requested[index] = res;
pthread_cond_broadcast(&p->wakeup);
pthread_mutex_unlock(&p->lock);
mp_cond_broadcast(&p->wakeup);
mp_mutex_unlock(&p->lock);
mp_filter_wakeup(p->f);
}
@ -317,7 +317,7 @@ static void vf_vapoursynth_process(struct mp_filter *f)
{
struct priv *p = f->priv;
pthread_mutex_lock(&p->lock);
mp_mutex_lock(&p->lock);
if (p->failed) {
// Not sure what we do on errors, but at least don't deadlock.
@ -336,7 +336,7 @@ static void vf_vapoursynth_process(struct mp_filter *f)
if (p->out_node && !p->eof) {
MP_VERBOSE(p, "initiate EOF\n");
p->eof = true;
pthread_cond_broadcast(&p->wakeup);
mp_cond_broadcast(&p->wakeup);
}
if (!p->out_node && mp_pin_in_needs_data(f->ppins[1])) {
MP_VERBOSE(p, "return EOF\n");
@ -360,11 +360,11 @@ static void vf_vapoursynth_process(struct mp_filter *f)
MP_VERBOSE(p, "draining VS for format change\n");
mp_pin_out_unread(p->in_pin, frame);
p->eof = true;
pthread_cond_broadcast(&p->wakeup);
mp_cond_broadcast(&p->wakeup);
mp_filter_internal_mark_progress(f);
goto done;
}
pthread_mutex_unlock(&p->lock);
mp_mutex_unlock(&p->lock);
if (p->out_node)
destroy_vs(p);
p->fmt_in = mpi->params;
@ -374,13 +374,13 @@ static void vf_vapoursynth_process(struct mp_filter *f)
mp_filter_internal_mark_failed(f);
return;
}
pthread_mutex_lock(&p->lock);
mp_mutex_lock(&p->lock);
}
if (p->out_pts == MP_NOPTS_VALUE)
p->out_pts = mpi->pts;
p->frames_sent++;
p->buffered[p->num_buffered++] = mpi;
pthread_cond_broadcast(&p->wakeup);
mp_cond_broadcast(&p->wakeup);
} else if (frame.type != MP_FRAME_NONE) {
MP_ERR(p, "discarding unknown frame type\n");
mp_frame_unref(&frame);
@ -413,7 +413,7 @@ static void vf_vapoursynth_process(struct mp_filter *f)
if (p->requested[0] == &dummy_img_eof) {
MP_VERBOSE(p, "finishing up\n");
assert(p->eof);
pthread_mutex_unlock(&p->lock);
mp_mutex_unlock(&p->lock);
destroy_vs(p);
mp_filter_internal_mark_progress(f);
return;
@ -436,7 +436,7 @@ static void vf_vapoursynth_process(struct mp_filter *f)
}
done:
pthread_mutex_unlock(&p->lock);
mp_mutex_unlock(&p->lock);
}
static void VS_CC infiltInit(VSMap *in, VSMap *out, void **instanceData,
@ -473,7 +473,7 @@ static const VSFrameRef *VS_CC infiltGetFrame(int frameno, int activationReason,
struct priv *p = *instanceData;
VSFrameRef *ret = NULL;
pthread_mutex_lock(&p->lock);
mp_mutex_lock(&p->lock);
MP_TRACE(p, "VS asking for frame %d (at %d)\n", frameno, p->in_frameno);
while (1) {
if (p->shutdown) {
@ -511,7 +511,7 @@ static const VSFrameRef *VS_CC infiltGetFrame(int frameno, int activationReason,
// queue new frames.
if (p->num_buffered) {
drain_oldest_buffered_frame(p);
pthread_cond_broadcast(&p->wakeup);
mp_cond_broadcast(&p->wakeup);
mp_filter_wakeup(p->f);
continue;
}
@ -536,19 +536,19 @@ static const VSFrameRef *VS_CC infiltGetFrame(int frameno, int activationReason,
break;
}
pthread_mutex_unlock(&p->lock);
mp_mutex_unlock(&p->lock);
struct mp_image vsframe = map_vs_frame(p, ret, true);
mp_image_copy(&vsframe, img);
int res = 1e6;
int dur = img->pkt_duration * res + 0.5;
set_vs_frame_props(p, ret, img, dur, res);
pthread_mutex_lock(&p->lock);
mp_mutex_lock(&p->lock);
break;
}
pthread_cond_wait(&p->wakeup, &p->lock);
mp_cond_wait(&p->wakeup, &p->lock);
}
pthread_cond_broadcast(&p->wakeup);
pthread_mutex_unlock(&p->lock);
mp_cond_broadcast(&p->wakeup);
mp_mutex_unlock(&p->lock);
return ret;
}
@ -556,10 +556,10 @@ static void VS_CC infiltFree(void *instanceData, VSCore *core, const VSAPI *vsap
{
struct priv *p = instanceData;
pthread_mutex_lock(&p->lock);
mp_mutex_lock(&p->lock);
p->in_node_active = false;
pthread_cond_broadcast(&p->wakeup);
pthread_mutex_unlock(&p->lock);
mp_cond_broadcast(&p->wakeup);
mp_mutex_unlock(&p->lock);
}
// number of getAsyncFrame calls in progress
@ -580,13 +580,13 @@ static void destroy_vs(struct priv *p)
MP_DBG(p, "destroying VS filters\n");
// Wait until our frame callbacks return.
pthread_mutex_lock(&p->lock);
mp_mutex_lock(&p->lock);
p->initializing = false;
p->shutdown = true;
pthread_cond_broadcast(&p->wakeup);
mp_cond_broadcast(&p->wakeup);
while (num_requested(p))
pthread_cond_wait(&p->wakeup, &p->lock);
pthread_mutex_unlock(&p->lock);
mp_cond_wait(&p->wakeup, &p->lock);
mp_mutex_unlock(&p->lock);
MP_DBG(p, "all requests terminated\n");
@ -699,9 +699,9 @@ static int reinit_vs(struct priv *p, struct mp_image *input)
goto error;
}
pthread_mutex_lock(&p->lock);
mp_mutex_lock(&p->lock);
p->initializing = false;
pthread_mutex_unlock(&p->lock);
mp_mutex_unlock(&p->lock);
MP_DBG(p, "initialized.\n");
res = 0;
error:
@ -729,8 +729,8 @@ static void vf_vapoursynth_destroy(struct mp_filter *f)
destroy_vs(p);
p->drv->uninit(p);
pthread_cond_destroy(&p->wakeup);
pthread_mutex_destroy(&p->lock);
mp_cond_destroy(&p->wakeup);
mp_mutex_destroy(&p->lock);
mp_filter_free_children(f);
}
@ -763,8 +763,8 @@ static struct mp_filter *vf_vapoursynth_create(struct mp_filter *parent,
p->drv = p->opts->drv;
p->f = f;
pthread_mutex_init(&p->lock, NULL);
pthread_cond_init(&p->wakeup, NULL);
mp_mutex_init(&p->lock);
mp_cond_init(&p->wakeup);
if (!p->opts->file || !p->opts->file[0]) {
MP_FATAL(p, "'file' parameter must be set.\n");

View File

@ -1,14 +1,13 @@
#include <pthread.h>
#include <assert.h>
#include <libavutil/hwcontext.h>
#include "config.h"
#include "hwdec.h"
#include "osdep/threads.h"
struct mp_hwdec_devices {
pthread_mutex_t lock;
mp_mutex lock;
struct mp_hwdec_ctx **hwctxs;
int num_hwctxs;
@ -21,7 +20,7 @@ struct mp_hwdec_devices {
struct mp_hwdec_devices *hwdec_devices_create(void)
{
struct mp_hwdec_devices *devs = talloc_zero(NULL, struct mp_hwdec_devices);
pthread_mutex_init(&devs->lock, NULL);
mp_mutex_init(&devs->lock);
return devs;
}
@ -31,7 +30,7 @@ void hwdec_devices_destroy(struct mp_hwdec_devices *devs)
return;
assert(!devs->num_hwctxs); // must have been hwdec_devices_remove()ed
assert(!devs->load_api); // must have been unset
pthread_mutex_destroy(&devs->lock);
mp_mutex_destroy(&devs->lock);
talloc_free(devs);
}
@ -39,7 +38,7 @@ struct mp_hwdec_ctx *hwdec_devices_get_by_imgfmt(struct mp_hwdec_devices *devs,
int hw_imgfmt)
{
struct mp_hwdec_ctx *res = NULL;
pthread_mutex_lock(&devs->lock);
mp_mutex_lock(&devs->lock);
for (int n = 0; n < devs->num_hwctxs; n++) {
struct mp_hwdec_ctx *dev = devs->hwctxs[n];
if (dev->hw_imgfmt == hw_imgfmt) {
@ -47,7 +46,7 @@ struct mp_hwdec_ctx *hwdec_devices_get_by_imgfmt(struct mp_hwdec_devices *devs,
break;
}
}
pthread_mutex_unlock(&devs->lock);
mp_mutex_unlock(&devs->lock);
return res;
}
@ -58,29 +57,29 @@ struct mp_hwdec_ctx *hwdec_devices_get_first(struct mp_hwdec_devices *devs)
struct mp_hwdec_ctx *hwdec_devices_get_n(struct mp_hwdec_devices *devs, int n)
{
pthread_mutex_lock(&devs->lock);
mp_mutex_lock(&devs->lock);
struct mp_hwdec_ctx *res = n < devs->num_hwctxs ? devs->hwctxs[n] : NULL;
pthread_mutex_unlock(&devs->lock);
mp_mutex_unlock(&devs->lock);
return res;
}
void hwdec_devices_add(struct mp_hwdec_devices *devs, struct mp_hwdec_ctx *ctx)
{
pthread_mutex_lock(&devs->lock);
mp_mutex_lock(&devs->lock);
MP_TARRAY_APPEND(devs, devs->hwctxs, devs->num_hwctxs, ctx);
pthread_mutex_unlock(&devs->lock);
mp_mutex_unlock(&devs->lock);
}
void hwdec_devices_remove(struct mp_hwdec_devices *devs, struct mp_hwdec_ctx *ctx)
{
pthread_mutex_lock(&devs->lock);
mp_mutex_lock(&devs->lock);
for (int n = 0; n < devs->num_hwctxs; n++) {
if (devs->hwctxs[n] == ctx) {
MP_TARRAY_REMOVE_AT(devs->hwctxs, devs->num_hwctxs, n);
break;
}
}
pthread_mutex_unlock(&devs->lock);
mp_mutex_unlock(&devs->lock);
}
void hwdec_devices_set_loader(struct mp_hwdec_devices *devs,

View File

@ -16,7 +16,6 @@
*/
#include <limits.h>
#include <pthread.h>
#include <assert.h>
#include <libavutil/mem.h>
@ -37,10 +36,11 @@
#include "common/av_common.h"
#include "common/common.h"
#include "fmt-conversion.h"
#include "hwdec.h"
#include "mp_image.h"
#include "osdep/threads.h"
#include "sws_utils.h"
#include "fmt-conversion.h"
// Determine strides, plane sizes, and total required size for an image
// allocation. Returns total size on success, <0 on error. Unused planes

View File

@ -19,7 +19,6 @@
#include <stddef.h>
#include <stdbool.h>
#include <pthread.h>
#include <assert.h>
#include <libavutil/buffer.h>
@ -35,12 +34,13 @@
#include "common/common.h"
#include "fmt-conversion.h"
#include "mp_image.h"
#include "mp_image_pool.h"
#include "mp_image.h"
#include "osdep/threads.h"
static pthread_mutex_t pool_mutex = PTHREAD_MUTEX_INITIALIZER;
#define pool_lock() pthread_mutex_lock(&pool_mutex)
#define pool_unlock() pthread_mutex_unlock(&pool_mutex)
static mp_static_mutex pool_mutex = MP_STATIC_MUTEX_INITIALIZER;
#define pool_lock() mp_mutex_lock(&pool_mutex)
#define pool_unlock() mp_mutex_unlock(&pool_mutex)
// Thread-safety: the pool itself is not thread-safe, but pool-allocated images
// can be referenced and unreferenced from other threads. (As long as the image

View File

@ -92,17 +92,17 @@ struct vo_cocoa_state {
uint32_t old_dwidth;
uint32_t old_dheight;
pthread_mutex_t anim_lock;
pthread_cond_t anim_wakeup;
mp_mutex anim_lock;
mp_cond anim_wakeup;
bool is_animating;
CVDisplayLinkRef link;
pthread_mutex_t sync_lock;
pthread_cond_t sync_wakeup;
mp_mutex sync_lock;
mp_cond sync_wakeup;
uint64_t sync_counter;
pthread_mutex_t lock;
pthread_cond_t wakeup;
mp_mutex lock;
mp_cond wakeup;
// --- The following members are protected by the lock.
// If the VO and main threads are both blocked, locking is optional
@ -137,9 +137,9 @@ static void queue_new_video_size(struct vo *vo, int w, int h)
static void flag_events(struct vo *vo, int events)
{
struct vo_cocoa_state *s = vo->cocoa;
pthread_mutex_lock(&s->lock);
mp_mutex_lock(&s->lock);
s->pending_events |= events;
pthread_mutex_unlock(&s->lock);
mp_mutex_unlock(&s->lock);
if (events)
vo_wakeup(vo);
}
@ -301,26 +301,26 @@ static void vo_cocoa_update_screen_info(struct vo *vo)
static void vo_cocoa_anim_lock(struct vo *vo)
{
struct vo_cocoa_state *s = vo->cocoa;
pthread_mutex_lock(&s->anim_lock);
mp_mutex_lock(&s->anim_lock);
s->is_animating = true;
pthread_mutex_unlock(&s->anim_lock);
mp_mutex_unlock(&s->anim_lock);
}
static void vo_cocoa_anim_unlock(struct vo *vo)
{
struct vo_cocoa_state *s = vo->cocoa;
pthread_mutex_lock(&s->anim_lock);
mp_mutex_lock(&s->anim_lock);
s->is_animating = false;
pthread_cond_signal(&s->anim_wakeup);
pthread_mutex_unlock(&s->anim_lock);
mp_cond_signal(&s->anim_wakeup);
mp_mutex_unlock(&s->anim_lock);
}
static void vo_cocoa_signal_swap(struct vo_cocoa_state *s)
{
pthread_mutex_lock(&s->sync_lock);
mp_mutex_lock(&s->sync_lock);
s->sync_counter += 1;
pthread_cond_signal(&s->sync_wakeup);
pthread_mutex_unlock(&s->sync_lock);
mp_cond_signal(&s->sync_wakeup);
mp_mutex_unlock(&s->sync_lock);
}
static void vo_cocoa_start_displaylink(struct vo_cocoa_state *s)
@ -380,12 +380,12 @@ void vo_cocoa_init(struct vo *vo)
.cursor_visibility_wanted = true,
.fullscreen = 0,
};
pthread_mutex_init(&s->lock, NULL);
pthread_cond_init(&s->wakeup, NULL);
pthread_mutex_init(&s->sync_lock, NULL);
pthread_cond_init(&s->sync_wakeup, NULL);
pthread_mutex_init(&s->anim_lock, NULL);
pthread_cond_init(&s->anim_wakeup, NULL);
mp_mutex_init(&s->lock);
mp_cond_init(&s->wakeup);
mp_mutex_init(&s->sync_lock);
mp_cond_init(&s->sync_wakeup);
mp_mutex_init(&s->anim_lock);
mp_cond_init(&s->anim_wakeup);
vo->cocoa = s;
vo_cocoa_update_screen_info(vo);
vo_cocoa_init_displaylink(vo);
@ -427,15 +427,15 @@ void vo_cocoa_uninit(struct vo *vo)
{
struct vo_cocoa_state *s = vo->cocoa;
pthread_mutex_lock(&s->lock);
mp_mutex_lock(&s->lock);
s->vo_ready = false;
pthread_cond_signal(&s->wakeup);
pthread_mutex_unlock(&s->lock);
mp_cond_signal(&s->wakeup);
mp_mutex_unlock(&s->lock);
pthread_mutex_lock(&s->anim_lock);
mp_mutex_lock(&s->anim_lock);
while(s->is_animating)
pthread_cond_wait(&s->anim_wakeup, &s->anim_lock);
pthread_mutex_unlock(&s->anim_lock);
mp_cond_wait(&s->anim_wakeup, &s->anim_lock);
mp_mutex_unlock(&s->anim_lock);
// close window beforehand to prevent undefined behavior when in fullscreen
// that resets the desktop to space 1
@ -466,12 +466,12 @@ void vo_cocoa_uninit(struct vo *vo)
[s->view removeFromSuperview];
[s->view release];
pthread_cond_destroy(&s->anim_wakeup);
pthread_mutex_destroy(&s->anim_lock);
pthread_cond_destroy(&s->sync_wakeup);
pthread_mutex_destroy(&s->sync_lock);
pthread_cond_destroy(&s->wakeup);
pthread_mutex_destroy(&s->lock);
mp_cond_destroy(&s->anim_wakeup);
mp_mutex_destroy(&s->anim_lock);
mp_cond_destroy(&s->sync_wakeup);
mp_mutex_destroy(&s->sync_lock);
mp_cond_destroy(&s->wakeup);
mp_mutex_destroy(&s->lock);
talloc_free(s);
});
}
@ -764,13 +764,13 @@ static void resize_event(struct vo *vo)
struct vo_cocoa_state *s = vo->cocoa;
NSRect frame = [s->video frameInPixels];
pthread_mutex_lock(&s->lock);
mp_mutex_lock(&s->lock);
s->vo_dwidth = frame.size.width;
s->vo_dheight = frame.size.height;
s->pending_events |= VO_EVENT_RESIZE | VO_EVENT_EXPOSE;
// Live-resizing: make sure at least one frame will be drawn
s->frame_w = s->frame_h = 0;
pthread_mutex_unlock(&s->lock);
mp_mutex_unlock(&s->lock);
[s->nsgl_ctx update];
@ -783,17 +783,17 @@ static void vo_cocoa_resize_redraw(struct vo *vo, int width, int height)
resize_event(vo);
pthread_mutex_lock(&s->lock);
mp_mutex_lock(&s->lock);
// Wait until a new frame with the new size was rendered. For some reason,
// Cocoa requires this to be done before drawRect() returns.
struct timespec e = mp_time_ns_to_realtime(mp_time_ns_add(mp_time_ns(), 0.1));
int64_t e = mp_time_ns_add(mp_time_ns(), 0.1);
while (s->frame_w != width && s->frame_h != height && s->vo_ready) {
if (pthread_cond_timedwait(&s->wakeup, &s->lock, &e))
if (mp_cond_timedwait_until(&s->wakeup, &s->lock, e))
break;
}
pthread_mutex_unlock(&s->lock);
mp_mutex_unlock(&s->lock);
}
void vo_cocoa_swap_buffers(struct vo *vo)
@ -801,38 +801,38 @@ void vo_cocoa_swap_buffers(struct vo *vo)
struct vo_cocoa_state *s = vo->cocoa;
// Don't swap a frame with wrong size
pthread_mutex_lock(&s->lock);
mp_mutex_lock(&s->lock);
bool skip = s->pending_events & VO_EVENT_RESIZE;
pthread_mutex_unlock(&s->lock);
mp_mutex_unlock(&s->lock);
if (skip)
return;
pthread_mutex_lock(&s->sync_lock);
mp_mutex_lock(&s->sync_lock);
uint64_t old_counter = s->sync_counter;
while(CVDisplayLinkIsRunning(s->link) && old_counter == s->sync_counter) {
pthread_cond_wait(&s->sync_wakeup, &s->sync_lock);
mp_cond_wait(&s->sync_wakeup, &s->sync_lock);
}
pthread_mutex_unlock(&s->sync_lock);
mp_mutex_unlock(&s->sync_lock);
pthread_mutex_lock(&s->lock);
mp_mutex_lock(&s->lock);
s->frame_w = vo->dwidth;
s->frame_h = vo->dheight;
pthread_cond_signal(&s->wakeup);
pthread_mutex_unlock(&s->lock);
mp_cond_signal(&s->wakeup);
mp_mutex_unlock(&s->lock);
}
static int vo_cocoa_check_events(struct vo *vo)
{
struct vo_cocoa_state *s = vo->cocoa;
pthread_mutex_lock(&s->lock);
mp_mutex_lock(&s->lock);
int events = s->pending_events;
s->pending_events = 0;
if (events & VO_EVENT_RESIZE) {
vo->dwidth = s->vo_dwidth;
vo->dheight = s->vo_dheight;
}
pthread_mutex_unlock(&s->lock);
mp_mutex_unlock(&s->lock);
return events;
}

View File

@ -1,20 +1,20 @@
#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>
#include <libavutil/buffer.h>
#include "mpv_talloc.h"
#include "misc/dispatch.h"
#include "mpv_talloc.h"
#include "osdep/threads.h"
#include "video/mp_image.h"
#include "dr_helper.h"
struct dr_helper {
pthread_mutex_t thread_lock;
pthread_t thread;
bool thread_valid; // (POSIX defines no "unset" pthread_t value yet)
mp_mutex thread_lock;
mp_thread thread;
bool thread_valid; // (POSIX defines no "unset" mp_thread value yet)
struct mp_dispatch_queue *dispatch;
atomic_ullong dr_in_flight;
@ -32,7 +32,7 @@ static void dr_helper_destroy(void *ptr)
// dangling pointers.
assert(atomic_load(&dr->dr_in_flight) == 0);
pthread_mutex_destroy(&dr->thread_lock);
mp_mutex_destroy(&dr->thread_lock);
}
struct dr_helper *dr_helper_create(struct mp_dispatch_queue *dispatch,
@ -48,27 +48,27 @@ struct dr_helper *dr_helper_create(struct mp_dispatch_queue *dispatch,
.get_image = get_image,
.get_image_ctx = get_image_ctx,
};
pthread_mutex_init(&dr->thread_lock, NULL);
mp_mutex_init(&dr->thread_lock);
return dr;
}
void dr_helper_acquire_thread(struct dr_helper *dr)
{
pthread_mutex_lock(&dr->thread_lock);
mp_mutex_lock(&dr->thread_lock);
assert(!dr->thread_valid); // fails on API user errors
dr->thread_valid = true;
dr->thread = pthread_self();
pthread_mutex_unlock(&dr->thread_lock);
dr->thread = mp_thread_self();
mp_mutex_unlock(&dr->thread_lock);
}
void dr_helper_release_thread(struct dr_helper *dr)
{
pthread_mutex_lock(&dr->thread_lock);
mp_mutex_lock(&dr->thread_lock);
// Fails on API user errors.
assert(dr->thread_valid);
assert(pthread_equal(dr->thread, pthread_self()));
assert(mp_thread_equal(dr->thread, mp_thread_self()));
dr->thread_valid = false;
pthread_mutex_unlock(&dr->thread_lock);
mp_mutex_unlock(&dr->thread_lock);
}
struct free_dr_context {
@ -92,10 +92,10 @@ static void free_dr_buffer_on_dr_thread(void *opaque, uint8_t *data)
struct free_dr_context *ctx = opaque;
struct dr_helper *dr = ctx->dr;
pthread_mutex_lock(&dr->thread_lock);
mp_mutex_lock(&dr->thread_lock);
bool on_this_thread =
dr->thread_valid && pthread_equal(ctx->dr->thread, pthread_self());
pthread_mutex_unlock(&dr->thread_lock);
dr->thread_valid && mp_thread_equal(ctx->dr->thread, mp_thread_self());
mp_mutex_unlock(&dr->thread_lock);
// The image could be unreffed even on the DR thread. In practice, this
// matters most on DR destruction.

View File

@ -19,7 +19,7 @@ struct dr_helper *dr_helper_create(struct mp_dispatch_queue *dispatch,
void *get_image_ctx);
// Make DR release calls (freeing images) reentrant if they are called on this
// (pthread_self()) thread. That means any free call will directly release the
// (mp_thread_self()) thread. That means any free call will directly release the
// image as allocated with get_image().
// Only 1 thread can use this at a time. Note that it would make no sense to
// call this on more than 1 thread, as get_image is assumed not thread-safe.

View File

@ -19,12 +19,12 @@
#include <d3d11.h>
#include <dxgi1_6.h>
#include <versionhelpers.h>
#include <pthread.h>
#include "common/common.h"
#include "common/msg.h"
#include "misc/bstr.h"
#include "osdep/io.h"
#include "osdep/threads.h"
#include "osdep/windows_utils.h"
#include "d3d11_helpers.h"
@ -33,7 +33,7 @@
#define DXGI_ADAPTER_FLAG_SOFTWARE (2)
typedef HRESULT(WINAPI *PFN_CREATE_DXGI_FACTORY)(REFIID riid, void **ppFactory);
static pthread_once_t d3d11_once = PTHREAD_ONCE_INIT;
static mp_once d3d11_once = MP_STATIC_ONCE_INITIALIZER;
static PFN_D3D11_CREATE_DEVICE pD3D11CreateDevice = NULL;
static PFN_CREATE_DXGI_FACTORY pCreateDXGIFactory1 = NULL;
static void d3d11_load(void)
@ -51,7 +51,7 @@ static void d3d11_load(void)
static bool load_d3d11_functions(struct mp_log *log)
{
pthread_once(&d3d11_once, d3d11_load);
mp_exec_once(&d3d11_once, d3d11_load);
if (!pD3D11CreateDevice || !pCreateDXGIFactory1) {
mp_fatal(log, "Failed to load base d3d11 functionality: "
"CreateDevice: %s, CreateDXGIFactory1: %s\n",

View File

@ -18,7 +18,6 @@
*/
#include <assert.h>
#include <pthread.h>
#include <dlfcn.h>
#include <EGL/egl.h>
#include <media/NdkImageReader.h>
@ -28,6 +27,7 @@
#include <libavutil/hwcontext_mediacodec.h>
#include "misc/jni.h"
#include "osdep/threads.h"
#include "osdep/timer.h"
#include "video/out/gpu/hwdec.h"
#include "video/out/opengl/ra_gl.h"
@ -63,8 +63,8 @@ struct priv {
AImage *image;
EGLImageKHR egl_image;
pthread_mutex_t lock;
pthread_cond_t cond;
mp_mutex lock;
mp_cond cond;
bool image_available;
EGLImageKHR (EGLAPIENTRY *CreateImageKHR)(
@ -218,10 +218,10 @@ static void image_callback(void *context, AImageReader *reader)
{
struct priv *p = context;
pthread_mutex_lock(&p->lock);
mp_mutex_lock(&p->lock);
p->image_available = true;
pthread_cond_signal(&p->cond);
pthread_mutex_unlock(&p->lock);
mp_cond_signal(&p->cond);
mp_mutex_unlock(&p->lock);
}
static int mapper_init(struct ra_hwdec_mapper *mapper)
@ -231,8 +231,8 @@ static int mapper_init(struct ra_hwdec_mapper *mapper)
GL *gl = ra_gl_get(mapper->ra);
p->log = mapper->log;
pthread_mutex_init(&p->lock, NULL);
pthread_cond_init(&p->cond, NULL);
mp_mutex_init(&p->lock);
mp_cond_init(&p->cond);
p->CreateImageKHR = (void *)eglGetProcAddress("eglCreateImageKHR");
p->DestroyImageKHR = (void *)eglGetProcAddress("eglDestroyImageKHR");
@ -298,8 +298,8 @@ static void mapper_uninit(struct ra_hwdec_mapper *mapper)
ra_tex_free(mapper->ra, &mapper->tex[0]);
pthread_mutex_destroy(&p->lock);
pthread_cond_destroy(&p->cond);
mp_mutex_destroy(&p->lock);
mp_cond_destroy(&p->cond);
}
static void mapper_unmap(struct ra_hwdec_mapper *mapper)
@ -332,16 +332,15 @@ static int mapper_map(struct ra_hwdec_mapper *mapper)
}
bool image_available = false;
pthread_mutex_lock(&p->lock);
mp_mutex_lock(&p->lock);
if (!p->image_available) {
struct timespec ts = mp_rel_time_to_timespec(0.1);
pthread_cond_timedwait(&p->cond, &p->lock, &ts);
mp_cond_timedwait(&p->cond, &p->lock, MP_TIME_MS_TO_NS(100));
if (!p->image_available)
MP_WARN(mapper, "Waiting for frame timed out!\n");
}
image_available = p->image_available;
p->image_available = false;
pthread_mutex_unlock(&p->lock);
mp_mutex_unlock(&p->lock);
media_status_t ret = o->AImageReader_acquireLatestImage(o->reader, &p->image);
if (ret != AMEDIA_OK) {

View File

@ -1,9 +1,9 @@
#include <pthread.h>
#include <windows.h>
#include "angle_dynamic.h"
#include "common/common.h"
#include "osdep/threads.h"
#if HAVE_EGL_ANGLE_LIB
bool angle_load(void)
@ -16,7 +16,7 @@ bool angle_load(void)
ANGLE_FNS(ANGLE_DECL)
static bool angle_loaded;
static pthread_once_t angle_load_once = PTHREAD_ONCE_INIT;
static mp_once angle_load_once = MP_STATIC_ONCE_INITIALIZER;
static void angle_do_load(void)
{
@ -33,7 +33,7 @@ static void angle_do_load(void)
bool angle_load(void)
{
pthread_once(&angle_load_once, angle_do_load);
mp_exec_once(&angle_load_once, angle_do_load);
return angle_loaded;
}
#endif

View File

@ -17,7 +17,6 @@
#include <assert.h>
#include <math.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
@ -122,13 +121,13 @@ static const struct vo_driver *const video_out_drivers[] =
};
struct vo_internal {
pthread_t thread;
mp_thread thread;
struct mp_dispatch_queue *dispatch;
struct dr_helper *dr_helper;
// --- The following fields are protected by lock
pthread_mutex_t lock;
pthread_cond_t wakeup;
mp_mutex lock;
mp_cond wakeup;
bool need_wakeup;
bool terminate;
@ -181,7 +180,7 @@ struct vo_internal {
extern const struct m_sub_options gl_video_conf;
static void forget_frames(struct vo *vo);
static void *vo_thread(void *ptr);
static MP_THREAD_VOID vo_thread(void *ptr);
static bool get_desc(struct m_obj_desc *dst, int index)
{
@ -229,9 +228,9 @@ static void read_opts(struct vo *vo)
{
struct vo_internal *in = vo->in;
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
in->timing_offset = (uint64_t)(vo->opts->timing_offset * 1e9);
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
}
static void update_opts(void *p)
@ -272,8 +271,8 @@ static void dealloc_vo(struct vo *vo)
talloc_free(vo->gl_opts_cache);
talloc_free(vo->eq_opts_cache);
pthread_mutex_destroy(&vo->in->lock);
pthread_cond_destroy(&vo->in->wakeup);
mp_mutex_destroy(&vo->in->lock);
mp_cond_destroy(&vo->in->wakeup);
talloc_free(vo);
}
@ -310,8 +309,8 @@ static struct vo *vo_create(bool probing, struct mpv_global *global,
.stats = stats_ctx_create(vo, global, "vo"),
};
mp_dispatch_set_wakeup_fn(vo->in->dispatch, dispatch_wakeup_cb, vo);
pthread_mutex_init(&vo->in->lock, NULL);
pthread_cond_init(&vo->in->wakeup, NULL);
mp_mutex_init(&vo->in->lock);
mp_cond_init(&vo->in->wakeup);
vo->opts_cache = m_config_cache_alloc(NULL, global, &vo_sub_opts);
vo->opts = vo->opts_cache->opts;
@ -334,10 +333,10 @@ static struct vo *vo_create(bool probing, struct mpv_global *global,
if (!vo->priv)
goto error;
if (pthread_create(&vo->in->thread, NULL, vo_thread, vo))
if (mp_thread_create(&vo->in->thread, vo_thread, vo))
goto error;
if (mp_rendezvous(vo, 0) < 0) { // init barrier
pthread_join(vo->in->thread, NULL);
mp_thread_join(vo->in->thread);
goto error;
}
return vo;
@ -391,7 +390,7 @@ void vo_destroy(struct vo *vo)
{
struct vo_internal *in = vo->in;
mp_dispatch_run(in->dispatch, terminate_vo, vo);
pthread_join(vo->in->thread, NULL);
mp_thread_join(vo->in->thread);
dealloc_vo(vo);
}
@ -551,16 +550,16 @@ static void update_vsync_timing_after_swap(struct vo *vo,
static void update_display_fps(struct vo *vo)
{
struct vo_internal *in = vo->in;
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
if (in->internal_events & VO_EVENT_WIN_STATE) {
in->internal_events &= ~(unsigned)VO_EVENT_WIN_STATE;
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
double fps = 0;
vo->driver->control(vo, VOCTRL_GET_DISPLAY_FPS, &fps);
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
in->reported_display_fps = fps;
}
@ -581,7 +580,7 @@ static void update_display_fps(struct vo *vo)
wakeup_core(vo);
}
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
}
static void check_vo_caps(struct vo *vo)
@ -629,12 +628,12 @@ static void run_reconfig(void *p)
vo->params = NULL;
}
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
talloc_free(in->current_frame);
in->current_frame = NULL;
forget_frames(vo);
reset_vsync_timings(vo);
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
update_display_fps(vo);
}
@ -723,12 +722,10 @@ void vo_wait_default(struct vo *vo, int64_t until_time)
{
struct vo_internal *in = vo->in;
pthread_mutex_lock(&in->lock);
if (!in->need_wakeup) {
struct timespec ts = mp_time_ns_to_realtime(until_time);
pthread_cond_timedwait(&in->wakeup, &in->lock, &ts);
}
pthread_mutex_unlock(&in->lock);
mp_mutex_lock(&in->lock);
if (!in->need_wakeup)
mp_cond_timedwait_until(&in->wakeup, &in->lock, until_time);
mp_mutex_unlock(&in->lock);
}
// Called unlocked.
@ -741,16 +738,16 @@ static void wait_vo(struct vo *vo, int64_t until_time)
} else {
vo_wait_default(vo, until_time);
}
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
in->need_wakeup = false;
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
}
static void wakeup_locked(struct vo *vo)
{
struct vo_internal *in = vo->in;
pthread_cond_broadcast(&in->wakeup);
mp_cond_broadcast(&in->wakeup);
if (vo->driver->wakeup)
vo->driver->wakeup(vo);
in->need_wakeup = true;
@ -762,9 +759,9 @@ void vo_wakeup(struct vo *vo)
{
struct vo_internal *in = vo->in;
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
wakeup_locked(vo);
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
}
// Whether vo_queue_frame() can be called. If the VO is not ready yet, the
@ -778,7 +775,7 @@ void vo_wakeup(struct vo *vo)
bool vo_is_ready_for_frame(struct vo *vo, int64_t next_pts)
{
struct vo_internal *in = vo->in;
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
bool blocked = vo->driver->initially_blocked &&
!(in->internal_events & VO_EVENT_INITIAL_UNBLOCK);
bool r = vo->config_ok && !in->frame_queued && !blocked &&
@ -800,7 +797,7 @@ bool vo_is_ready_for_frame(struct vo *vo, int64_t next_pts)
wakeup_locked(vo);
}
}
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
return r;
}
@ -810,7 +807,7 @@ bool vo_is_ready_for_frame(struct vo *vo, int64_t next_pts)
void vo_queue_frame(struct vo *vo, struct vo_frame *frame)
{
struct vo_internal *in = vo->in;
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
assert(vo->config_ok && !in->frame_queued &&
(!in->current_frame || in->current_frame->num_vsyncs < 1));
in->hasframe = true;
@ -819,7 +816,7 @@ void vo_queue_frame(struct vo *vo, struct vo_frame *frame)
in->wakeup_pts = frame->display_synced
? 0 : frame->pts + MPMAX(frame->duration, 0);
wakeup_locked(vo);
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
}
// If a frame is currently being rendered (or queued), wait until it's done.
@ -827,10 +824,10 @@ void vo_queue_frame(struct vo *vo, struct vo_frame *frame)
void vo_wait_frame(struct vo *vo)
{
struct vo_internal *in = vo->in;
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
while (in->frame_queued || in->rendering)
pthread_cond_wait(&in->wakeup, &in->lock);
pthread_mutex_unlock(&in->lock);
mp_cond_wait(&in->wakeup, &in->lock);
mp_mutex_unlock(&in->lock);
}
// Wait until realtime is >= ts
@ -838,15 +835,14 @@ void vo_wait_frame(struct vo *vo)
static void wait_until(struct vo *vo, int64_t target)
{
struct vo_internal *in = vo->in;
struct timespec ts = mp_time_ns_to_realtime(target);
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
while (target > mp_time_ns()) {
if (in->queued_events & VO_EVENT_LIVE_RESIZING)
break;
if (pthread_cond_timedwait(&in->wakeup, &in->lock, &ts))
if (mp_cond_timedwait_until(&in->wakeup, &in->lock, target))
break;
}
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
}
static bool render_frame(struct vo *vo)
@ -857,7 +853,7 @@ static bool render_frame(struct vo *vo)
update_display_fps(vo);
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
if (in->frame_queued) {
talloc_free(in->current_frame);
@ -931,7 +927,7 @@ static bool render_frame(struct vo *vo)
// timer instead, but possibly benefits from preparing a frame early.
bool can_queue = !in->frame_queued &&
(in->current_frame->num_vsyncs < 1 || !use_vsync);
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
if (can_queue)
wakeup_core(vo);
@ -961,7 +957,7 @@ static bool render_frame(struct vo *vo)
stats_time_end(in->stats, "video-flip");
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
in->dropped_frame = prev_drop_count < vo->in->drop_count;
in->rendering = false;
@ -993,13 +989,13 @@ static bool render_frame(struct vo *vo)
if (in->frame_queued && in->frame_queued->display_synced)
more_frames = true;
pthread_cond_broadcast(&in->wakeup); // for vo_wait_frame()
mp_cond_broadcast(&in->wakeup); // for vo_wait_frame()
wakeup_core(vo);
done:
if (!vo->driver->frame_owner)
talloc_free(frame);
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
return more_frames;
}
@ -1011,7 +1007,7 @@ static void do_redraw(struct vo *vo)
if (!vo->config_ok || (vo->driver->caps & VO_CAP_NORETAIN))
return;
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
in->request_redraw = false;
bool full_redraw = in->dropped_frame;
struct vo_frame *frame = NULL;
@ -1027,7 +1023,7 @@ static void do_redraw(struct vo *vo)
frame->still = true;
frame->pts = 0;
frame->duration = -1;
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
vo->driver->draw_frame(vo, frame);
vo->driver->flip_page(vo);
@ -1043,13 +1039,13 @@ static struct mp_image *get_image_vo(void *ctx, int imgfmt, int w, int h,
return vo->driver->get_image(vo, imgfmt, w, h, stride_align, flags);
}
static void *vo_thread(void *ptr)
static MP_THREAD_VOID vo_thread(void *ptr)
{
struct vo *vo = ptr;
struct vo_internal *in = vo->in;
bool vo_paused = false;
mpthread_set_name("vo");
mp_thread_set_name("vo");
if (vo->driver->get_image) {
in->dr_helper = dr_helper_create(in->dispatch, get_image_vo, vo);
@ -1075,7 +1071,7 @@ static void *vo_thread(void *ptr)
int64_t now = mp_time_ns();
int64_t wait_until = now + (working ? 0 : (int64_t)1e9);
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
if (in->wakeup_pts) {
if (in->wakeup_pts > now) {
wait_until = MPMIN(wait_until, in->wakeup_pts);
@ -1094,7 +1090,7 @@ static void *vo_thread(void *ptr)
in->send_reset = false;
bool send_pause = in->paused != vo_paused;
vo_paused = in->paused;
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
if (send_reset)
vo->driver->control(vo, VOCTRL_RESET, NULL);
@ -1118,13 +1114,13 @@ static void *vo_thread(void *ptr)
vo->driver->uninit(vo);
done:
TA_FREEP(&in->dr_helper);
return NULL;
MP_THREAD_RETURN();
}
void vo_set_paused(struct vo *vo, bool paused)
{
struct vo_internal *in = vo->in;
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
if (in->paused != paused) {
in->paused = paused;
if (in->paused && in->dropped_frame) {
@ -1134,55 +1130,55 @@ void vo_set_paused(struct vo *vo, bool paused)
reset_vsync_timings(vo);
wakeup_locked(vo);
}
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
}
int64_t vo_get_drop_count(struct vo *vo)
{
pthread_mutex_lock(&vo->in->lock);
mp_mutex_lock(&vo->in->lock);
int64_t r = vo->in->drop_count;
pthread_mutex_unlock(&vo->in->lock);
mp_mutex_unlock(&vo->in->lock);
return r;
}
void vo_increment_drop_count(struct vo *vo, int64_t n)
{
pthread_mutex_lock(&vo->in->lock);
mp_mutex_lock(&vo->in->lock);
vo->in->drop_count += n;
pthread_mutex_unlock(&vo->in->lock);
mp_mutex_unlock(&vo->in->lock);
}
// Make the VO redraw the OSD at some point in the future.
void vo_redraw(struct vo *vo)
{
struct vo_internal *in = vo->in;
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
if (!in->request_redraw) {
in->request_redraw = true;
in->want_redraw = false;
wakeup_locked(vo);
}
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
}
bool vo_want_redraw(struct vo *vo)
{
struct vo_internal *in = vo->in;
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
bool r = in->want_redraw;
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
return r;
}
void vo_seek_reset(struct vo *vo)
{
struct vo_internal *in = vo->in;
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
forget_frames(vo);
reset_vsync_timings(vo);
in->send_reset = true;
wakeup_locked(vo);
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
}
// Return true if there is still a frame being displayed (or queued).
@ -1190,9 +1186,9 @@ void vo_seek_reset(struct vo *vo)
bool vo_still_displaying(struct vo *vo)
{
struct vo_internal *in = vo->in;
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
bool working = in->rendering || in->frame_queued;
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
return working && in->hasframe;
}
@ -1245,45 +1241,45 @@ void vo_get_src_dst_rects(struct vo *vo, struct mp_rect *out_src,
void vo_set_queue_params(struct vo *vo, int64_t offset_ns, int num_req_frames)
{
struct vo_internal *in = vo->in;
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
in->flip_queue_offset = offset_ns;
in->req_frames = MPCLAMP(num_req_frames, 1, VO_MAX_REQ_FRAMES);
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
}
int vo_get_num_req_frames(struct vo *vo)
{
struct vo_internal *in = vo->in;
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
int res = in->req_frames;
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
return res;
}
double vo_get_vsync_interval(struct vo *vo)
{
struct vo_internal *in = vo->in;
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
double res = vo->in->vsync_interval > 1 ? vo->in->vsync_interval : -1;
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
return res;
}
double vo_get_estimated_vsync_interval(struct vo *vo)
{
struct vo_internal *in = vo->in;
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
double res = in->estimated_vsync_interval;
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
return res;
}
double vo_get_estimated_vsync_jitter(struct vo *vo)
{
struct vo_internal *in = vo->in;
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
double res = in->estimated_vsync_jitter;
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
return res;
}
@ -1296,7 +1292,7 @@ double vo_get_estimated_vsync_jitter(struct vo *vo)
double vo_get_delay(struct vo *vo)
{
struct vo_internal *in = vo->in;
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
assert (!in->frame_queued);
int64_t res = 0;
if (in->base_vsync && in->vsync_interval > 1 && in->current_frame) {
@ -1306,33 +1302,33 @@ double vo_get_delay(struct vo *vo)
if (!in->current_frame->display_synced)
res = 0;
}
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
return res ? (res - mp_time_ns()) / 1e9 : 0;
}
void vo_discard_timing_info(struct vo *vo)
{
struct vo_internal *in = vo->in;
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
reset_vsync_timings(vo);
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
}
int64_t vo_get_delayed_count(struct vo *vo)
{
struct vo_internal *in = vo->in;
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
int64_t res = vo->in->delayed_count;
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
return res;
}
double vo_get_display_fps(struct vo *vo)
{
struct vo_internal *in = vo->in;
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
double res = vo->in->display_fps;
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
return res;
}
@ -1341,14 +1337,14 @@ double vo_get_display_fps(struct vo *vo)
void vo_event(struct vo *vo, int event)
{
struct vo_internal *in = vo->in;
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
if ((in->queued_events & event & VO_EVENTS_USER) != (event & VO_EVENTS_USER))
wakeup_core(vo);
if (event)
wakeup_locked(vo);
in->queued_events |= event;
in->internal_events |= event;
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
}
// Check event flags set with vo_event(). Return the mask of events that was
@ -1356,30 +1352,30 @@ void vo_event(struct vo *vo, int event)
int vo_query_and_reset_events(struct vo *vo, int events)
{
struct vo_internal *in = vo->in;
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
int r = in->queued_events & events;
in->queued_events &= ~(unsigned)r;
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
return r;
}
struct mp_image *vo_get_current_frame(struct vo *vo)
{
struct vo_internal *in = vo->in;
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
struct mp_image *r = NULL;
if (vo->in->current_frame)
r = mp_image_new_ref(vo->in->current_frame->current);
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
return r;
}
struct vo_frame *vo_get_current_vo_frame(struct vo *vo)
{
struct vo_internal *in = vo->in;
pthread_mutex_lock(&in->lock);
mp_mutex_lock(&in->lock);
struct vo_frame *r = vo_frame_ref(vo->in->current_frame);
pthread_mutex_unlock(&in->lock);
mp_mutex_unlock(&in->lock);
return r;
}

View File

@ -17,7 +17,6 @@
* License along with mpv. If not, see <http://www.gnu.org/licenses/>.
*/
#include <pthread.h>
#include <libplacebo/colorspace.h>
#include <libplacebo/options.h>
#include <libplacebo/renderer.h>
@ -31,6 +30,7 @@
#include "options/m_config.h"
#include "options/path.h"
#include "osdep/io.h"
#include "osdep/threads.h"
#include "stream/stream.h"
#include "video/fmt-conversion.h"
#include "video/mp_image.h"
@ -96,7 +96,7 @@ struct priv {
struct ra_hwdec_mapper *hwdec_mapper;
// Allocated DR buffers
pthread_mutex_t dr_lock;
mp_mutex dr_lock;
pl_buf *dr_buffers;
int num_dr_buffers;
@ -162,30 +162,30 @@ static void update_lut(struct priv *p, struct user_lut *lut);
static pl_buf get_dr_buf(struct priv *p, const uint8_t *ptr)
{
pthread_mutex_lock(&p->dr_lock);
mp_mutex_lock(&p->dr_lock);
for (int i = 0; i < p->num_dr_buffers; i++) {
pl_buf buf = p->dr_buffers[i];
if (ptr >= buf->data && ptr < buf->data + buf->params.size) {
pthread_mutex_unlock(&p->dr_lock);
mp_mutex_unlock(&p->dr_lock);
return buf;
}
}
pthread_mutex_unlock(&p->dr_lock);
mp_mutex_unlock(&p->dr_lock);
return NULL;
}
static void free_dr_buf(void *opaque, uint8_t *data)
{
struct priv *p = opaque;
pthread_mutex_lock(&p->dr_lock);
mp_mutex_lock(&p->dr_lock);
for (int i = 0; i < p->num_dr_buffers; i++) {
if (p->dr_buffers[i]->data == data) {
pl_buf_destroy(p->gpu, &p->dr_buffers[i]);
MP_TARRAY_REMOVE_AT(p->dr_buffers, p->num_dr_buffers, i);
pthread_mutex_unlock(&p->dr_lock);
mp_mutex_unlock(&p->dr_lock);
return;
}
}
@ -227,9 +227,9 @@ static struct mp_image *get_image(struct vo *vo, int imgfmt, int w, int h,
return NULL;
}
pthread_mutex_lock(&p->dr_lock);
mp_mutex_lock(&p->dr_lock);
MP_TARRAY_APPEND(p, p->dr_buffers, p->num_dr_buffers, buf);
pthread_mutex_unlock(&p->dr_lock);
mp_mutex_unlock(&p->dr_lock);
return mpi;
}
@ -1614,7 +1614,7 @@ static void uninit(struct vo *vo)
}
assert(p->num_dr_buffers == 0);
pthread_mutex_destroy(&p->dr_lock);
mp_mutex_destroy(&p->dr_lock);
save_cache_files(p);
pl_cache_destroy(&p->shader_cache);
@ -1668,7 +1668,7 @@ static int preinit(struct vo *vo)
vo->hwdec_devs = hwdec_devices_create();
hwdec_devices_set_loader(vo->hwdec_devs, load_hwdec_api, vo);
ra_hwdec_ctx_init(&p->hwdec_ctx, vo->hwdec_devs, gl_opts->hwdec_interop, false);
pthread_mutex_init(&p->dr_lock, NULL);
mp_mutex_init(&p->dr_lock);
p->shader_cache = pl_cache_create(pl_cache_params(
.log = p->pllog,

View File

@ -192,7 +192,7 @@ static void draw_frame(struct vo *vo, struct vo_frame *voframe)
return;
// Lock for shared timestamp fields.
pthread_mutex_lock(&ectx->lock);
mp_mutex_lock(&ectx->lock);
double pts = mpi->pts;
double outpts = pts;
@ -222,7 +222,7 @@ static void draw_frame(struct vo *vo, struct vo_frame *voframe)
ectx->next_in_pts = nextpts;
}
pthread_mutex_unlock(&ectx->lock);
mp_mutex_unlock(&ectx->lock);
AVFrame *frame = mp_image_to_av_frame(mpi);
MP_HANDLE_OOM(frame);

View File

@ -1,7 +1,6 @@
#include <assert.h>
#include <limits.h>
#include <math.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
@ -20,6 +19,7 @@
#include "vo.h"
#include "video/mp_image.h"
#include "sub/osd.h"
#include "osdep/threads.h"
#include "osdep/timer.h"
#include "common/global.h"
@ -65,20 +65,20 @@ struct mpv_render_context {
bool advanced_control;
struct dr_helper *dr; // NULL if advanced_control disabled
pthread_mutex_t control_lock;
mp_mutex control_lock;
// --- Protected by control_lock
mp_render_cb_control_fn control_cb;
void *control_cb_ctx;
pthread_mutex_t update_lock;
pthread_cond_t update_cond; // paired with update_lock
mp_mutex update_lock;
mp_cond update_cond; // paired with update_lock
// --- Protected by update_lock
mpv_render_update_fn update_cb;
void *update_cb_ctx;
pthread_mutex_t lock;
pthread_cond_t video_wait; // paired with lock
mp_mutex lock;
mp_cond video_wait; // paired with lock
// --- Protected by lock
struct vo_frame *next_frame; // next frame to draw
@ -115,12 +115,12 @@ const struct render_backend_fns *render_backends[] = {
static void update(struct mpv_render_context *ctx)
{
pthread_mutex_lock(&ctx->update_lock);
mp_mutex_lock(&ctx->update_lock);
if (ctx->update_cb)
ctx->update_cb(ctx->update_cb_ctx);
pthread_cond_broadcast(&ctx->update_cond);
pthread_mutex_unlock(&ctx->update_lock);
mp_cond_broadcast(&ctx->update_cond);
mp_mutex_unlock(&ctx->update_lock);
}
void *get_mpv_render_param(mpv_render_param *params, mpv_render_param_type type,
@ -135,7 +135,7 @@ void *get_mpv_render_param(mpv_render_param *params, mpv_render_param_type type,
static void forget_frames(struct mpv_render_context *ctx, bool all)
{
pthread_cond_broadcast(&ctx->video_wait);
mp_cond_broadcast(&ctx->video_wait);
if (all) {
talloc_free(ctx->cur_frame);
ctx->cur_frame = NULL;
@ -161,11 +161,11 @@ int mpv_render_context_create(mpv_render_context **res, mpv_handle *mpv,
mpv_render_param *params)
{
mpv_render_context *ctx = talloc_zero(NULL, mpv_render_context);
pthread_mutex_init(&ctx->control_lock, NULL);
pthread_mutex_init(&ctx->lock, NULL);
pthread_mutex_init(&ctx->update_lock, NULL);
pthread_cond_init(&ctx->update_cond, NULL);
pthread_cond_init(&ctx->video_wait, NULL);
mp_mutex_init(&ctx->control_lock);
mp_mutex_init(&ctx->lock);
mp_mutex_init(&ctx->update_lock);
mp_cond_init(&ctx->update_cond);
mp_cond_init(&ctx->video_wait);
ctx->global = mp_client_get_global(mpv);
ctx->client_api = ctx->global->client_api;
@ -227,22 +227,22 @@ void mpv_render_context_set_update_callback(mpv_render_context *ctx,
mpv_render_update_fn callback,
void *callback_ctx)
{
pthread_mutex_lock(&ctx->update_lock);
mp_mutex_lock(&ctx->update_lock);
ctx->update_cb = callback;
ctx->update_cb_ctx = callback_ctx;
if (ctx->update_cb)
ctx->update_cb(ctx->update_cb_ctx);
pthread_mutex_unlock(&ctx->update_lock);
mp_mutex_unlock(&ctx->update_lock);
}
void mp_render_context_set_control_callback(mpv_render_context *ctx,
mp_render_cb_control_fn callback,
void *callback_ctx)
{
pthread_mutex_lock(&ctx->control_lock);
mp_mutex_lock(&ctx->control_lock);
ctx->control_cb = callback;
ctx->control_cb_ctx = callback_ctx;
pthread_mutex_unlock(&ctx->control_lock);
mp_mutex_unlock(&ctx->control_lock);
}
void mpv_render_context_free(mpv_render_context *ctx)
@ -282,13 +282,13 @@ void mpv_render_context_free(mpv_render_context *ctx)
}
}
pthread_mutex_lock(&ctx->lock);
mp_mutex_lock(&ctx->lock);
// Barrier - guarantee uninit() has left the lock region. It will access ctx
// until the lock has been released, so we must not proceed with destruction
// before we can acquire the lock. (The opposite, uninit() acquiring the
// lock, can not happen anymore at this point - we've waited for VO uninit,
// and prevented that new VOs can be created.)
pthread_mutex_unlock(&ctx->lock);
mp_mutex_unlock(&ctx->lock);
assert(!atomic_load(&ctx->in_use));
assert(!ctx->vo);
@ -311,11 +311,11 @@ void mpv_render_context_free(mpv_render_context *ctx)
talloc_free(ctx->dr);
talloc_free(ctx->dispatch);
pthread_cond_destroy(&ctx->update_cond);
pthread_cond_destroy(&ctx->video_wait);
pthread_mutex_destroy(&ctx->update_lock);
pthread_mutex_destroy(&ctx->lock);
pthread_mutex_destroy(&ctx->control_lock);
mp_cond_destroy(&ctx->update_cond);
mp_cond_destroy(&ctx->video_wait);
mp_mutex_destroy(&ctx->update_lock);
mp_mutex_destroy(&ctx->lock);
mp_mutex_destroy(&ctx->control_lock);
talloc_free(ctx);
}
@ -331,7 +331,7 @@ bool mp_render_context_acquire(mpv_render_context *ctx)
int mpv_render_context_render(mpv_render_context *ctx, mpv_render_param *params)
{
pthread_mutex_lock(&ctx->lock);
mp_mutex_lock(&ctx->lock);
int do_render =
!GET_MPV_RENDER_PARAM(params, MPV_RENDER_PARAM_SKIP_RENDERING, int, 0);
@ -341,7 +341,7 @@ int mpv_render_context_render(mpv_render_context *ctx, mpv_render_param *params)
int err = ctx->renderer->fns->get_target_size(ctx->renderer, params,
&vp_w, &vp_h);
if (err < 0) {
pthread_mutex_unlock(&ctx->lock);
mp_mutex_unlock(&ctx->lock);
return err;
}
@ -385,7 +385,7 @@ int mpv_render_context_render(mpv_render_context *ctx, mpv_render_param *params)
ctx->next_frame = NULL;
if (!(frame->redraw || !frame->current))
wait_present_count += 1;
pthread_cond_broadcast(&ctx->video_wait);
mp_cond_broadcast(&ctx->video_wait);
talloc_free(ctx->cur_frame);
ctx->cur_frame = vo_frame_ref(frame);
} else {
@ -398,7 +398,7 @@ int mpv_render_context_render(mpv_render_context *ctx, mpv_render_param *params)
if (!frame)
frame = &dummy;
pthread_mutex_unlock(&ctx->lock);
mp_mutex_unlock(&ctx->lock);
MP_STATS(ctx, "glcb-render");
@ -413,10 +413,10 @@ int mpv_render_context_render(mpv_render_context *ctx, mpv_render_param *params)
if (GET_MPV_RENDER_PARAM(params, MPV_RENDER_PARAM_BLOCK_FOR_TARGET_TIME,
int, 1))
{
pthread_mutex_lock(&ctx->lock);
mp_mutex_lock(&ctx->lock);
while (wait_present_count > ctx->present_count)
pthread_cond_wait(&ctx->video_wait, &ctx->lock);
pthread_mutex_unlock(&ctx->lock);
mp_cond_wait(&ctx->video_wait, &ctx->lock);
mp_mutex_unlock(&ctx->lock);
}
return err;
@ -426,10 +426,10 @@ void mpv_render_context_report_swap(mpv_render_context *ctx)
{
MP_STATS(ctx, "glcb-reportflip");
pthread_mutex_lock(&ctx->lock);
mp_mutex_lock(&ctx->lock);
ctx->flip_count += 1;
pthread_cond_broadcast(&ctx->video_wait);
pthread_mutex_unlock(&ctx->lock);
mp_cond_broadcast(&ctx->video_wait);
mp_mutex_unlock(&ctx->lock);
}
uint64_t mpv_render_context_update(mpv_render_context *ctx)
@ -438,10 +438,10 @@ uint64_t mpv_render_context_update(mpv_render_context *ctx)
mp_dispatch_queue_process(ctx->dispatch, 0);
pthread_mutex_lock(&ctx->lock);
mp_mutex_lock(&ctx->lock);
if (ctx->next_frame)
res |= MPV_RENDER_UPDATE_FRAME;
pthread_mutex_unlock(&ctx->lock);
mp_mutex_unlock(&ctx->lock);
return res;
}
@ -455,7 +455,7 @@ int mpv_render_context_get_info(mpv_render_context *ctx,
mpv_render_param param)
{
int res = MPV_ERROR_NOT_IMPLEMENTED;
pthread_mutex_lock(&ctx->lock);
mp_mutex_lock(&ctx->lock);
switch (param.type) {
case MPV_RENDER_PARAM_NEXT_FRAME_INFO: {
@ -477,7 +477,7 @@ int mpv_render_context_get_info(mpv_render_context *ctx,
default:;
}
pthread_mutex_unlock(&ctx->lock);
mp_mutex_unlock(&ctx->lock);
return res;
}
@ -486,12 +486,12 @@ static void draw_frame(struct vo *vo, struct vo_frame *frame)
struct vo_priv *p = vo->priv;
struct mpv_render_context *ctx = p->ctx;
pthread_mutex_lock(&ctx->lock);
mp_mutex_lock(&ctx->lock);
assert(!ctx->next_frame);
ctx->next_frame = vo_frame_ref(frame);
ctx->expected_flip_count = ctx->flip_count + 1;
ctx->redrawing = frame->redraw || !frame->current;
pthread_mutex_unlock(&ctx->lock);
mp_mutex_unlock(&ctx->lock);
update(ctx);
}
@ -500,13 +500,13 @@ static void flip_page(struct vo *vo)
{
struct vo_priv *p = vo->priv;
struct mpv_render_context *ctx = p->ctx;
struct timespec ts = mp_rel_time_to_timespec(0.2);
int64_t until = mp_time_ns_add(mp_time_ns(), 0.2);
pthread_mutex_lock(&ctx->lock);
mp_mutex_lock(&ctx->lock);
// Wait until frame was rendered
while (ctx->next_frame) {
if (pthread_cond_timedwait(&ctx->video_wait, &ctx->lock, &ts)) {
if (mp_cond_timedwait_until(&ctx->video_wait, &ctx->lock, until)) {
if (ctx->next_frame) {
MP_VERBOSE(vo, "mpv_render_context_render() not being called "
"or stuck.\n");
@ -517,7 +517,7 @@ static void flip_page(struct vo *vo)
// Unblock mpv_render_context_render().
ctx->present_count += 1;
pthread_cond_broadcast(&ctx->video_wait);
mp_cond_broadcast(&ctx->video_wait);
if (ctx->redrawing)
goto done; // do not block for redrawing
@ -528,7 +528,7 @@ static void flip_page(struct vo *vo)
// Assume the user calls it consistently _if_ it's called at all.
if (!ctx->flip_count)
break;
if (pthread_cond_timedwait(&ctx->video_wait, &ctx->lock, &ts)) {
if (mp_cond_timedwait_until(&ctx->video_wait, &ctx->lock, until)) {
MP_VERBOSE(vo, "mpv_render_report_swap() not being called.\n");
goto done;
}
@ -542,11 +542,11 @@ done:
ctx->cur_frame = ctx->next_frame;
ctx->next_frame = NULL;
ctx->present_count += 2;
pthread_cond_signal(&ctx->video_wait);
mp_cond_signal(&ctx->video_wait);
vo_increment_drop_count(vo, 1);
}
pthread_mutex_unlock(&ctx->lock);
mp_mutex_unlock(&ctx->lock);
}
static int query_format(struct vo *vo, int format)
@ -555,10 +555,10 @@ static int query_format(struct vo *vo, int format)
struct mpv_render_context *ctx = p->ctx;
bool ok = false;
pthread_mutex_lock(&ctx->lock);
mp_mutex_lock(&ctx->lock);
if (format >= IMGFMT_START && format < IMGFMT_END)
ok = ctx->imgfmt_supported[format - IMGFMT_START];
pthread_mutex_unlock(&ctx->lock);
mp_mutex_unlock(&ctx->lock);
return ok;
}
@ -572,9 +572,9 @@ static void run_control_on_render_thread(void *p)
switch (request) {
case VOCTRL_SCREENSHOT: {
pthread_mutex_lock(&ctx->lock);
mp_mutex_lock(&ctx->lock);
struct vo_frame *frame = vo_frame_ref(ctx->cur_frame);
pthread_mutex_unlock(&ctx->lock);
mp_mutex_unlock(&ctx->lock);
if (frame && ctx->renderer->fns->screenshot)
ctx->renderer->fns->screenshot(ctx->renderer, frame, data);
talloc_free(frame);
@ -599,10 +599,10 @@ static int control(struct vo *vo, uint32_t request, void *data)
switch (request) {
case VOCTRL_RESET:
pthread_mutex_lock(&ctx->lock);
mp_mutex_lock(&ctx->lock);
forget_frames(ctx, false);
ctx->need_reset = true;
pthread_mutex_unlock(&ctx->lock);
mp_mutex_unlock(&ctx->lock);
vo->want_redraw = true;
return VO_TRUE;
case VOCTRL_PAUSE:
@ -612,15 +612,15 @@ static int control(struct vo *vo, uint32_t request, void *data)
vo->want_redraw = true;
return VO_TRUE;
case VOCTRL_SET_PANSCAN:
pthread_mutex_lock(&ctx->lock);
mp_mutex_lock(&ctx->lock);
ctx->need_resize = true;
pthread_mutex_unlock(&ctx->lock);
mp_mutex_unlock(&ctx->lock);
vo->want_redraw = true;
return VO_TRUE;
case VOCTRL_UPDATE_RENDER_OPTS:
pthread_mutex_lock(&ctx->lock);
mp_mutex_lock(&ctx->lock);
ctx->need_update_external = true;
pthread_mutex_unlock(&ctx->lock);
mp_mutex_unlock(&ctx->lock);
vo->want_redraw = true;
return VO_TRUE;
}
@ -639,14 +639,14 @@ static int control(struct vo *vo, uint32_t request, void *data)
}
int r = VO_NOTIMPL;
pthread_mutex_lock(&ctx->control_lock);
mp_mutex_lock(&ctx->control_lock);
if (ctx->control_cb) {
int events = 0;
r = p->ctx->control_cb(vo, p->ctx->control_cb_ctx,
&events, request, data);
vo_event(vo, events);
}
pthread_mutex_unlock(&ctx->control_lock);
mp_mutex_unlock(&ctx->control_lock);
return r;
}
@ -668,12 +668,12 @@ static int reconfig(struct vo *vo, struct mp_image_params *params)
struct vo_priv *p = vo->priv;
struct mpv_render_context *ctx = p->ctx;
pthread_mutex_lock(&ctx->lock);
mp_mutex_lock(&ctx->lock);
forget_frames(ctx, true);
ctx->img_params = *params;
ctx->need_reconfig = true;
ctx->need_resize = true;
pthread_mutex_unlock(&ctx->lock);
mp_mutex_unlock(&ctx->lock);
control(vo, VOCTRL_RECONFIG, NULL);
@ -687,7 +687,7 @@ static void uninit(struct vo *vo)
control(vo, VOCTRL_UNINIT, NULL);
pthread_mutex_lock(&ctx->lock);
mp_mutex_lock(&ctx->lock);
forget_frames(ctx, true);
ctx->img_params = (struct mp_image_params){0};
@ -703,7 +703,7 @@ static void uninit(struct vo *vo)
assert(prev_in_use); // obviously must have been set
mp_dispatch_interrupt(ctx->dispatch);
pthread_mutex_unlock(&ctx->lock);
mp_mutex_unlock(&ctx->lock);
}
static int preinit(struct vo *vo)
@ -720,11 +720,11 @@ static int preinit(struct vo *vo)
return -1;
}
pthread_mutex_lock(&ctx->lock);
mp_mutex_lock(&ctx->lock);
ctx->vo = vo;
ctx->need_resize = true;
ctx->need_update_external = true;
pthread_mutex_unlock(&ctx->lock);
mp_mutex_unlock(&ctx->lock);
vo->hwdec_devs = ctx->hwdec_devs;
control(vo, VOCTRL_PREINIT, NULL);

View File

@ -84,8 +84,8 @@ struct priv {
// for RAM input
MMAL_POOL_T *swpool;
pthread_mutex_t display_mutex;
pthread_cond_t display_cond;
mp_mutex display_mutex;
mp_cond display_cond;
int64_t vsync_counter;
bool reload_display;
@ -476,14 +476,14 @@ static int set_geometry(struct vo *vo)
static void wait_next_vsync(struct vo *vo)
{
struct priv *p = vo->priv;
pthread_mutex_lock(&p->display_mutex);
struct timespec end = mp_rel_time_to_timespec(0.050);
mp_mutex_lock(&p->display_mutex);
int64_t end = mp_time_ns_add(mp_time_ns(), 0.050);
int64_t old = p->vsync_counter;
while (old == p->vsync_counter && !p->reload_display) {
if (pthread_cond_timedwait(&p->display_cond, &p->display_mutex, &end))
if (mp_cond_timedwait_until(&p->display_cond, &p->display_mutex, end))
break;
}
pthread_mutex_unlock(&p->display_mutex);
mp_mutex_unlock(&p->display_mutex);
}
static void flip_page(struct vo *vo)
@ -755,10 +755,10 @@ static int control(struct vo *vo, uint32_t request, void *data)
*(struct mp_image **)data = take_screenshot(vo);
return VO_TRUE;
case VOCTRL_CHECK_EVENTS: {
pthread_mutex_lock(&p->display_mutex);
mp_mutex_lock(&p->display_mutex);
bool reload_required = p->reload_display;
p->reload_display = false;
pthread_mutex_unlock(&p->display_mutex);
mp_mutex_unlock(&p->display_mutex);
if (reload_required)
recreate_renderer(vo);
return VO_TRUE;
@ -780,10 +780,10 @@ static void tv_callback(void *callback_data, uint32_t reason, uint32_t param1,
{
struct vo *vo = callback_data;
struct priv *p = vo->priv;
pthread_mutex_lock(&p->display_mutex);
mp_mutex_lock(&p->display_mutex);
p->reload_display = true;
pthread_cond_signal(&p->display_cond);
pthread_mutex_unlock(&p->display_mutex);
mp_cond_signal(&p->display_cond);
mp_mutex_unlock(&p->display_mutex);
vo_wakeup(vo);
}
@ -791,10 +791,10 @@ static void vsync_callback(DISPMANX_UPDATE_HANDLE_T u, void *arg)
{
struct vo *vo = arg;
struct priv *p = vo->priv;
pthread_mutex_lock(&p->display_mutex);
mp_mutex_lock(&p->display_mutex);
p->vsync_counter += 1;
pthread_cond_signal(&p->display_cond);
pthread_mutex_unlock(&p->display_mutex);
mp_cond_signal(&p->display_cond);
mp_mutex_unlock(&p->display_mutex);
}
static void destroy_dispmanx(struct vo *vo)
@ -865,8 +865,8 @@ static void uninit(struct vo *vo)
mmal_vc_deinit();
pthread_cond_destroy(&p->display_cond);
pthread_mutex_destroy(&p->display_mutex);
mp_cond_destroy(&p->display_cond);
mp_mutex_destroy(&p->display_mutex);
}
static int preinit(struct vo *vo)
@ -886,8 +886,8 @@ static int preinit(struct vo *vo)
return -1;
}
pthread_mutex_init(&p->display_mutex, NULL);
pthread_cond_init(&p->display_cond, NULL);
mp_mutex_init(&p->display_mutex);
mp_cond_init(&p->display_cond);
p->opts_cache = m_config_cache_alloc(p, vo->global, &vo_sub_opts);

View File

@ -17,7 +17,6 @@
#include <assert.h>
#include <limits.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
@ -91,7 +90,7 @@ struct vo_w32_state {
struct m_config_cache *opts_cache;
struct input_ctx *input_ctx;
pthread_t thread;
mp_thread thread;
bool terminate;
struct mp_dispatch_queue *dispatch; // used to run stuff on the GUI thread
bool in_dispatch;
@ -1510,7 +1509,7 @@ static LRESULT CALLBACK WndProc(HWND hWnd, UINT message, WPARAM wParam,
return DefWindowProcW(hWnd, message, wParam, lParam);
}
static pthread_once_t window_class_init_once = PTHREAD_ONCE_INIT;
static mp_once window_class_init_once = MP_STATIC_ONCE_INITIALIZER;
static ATOM window_class;
static void register_window_class(void)
{
@ -1528,7 +1527,7 @@ static void register_window_class(void)
static ATOM get_window_class(void)
{
pthread_once(&window_class_init_once, register_window_class);
mp_exec_once(&window_class_init_once, register_window_class);
return window_class;
}
@ -1728,13 +1727,13 @@ static void w32_api_load(struct vo_w32_state *w32)
(void *)GetProcAddress(uxtheme_dll, MAKEINTRESOURCEA(135));
}
static void *gui_thread(void *ptr)
static MP_THREAD_VOID gui_thread(void *ptr)
{
struct vo_w32_state *w32 = ptr;
bool ole_ok = false;
int res = 0;
mpthread_set_name("window");
mp_thread_set_name("window");
w32_api_load(w32);
@ -1847,7 +1846,7 @@ done:
if (ole_ok)
OleUninitialize();
SetThreadExecutionState(ES_CONTINUOUS);
return NULL;
MP_THREAD_RETURN();
}
bool vo_w32_init(struct vo *vo)
@ -1865,11 +1864,11 @@ bool vo_w32_init(struct vo *vo)
w32->opts = w32->opts_cache->opts;
vo->w32 = w32;
if (pthread_create(&w32->thread, NULL, gui_thread, w32))
if (mp_thread_create(&w32->thread, gui_thread, w32))
goto fail;
if (!mp_rendezvous(w32, 0)) { // init barrier
pthread_join(w32->thread, NULL);
mp_thread_join(w32->thread);
goto fail;
}
@ -2120,7 +2119,7 @@ void vo_w32_uninit(struct vo *vo)
return;
mp_dispatch_run(w32->dispatch, do_terminate, w32);
pthread_join(w32->thread, NULL);
mp_thread_join(w32->thread);
AvRevertMmThreadCharacteristics(w32->avrt_handle);

View File

@ -18,7 +18,6 @@
#include <windows.h>
#include <stdbool.h>
#include <string.h>
#include <pthread.h>
#include "displayconfig.h"

View File

@ -20,7 +20,6 @@
#include <stdbool.h>
#include <inttypes.h>
#include <pthread.h>
#include <va/va.h>
#include "mp_image.h"

View File

@ -46,9 +46,9 @@ static void preemption_callback(VdpDevice device, void *context)
{
struct mp_vdpau_ctx *ctx = context;
pthread_mutex_lock(&ctx->preempt_lock);
mp_mutex_lock(&ctx->preempt_lock);
ctx->is_preempted = true;
pthread_mutex_unlock(&ctx->preempt_lock);
mp_mutex_unlock(&ctx->preempt_lock);
}
static int win_x11_init_vdpau_procs(struct mp_vdpau_ctx *ctx, bool probing)
@ -163,7 +163,7 @@ static int handle_preemption(struct mp_vdpau_ctx *ctx)
int mp_vdpau_handle_preemption(struct mp_vdpau_ctx *ctx, uint64_t *counter)
{
int r = 1;
pthread_mutex_lock(&ctx->preempt_lock);
mp_mutex_lock(&ctx->preempt_lock);
const void *p[4] = {&(uint32_t){0}};
uint32_t stride[4] = {4};
@ -182,7 +182,7 @@ int mp_vdpau_handle_preemption(struct mp_vdpau_ctx *ctx, uint64_t *counter)
r = 0; // signal recovery after preemption
}
pthread_mutex_unlock(&ctx->preempt_lock);
mp_mutex_unlock(&ctx->preempt_lock);
return r;
}
@ -196,10 +196,10 @@ static void release_decoder_surface(void *ptr)
struct surface_ref *r = ptr;
struct mp_vdpau_ctx *ctx = r->ctx;
pthread_mutex_lock(&ctx->pool_lock);
mp_mutex_lock(&ctx->pool_lock);
assert(ctx->video_surfaces[r->index].in_use);
ctx->video_surfaces[r->index].in_use = false;
pthread_mutex_unlock(&ctx->pool_lock);
mp_mutex_unlock(&ctx->pool_lock);
talloc_free(r);
}
@ -238,7 +238,7 @@ static struct mp_image *mp_vdpau_get_surface(struct mp_vdpau_ctx *ctx,
rgb_format = (VdpChromaType)-1;
}
pthread_mutex_lock(&ctx->pool_lock);
mp_mutex_lock(&ctx->pool_lock);
// Destroy all unused surfaces that don't have matching parameters
for (int n = 0; n < MAX_VIDEO_SURFACES; n++) {
@ -317,7 +317,7 @@ done: ;
if (surface_index >= 0)
mpi = create_ref(ctx, surface_index);
pthread_mutex_unlock(&ctx->pool_lock);
mp_mutex_unlock(&ctx->pool_lock);
if (!mpi)
MP_ERR(ctx, "no surfaces available in mp_vdpau_get_video_surface\n");
@ -363,8 +363,8 @@ static void free_device_ref(struct AVHWDeviceContext *hwctx)
if (ctx->close_display)
XCloseDisplay(ctx->x11);
pthread_mutex_destroy(&ctx->pool_lock);
pthread_mutex_destroy(&ctx->preempt_lock);
mp_mutex_destroy(&ctx->pool_lock);
mp_mutex_destroy(&ctx->preempt_lock);
talloc_free(ctx);
}
@ -388,8 +388,8 @@ struct mp_vdpau_ctx *mp_vdpau_create_device_x11(struct mp_log *log, Display *x11
.av_device_ref = avref,
},
};
mpthread_mutex_init_recursive(&ctx->preempt_lock);
pthread_mutex_init(&ctx->pool_lock, NULL);
mp_mutex_init_type(&ctx->preempt_lock, MP_MUTEX_RECURSIVE);
mp_mutex_init(&ctx->pool_lock);
hwctx->free = free_device_ref;
hwctx->user_opaque = ctx;

View File

@ -4,13 +4,12 @@
#include <stdbool.h>
#include <inttypes.h>
#include <pthread.h>
#include <vdpau/vdpau.h>
#include <vdpau/vdpau_x11.h>
#include "common/msg.h"
#include "hwdec.h"
#include "osdep/threads.h"
#include "config.h"
#if !HAVE_GPL
@ -64,7 +63,7 @@ struct mp_vdpau_ctx {
VdpGetProcAddress *get_proc_address;
VdpDevice vdp_device;
pthread_mutex_t preempt_lock;
mp_mutex preempt_lock;
bool is_preempted; // set to true during unavailability
uint64_t preemption_counter; // incremented after _restoring_
bool preemption_user_notified;
@ -72,7 +71,7 @@ struct mp_vdpau_ctx {
VdpOutputSurface preemption_obj; // dummy for reliable preempt. check
// Surface pool
pthread_mutex_t pool_lock;
mp_mutex pool_lock;
int64_t age_counter;
struct surface_entry {
VdpVideoSurface surface;