/******************************************************************************
    Copyright (C) 2013-2014 by Hugh Bailey <obs.jim@gmail.com>

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
******************************************************************************/

#include <inttypes.h>

#include "media-io/format-conversion.h"
#include "media-io/video-frame.h"
#include "media-io/audio-io.h"
#include "util/threading.h"
#include "util/platform.h"
#include "callback/calldata.h"
#include "graphics/matrix3.h"
#include "graphics/vec3.h"

#include "obs.h"
#include "obs-internal.h"

static inline bool source_valid(struct obs_source *source)
{
	return source && source->context.data;
}

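/* performs a linear search of the given source type list for a matching
 * type id */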
static inline const struct obs_source_info *find_source(struct darray *list,
		const char *id)
{
	size_t i;
	struct obs_source_info *array = list->array;

	for (i = 0; i < list->num; i++) {
		struct obs_source_info *info = array+i;
		if (strcmp(info->id, id) == 0)
			return info;
	}

	return NULL;
}

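/* selects the registered type list (input, filter, or transition) for the
 * given source type and searches it by id */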
static const struct obs_source_info *get_source_info(enum obs_source_type type,
		const char *id)
{
	struct darray *list = NULL;

	switch (type) {
	case OBS_SOURCE_TYPE_INPUT:
		list = &obs->input_types.da;
		break;

	case OBS_SOURCE_TYPE_FILTER:
		list = &obs->filter_types.da;
		break;

	case OBS_SOURCE_TYPE_TRANSITION:
		list = &obs->transition_types.da;
		break;
	}

	return find_source(list, id);
}

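/* signals added to each source's signal handler when its context is
 * initialized */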
static const char *source_signals[] = {
	"void destroy(ptr source)",
	"void add(ptr source)",
	"void remove(ptr source)",
	"void activate(ptr source)",
	"void deactivate(ptr source)",
	"void show(ptr source)",
	"void hide(ptr source)",
	"void rename(ptr source, string new_name, string prev_name)",
	"void volume(ptr source, in out float volume)",
	"void volume_level(ptr source, float level, float magnitude, "
		"float peak)",
	NULL
};

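/* initializes the source's context (name, settings, signal handler) and
 * registers the source signal declarations above */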
bool obs_source_init_context(struct obs_source *source,
		obs_data_t settings, const char *name)
{
	if (!obs_context_data_init(&source->context, settings, name))
		return false;

	return signal_handler_add_array(source->context.signals,
			source_signals);
}

const char *obs_source_getdisplayname(enum obs_source_type type, const char *id)
{
	const struct obs_source_info *info = get_source_info(type, id);
	return (info != NULL) ? info->getname() : NULL;
}

/* internal initialization */
bool obs_source_init(struct obs_source *source,
		const struct obs_source_info *info)
{
	source->refs = 1;
	source->user_volume = 1.0f;
	source->present_volume = 0.0f;
	source->sync_offset = 0;
	pthread_mutex_init_value(&source->filter_mutex);
	pthread_mutex_init_value(&source->video_mutex);
	pthread_mutex_init_value(&source->audio_mutex);

	if (pthread_mutex_init(&source->filter_mutex, NULL) != 0)
		return false;
	if (pthread_mutex_init(&source->audio_mutex, NULL) != 0)
		return false;
	if (pthread_mutex_init(&source->video_mutex, NULL) != 0)
		return false;

	if (info->output_flags & OBS_SOURCE_AUDIO) {
		source->audio_line = audio_output_createline(obs->audio.audio,
				source->context.name);
		if (!source->audio_line) {
			blog(LOG_ERROR, "Failed to create audio line for "
			                "source '%s'", source->context.name);
			return false;
		}
	}

	obs_context_data_insert(&source->context,
			&obs->data.sources_mutex,
			&obs->data.first_source);
	return true;
}

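/* emits the given signals on the global obs signal handler and/or the
 * source's own signal handler, passing the source pointer as calldata */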
static inline void obs_source_dosignal(struct obs_source *source,
		const char *signal_obs, const char *signal_source)
{
	struct calldata data;

	calldata_init(&data);
	calldata_setptr(&data, "source", source);
	if (signal_obs)
		signal_handler_signal(obs->signals, signal_obs, &data);
	if (signal_source)
		signal_handler_signal(source->context.signals, signal_source,
				&data);
	calldata_free(&data);
}

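/* creates a source of the given type; the source object is kept even if the
 * type's create callback fails so that the user's settings are not lost */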
obs_source_t obs_source_create(enum obs_source_type type, const char *id,
		const char *name, obs_data_t settings)
{
	struct obs_source *source;

	const struct obs_source_info *info = get_source_info(type, id);
	if (!info) {
		blog(LOG_ERROR, "Source '%s' not found", id);
		return NULL;
	}

	source       = bzalloc(sizeof(struct obs_source));
	source->info = *info;

	if (!obs_source_init_context(source, settings, name))
		goto fail;

	if (info->defaults)
		info->defaults(source->context.settings);

	/* allow the source to be created even if creation fails so that the
	 * user's data doesn't become lost */
	source->context.data = info->create(source->context.settings, source);
	if (!source->context.data)
		blog(LOG_ERROR, "Failed to create source '%s'!", name);

	if (!obs_source_init(source, info))
		goto fail;

	blog(LOG_INFO, "source '%s' (%s) created", name, id);
	obs_source_dosignal(source, "source_create", NULL);
	return source;

fail:
	blog(LOG_ERROR, "obs_source_create failed");
	obs_source_destroy(source);
	return NULL;
}

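/* initializes a source_frame by allocating its planes via video_frame_init
 * and copying the resulting data pointers and line sizes */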
void source_frame_init(struct source_frame *frame, enum video_format format,
		uint32_t width, uint32_t height)
{
	struct video_frame vid_frame;

	if (!frame)
		return;

	video_frame_init(&vid_frame, format, width, height);
	frame->format = format;
	frame->width  = width;
	frame->height = height;

	for (size_t i = 0; i < MAX_AV_PLANES; i++) {
		frame->data[i]     = vid_frame.data[i];
		frame->linesize[i] = vid_frame.linesize[i];
	}
}

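/* destroys the source: signals destruction, detaches it from any filter
 * parent, releases its filters, and frees cached frames and audio data */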
void obs_source_destroy(struct obs_source *source)
{
	size_t i;

	if (!source)
		return;

	obs_context_data_remove(&source->context);

	blog(LOG_INFO, "source '%s' destroyed", source->context.name);

	obs_source_dosignal(source, "source_destroy", "destroy");

	if (source->context.data) {
		source->info.destroy(source->context.data);
		source->context.data = NULL;
	}

	if (source->filter_parent)
		obs_source_filter_remove(source->filter_parent, source);

	for (i = 0; i < source->filters.num; i++)
		obs_source_release(source->filters.array[i]);

	for (i = 0; i < source->video_frames.num; i++)
		source_frame_destroy(source->video_frames.array[i]);

	gs_entercontext(obs->video.graphics);
	texrender_destroy(source->async_convert_texrender);
	texture_destroy(source->async_texture);
	gs_leavecontext();

	for (i = 0; i < MAX_AV_PLANES; i++)
		bfree(source->audio_data.data[i]);

	audio_line_destroy(source->audio_line);
	audio_resampler_destroy(source->resampler);

	texrender_destroy(source->filter_texrender);
	da_free(source->video_frames);
	da_free(source->filters);
	pthread_mutex_destroy(&source->filter_mutex);
	pthread_mutex_destroy(&source->audio_mutex);
	pthread_mutex_destroy(&source->video_mutex);
	obs_context_data_free(&source->context);
	bfree(source);
}

void obs_source_addref(obs_source_t source)
{
	if (source)
		os_atomic_inc_long(&source->refs);
}

void obs_source_release(obs_source_t source)
{
	if (!source)
		return;

	if (os_atomic_dec_long(&source->refs) == 0)
		obs_source_destroy(source);
}

void obs_source_remove(obs_source_t source)
{
	struct obs_core_data *data = &obs->data;
	size_t id;
	bool   exists;

	pthread_mutex_lock(&data->sources_mutex);

	if (!source || source->removed) {
		pthread_mutex_unlock(&data->sources_mutex);
		return;
	}

	source->removed = true;

	obs_source_addref(source);

	id = da_find(data->user_sources, &source, 0);
	exists = (id != DARRAY_INVALID);
	if (exists) {
		da_erase(data->user_sources, id);
		obs_source_release(source);
	}

	pthread_mutex_unlock(&data->sources_mutex);

	if (exists)
		obs_source_dosignal(source, "source_remove", "remove");

	obs_source_release(source);
}

bool obs_source_removed(obs_source_t source)
{
	return source ? source->removed : true;
}

J
jp9000 已提交
313 314 315 316 317 318 319 320
static inline obs_data_t get_defaults(const struct obs_source_info *info)
{
	obs_data_t settings = obs_data_create();
	if (info->defaults)
		info->defaults(settings);
	return settings;
}

J
jp9000 已提交
321 322 323
obs_data_t obs_source_settings(enum obs_source_type type, const char *id)
{
	const struct obs_source_info *info = get_source_info(type, id);
J
jp9000 已提交
324
	return (info) ? get_defaults(info) : NULL;
J
jp9000 已提交
325 326
}

327
obs_properties_t obs_get_source_properties(enum obs_source_type type,
328
		const char *id)
J
jp9000 已提交
329
{
J
jp9000 已提交
330
	const struct obs_source_info *info = get_source_info(type, id);
J
jp9000 已提交
331 332 333 334
	if (info && info->properties) {
		obs_data_t       defaults = get_defaults(info);
		obs_properties_t properties;

335
		properties = info->properties();
J
jp9000 已提交
336 337 338 339
		obs_properties_apply_settings(properties, defaults);
		obs_data_release(defaults);
		return properties;
	}
J
jp9000 已提交
340 341 342
	return NULL;
}

343
obs_properties_t obs_source_properties(obs_source_t source)
344
{
345
	if (source_valid(source) && source->info.properties) {
J
jp9000 已提交
346
		obs_properties_t props;
347
		props = source->info.properties();
348
		obs_properties_apply_settings(props, source->context.settings);
J
jp9000 已提交
349 350 351
		return props;
	}

352 353 354
	return NULL;
}

355
uint32_t obs_source_get_output_flags(obs_source_t source)
J
jp9000 已提交
356
{
J
jp9000 已提交
357
	return source ? source->info.output_flags : 0;
J
jp9000 已提交
358 359
}

360 361
static void obs_source_deferred_update(obs_source_t source)
{
362 363 364 365
	if (source->context.data && source->info.update)
		source->info.update(source->context.data,
				source->context.settings);

366 367 368
	source->defer_update = false;
}

369
void obs_source_update(obs_source_t source, obs_data_t settings)
J
jp9000 已提交
370
{
J
jp9000 已提交
371 372
	if (!source) return;

373 374 375 376 377 378 379 380
	if (settings)
		obs_data_apply(source->context.settings, settings);

	if (source->info.output_flags & OBS_SOURCE_VIDEO) {
		source->defer_update = true;
	} else if (source->context.data && source->info.update) {
		source->info.update(source->context.data,
				source->context.settings);
381
	}
J
jp9000 已提交
382 383
}

384
static void activate_source(obs_source_t source)
J
jp9000 已提交
385
{
386
	if (source->context.data && source->info.activate)
387
		source->info.activate(source->context.data);
388
	obs_source_dosignal(source, "source_activate", "activate");
J
jp9000 已提交
389 390
}

391
static void deactivate_source(obs_source_t source)
J
jp9000 已提交
392
{
393
	if (source->context.data && source->info.deactivate)
394
		source->info.deactivate(source->context.data);
395
	obs_source_dosignal(source, "source_deactivate", "deactivate");
396
}
397

398 399
static void show_source(obs_source_t source)
{
400
	if (source->context.data && source->info.show)
401
		source->info.show(source->context.data);
402
	obs_source_dosignal(source, "source_show", "show");
403 404 405 406
}

static void hide_source(obs_source_t source)
{
407
	if (source->context.data && source->info.hide)
408
		source->info.hide(source->context.data);
409
	obs_source_dosignal(source, "source_hide", "hide");
410 411 412 413
}

static void activate_tree(obs_source_t parent, obs_source_t child, void *param)
{
J
jp9000 已提交
414
	if (os_atomic_inc_long(&child->activate_refs) == 1)
415
		activate_source(child);
J
jp9000 已提交
416 417 418

	UNUSED_PARAMETER(parent);
	UNUSED_PARAMETER(param);
419 420 421 422 423
}

static void deactivate_tree(obs_source_t parent, obs_source_t child,
		void *param)
{
J
jp9000 已提交
424
	if (os_atomic_dec_long(&child->activate_refs) == 0)
425
		deactivate_source(child);
J
jp9000 已提交
426 427 428

	UNUSED_PARAMETER(parent);
	UNUSED_PARAMETER(param);
429 430
}

431 432
static void show_tree(obs_source_t parent, obs_source_t child, void *param)
{
J
jp9000 已提交
433
	if (os_atomic_inc_long(&child->show_refs) == 1)
434 435 436 437 438 439 440 441
		show_source(child);

	UNUSED_PARAMETER(parent);
	UNUSED_PARAMETER(param);
}

static void hide_tree(obs_source_t parent, obs_source_t child, void *param)
{
J
jp9000 已提交
442
	if (os_atomic_dec_long(&child->show_refs) == 0)
443 444 445 446 447 448 449
		hide_source(child);

	UNUSED_PARAMETER(parent);
	UNUSED_PARAMETER(param);
}

void obs_source_activate(obs_source_t source, enum view_type type)
450 451 452
{
	if (!source) return;

J
jp9000 已提交
453
	if (os_atomic_inc_long(&source->show_refs) == 1) {
454 455 456 457 458
		show_source(source);
		obs_source_enum_tree(source, show_tree, NULL);
	}

	if (type == MAIN_VIEW) {
J
jp9000 已提交
459
		if (os_atomic_inc_long(&source->activate_refs) == 1) {
460 461 462 463
			activate_source(source);
			obs_source_enum_tree(source, activate_tree, NULL);
			obs_source_set_present_volume(source, 1.0f);
		}
464 465 466
	}
}

467
void obs_source_deactivate(obs_source_t source, enum view_type type)
468 469 470
{
	if (!source) return;

J
jp9000 已提交
471
	if (os_atomic_dec_long(&source->show_refs) == 0) {
472 473 474 475 476
		hide_source(source);
		obs_source_enum_tree(source, hide_tree, NULL);
	}

	if (type == MAIN_VIEW) {
J
jp9000 已提交
477
		if (os_atomic_dec_long(&source->activate_refs) == 0) {
478 479 480 481
			deactivate_source(source);
			obs_source_enum_tree(source, deactivate_tree, NULL);
			obs_source_set_present_volume(source, 0.0f);
		}
482
	}
J
jp9000 已提交
483 484
}

485
void obs_source_video_tick(obs_source_t source, float seconds)
J
jp9000 已提交
486
{
J
jp9000 已提交
487 488
	if (!source) return;

489 490 491
	if (source->defer_update)
		obs_source_deferred_update(source);

J
jp9000 已提交
492 493 494 495
	/* reset the filter render texture information once every frame */
	if (source->filter_texrender)
		texrender_reset(source->filter_texrender);

496
	if (source->context.data && source->info.video_tick)
497
		source->info.video_tick(source->context.data, seconds);
J
jp9000 已提交
498 499
}

500
/* unless the value is 3+ hours worth of frames, this won't overflow */
J
jp9000 已提交
501
static inline uint64_t conv_frames_to_time(size_t frames)
502
{
J
jp9000 已提交
503 504
	const struct audio_output_info *info;
	info = audio_output_getinfo(obs->audio.audio);
505 506 507

	return (uint64_t)frames * 1000000000ULL /
		(uint64_t)info->samples_per_sec;
508 509
}

510
/* maximum "direct" timestamp variance in nanoseconds */
511
#define MAX_TS_VAR          5000000000ULL
512
/* maximum time that timestamp can jump in nanoseconds */
513 514 515 516
#define MAX_TIMESTAMP_JUMP  2000000000ULL
/* time threshold in nanoseconds to ensure audio timing is as seamless as
 * possible */
#define TS_SMOOTHING_THRESHOLD 70000000ULL

static inline void reset_audio_timing(obs_source_t source, uint64_t timestamp)
{
	source->timing_set    = true;
	source->timing_adjust = os_gettime_ns() - timestamp;
}

524 525
static inline void handle_ts_jump(obs_source_t source, uint64_t expected,
		uint64_t ts, uint64_t diff)
526
{
J
jp9000 已提交
527
	blog(LOG_DEBUG, "Timestamp for source '%s' jumped by '%"PRIu64"', "
528
	                "expected value %"PRIu64", input value %"PRIu64,
529
	                source->context.name, diff, expected, ts);

	/* if the source has video, ignore audio data until reset */
532
	if (source->info.output_flags & OBS_SOURCE_ASYNC)
533
		os_atomic_dec_long(&source->av_sync_ref);
534
	else
535 536 537
		reset_audio_timing(source, ts);
}

538 539 540 541 542 543 544 545 546 547
#define VOL_MIN -96.0f
#define VOL_MAX  0.0f

static inline float to_db(float val)
{
	float db = 20.0f * log10f(val);
	return isfinite(db) ? db : VOL_MIN;
}

static void calc_volume_levels(struct obs_source *source, float *array,
548
		size_t frames, float volume)
549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567
{
	float sum_val = 0.0f;
	float max_val = 0.0f;
	float rms_val = 0.0f;

	const uint32_t sample_rate    = audio_output_samplerate(obs_audio());
	const size_t   channels       = audio_output_channels(obs_audio());
	const size_t   count          = frames * channels;
	const size_t   vol_peak_delay = sample_rate * 3;
	const float    alpha          = 0.15f;

	for (size_t i = 0; i < count; i++) {
		float val      = array[i];
		float val_pow2 = val * val;

		sum_val += val_pow2;
		max_val  = fmaxf(max_val, val_pow2);
	}

	/*
	  We want the volume meters to scale linearly with respect to the
	  current volume, so there is no need to apply the volume here.
	*/

	UNUSED_PARAMETER(volume);

	rms_val = to_db(sqrtf(sum_val / (float)count));
	max_val = to_db(sqrtf(max_val));

	if (max_val > source->vol_max)
		source->vol_max = max_val;
	else
		source->vol_max = alpha * source->vol_max +
			(1.0f - alpha) * max_val;

	if (source->vol_max > source->vol_peak ||
	    source->vol_update_count > vol_peak_delay) {
		source->vol_peak         = source->vol_max;
		source->vol_update_count = 0;
	} else {
		source->vol_update_count += count;
	}

	source->vol_mag = alpha * rms_val + source->vol_mag * (1.0f - alpha);
}

/* TODO update peak/etc later */
static void obs_source_update_volume_level(obs_source_t source,
		struct audio_data *in)
{
	if (source && in) {
		struct calldata data = {0};

602 603
		calc_volume_levels(source, (float*)in->data[0], in->frames,
				in->volume);
604 605 606 607 608 609 610 611 612 613 614 615 616 617 618

		calldata_setptr  (&data, "source",    source);
		calldata_setfloat(&data, "level",     source->vol_max);
		calldata_setfloat(&data, "magnitude", source->vol_mag);
		calldata_setfloat(&data, "peak",      source->vol_peak);

		signal_handler_signal(source->context.signals, "volume_level",
				&data);
		signal_handler_signal(obs->signals, "source_volume_level",
				&data);

		calldata_free(&data);
	}
}

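/* applies timestamp smoothing/jump handling and the effective volume to the
 * audio data, then submits it to the source's audio line */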
static void source_output_audio_line(obs_source_t source,
		const struct audio_data *data)
{
	struct audio_data in = *data;
623
	uint64_t diff;
624 625

	if (!source->timing_set) {
626
		reset_audio_timing(source, in.timestamp);
627 628

		/* detects 'directly' set timestamps as long as they're within
629
		 * a certain threshold */
630
		if ((source->timing_adjust + MAX_TS_VAR) < MAX_TS_VAR * 2)
631
			source->timing_adjust = 0;
632

633
	} else {
634
		bool ts_under = (in.timestamp < source->next_audio_ts_min);
635

636 637 638 639 640
		diff = ts_under ?
			(source->next_audio_ts_min - in.timestamp) :
			(in.timestamp - source->next_audio_ts_min);

		/* smooth audio if lower or within threshold */
641
		if (diff > MAX_TIMESTAMP_JUMP)
642 643 644 645
			handle_ts_jump(source, source->next_audio_ts_min,
					in.timestamp, diff);
		else if (ts_under || diff < TS_SMOOTHING_THRESHOLD)
			in.timestamp = source->next_audio_ts_min;
646 647
	}

648
	source->next_audio_ts_min = in.timestamp +
J
jp9000 已提交
649
		conv_frames_to_time(in.frames);
650

651
	if (source->av_sync_ref != 0)
652 653
		return;

J
jp9000 已提交
654
	in.timestamp += source->timing_adjust + source->sync_offset;
J
jp9000 已提交
655 656
	in.volume = source->user_volume * source->present_volume *
		obs->audio.user_volume * obs->audio.present_volume;
657

658
	audio_line_output(source->audio_line, &in);
659
	obs_source_update_volume_level(source, &in);
660 661
}

662 663 664 665 666 667 668 669
enum convert_type {
	CONVERT_NONE,
	CONVERT_NV12,
	CONVERT_420,
	CONVERT_422_U,
	CONVERT_422_Y,
};

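/* maps a raw video format to the conversion needed before the frame can be
 * used as a texture */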
static inline enum convert_type get_convert_type(enum video_format format)
671
{
672
	switch (format) {
673 674 675 676 677 678 679 680 681 682 683
	case VIDEO_FORMAT_I420:
		return CONVERT_420;
	case VIDEO_FORMAT_NV12:
		return CONVERT_NV12;

	case VIDEO_FORMAT_YVYU:
	case VIDEO_FORMAT_YUY2:
		return CONVERT_422_Y;
	case VIDEO_FORMAT_UYVY:
		return CONVERT_422_U;

684
	case VIDEO_FORMAT_NONE:
685 686 687 688 689 690 691 692 693
	case VIDEO_FORMAT_RGBA:
	case VIDEO_FORMAT_BGRA:
	case VIDEO_FORMAT_BGRX:
		return CONVERT_NONE;
	}

	return CONVERT_NONE;
}

694 695 696 697
static inline bool set_packed422_sizes(struct obs_source *source,
		struct source_frame *frame)
{
	source->async_convert_height = frame->height;
698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714
	source->async_convert_width  = frame->width / 2;
	source->async_texture_format = GS_BGRA;
	return true;
}

static inline bool set_planar420_sizes(struct obs_source *source,
		struct source_frame *frame)
{
	uint32_t size = frame->width * frame->height;
	size += size/2;

	source->async_convert_width   = frame->width;
	source->async_convert_height  = (size / frame->width + 1) & 0xFFFFFFFE;
	source->async_texture_format  = GS_R8;
	source->async_plane_offset[0] = frame->width * frame->height;
	source->async_plane_offset[1] = source->async_plane_offset[0] +
		frame->width * frame->height / 4;
715 716 717 718 719 720 721 722 723 724 725 726
	return true;
}

static inline bool init_gpu_conversion(struct obs_source *source,
		struct source_frame *frame)
{
	switch (get_convert_type(frame->format)) {
		case CONVERT_422_Y:
		case CONVERT_422_U:
			return set_packed422_sizes(source, frame);

		case CONVERT_420:
727 728 729 730
			return set_planar420_sizes(source, frame);

		case CONVERT_NV12:
			assert(false && "NV12 not yet implemented");
731 732 733 734 735 736 737 738 739 740 741
			/* TODO: implement conversion */
			break;

		case CONVERT_NONE:
			assert(false && "No conversion requested");
			break;

	}
	return false;
}

742 743 744 745 746 747 748 749 750 751 752
static inline enum gs_color_format convert_video_format(
		enum video_format format)
{
	if (format == VIDEO_FORMAT_RGBA)
		return GS_RGBA;
	else if (format == VIDEO_FORMAT_BGRA)
		return GS_BGRA;

	return GS_BGRX;
}

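/* (re)creates the async video texture, and the conversion texrender when GPU
 * conversion is used, whenever the frame size or format changes */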
static inline bool set_async_texture_size(struct obs_source *source,
		struct source_frame *frame)
{
	enum convert_type prev, cur;
	prev = get_convert_type(source->async_format);
	cur  = get_convert_type(frame->format);
	if (source->async_texture) {
		if (source->async_width  == frame->width &&
		    source->async_height == frame->height &&
		    prev == cur)
			return true;
	}

	texture_destroy(source->async_texture);
	texrender_destroy(source->async_convert_texrender);
	source->async_convert_texrender = NULL;

	if (cur != CONVERT_NONE && init_gpu_conversion(source, frame)) {
		source->async_gpu_conversion = true;

		source->async_convert_texrender =
774
			texrender_create(GS_BGRX, GS_ZS_NONE);
775 776 777 778

		source->async_texture = gs_create_texture(
				source->async_convert_width,
				source->async_convert_height,
779 780
				source->async_texture_format,
				1, NULL, GS_DYNAMIC);
781 782

	} else {
783 784
		enum gs_color_format format = convert_video_format(
				frame->format);
785 786 787 788
		source->async_gpu_conversion = false;

		source->async_texture = gs_create_texture(
				frame->width, frame->height,
789
				format, 1, NULL, GS_DYNAMIC);
790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809
	}

	if (!source->async_texture)
		return false;

	source->async_width  = frame->width;
	source->async_height = frame->height;
	return true;
}

static void upload_raw_frame(texture_t tex, const struct source_frame *frame)
{
	switch (get_convert_type(frame->format)) {
		case CONVERT_422_U:
		case CONVERT_422_Y:
			texture_setimage(tex, frame->data[0],
					frame->linesize[0], false);
			break;

		case CONVERT_420:
810 811 812 813 814
			texture_setimage(tex, frame->data[0],
					frame->width, false);
			break;

		case CONVERT_NV12:
815 816 817 818 819 820 821 822 823 824 825 826 827
			assert(false && "Conversion not yet implemented");
			break;

		case CONVERT_NONE:
			assert(false && "No conversion requested");
			break;
	}
}

static const char *select_conversion_technique(enum video_format format)
{
	switch (format) {
		case VIDEO_FORMAT_UYVY:
828
			return "UYVY_Reverse";
829 830 831 832 833 834 835 836

		case VIDEO_FORMAT_YUY2:
			return "YUY2_Reverse";

		case VIDEO_FORMAT_YVYU:
			return "YVYU_Reverse";

		case VIDEO_FORMAT_I420:
837 838 839
			return "I420_Reverse";

		case VIDEO_FORMAT_NV12:
840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855
			assert(false && "Conversion not yet implemented");
			break;

		case VIDEO_FORMAT_BGRA:
		case VIDEO_FORMAT_BGRX:
		case VIDEO_FORMAT_RGBA:
		case VIDEO_FORMAT_NONE:
			assert(false && "No conversion requested");
			break;
	}
	return NULL;
}

static inline void set_eparam(effect_t effect, const char *name, float val)
{
	eparam_t param = effect_getparambyname(effect, name);
856
	effect_setfloat(param, val);
857 858 859 860 861 862 863 864 865 866 867 868 869 870 871
}

static bool update_async_texrender(struct obs_source *source,
		const struct source_frame *frame)
{
	texture_t   tex       = source->async_texture;
	texrender_t texrender = source->async_convert_texrender;

	texrender_reset(texrender);

	upload_raw_frame(tex, frame);

	uint32_t cx = source->async_width;
	uint32_t cy = source->async_height;

872 873 874
	float convert_width  = (float)source->async_convert_width;
	float convert_height = (float)source->async_convert_height;

875 876 877 878 879 880 881 882 883 884
	effect_t conv = obs->video.conversion_effect;
	technique_t tech = effect_gettechnique(conv,
			select_conversion_technique(frame->format));

	if (!texrender_begin(texrender, cx, cy))
		return false;

	technique_begin(tech);
	technique_beginpass(tech, 0);

885
	effect_settexture(effect_getparambyname(conv, "image"), tex);
886 887 888 889
	set_eparam(conv, "width",  (float)cx);
	set_eparam(conv, "height", (float)cy);
	set_eparam(conv, "width_i",  1.0f / cx);
	set_eparam(conv, "height_i", 1.0f / cy);
890
	set_eparam(conv, "width_d2",  cx * 0.5f);
891
	set_eparam(conv, "height_d2", cy * 0.5f);
892
	set_eparam(conv, "width_d2_i",  1.0f / (cx * 0.5f));
893
	set_eparam(conv, "height_d2_i", 1.0f / (cy * 0.5f));
894 895 896 897 898 899 900 901 902 903
	set_eparam(conv, "input_width",  convert_width);
	set_eparam(conv, "input_height", convert_height);
	set_eparam(conv, "input_width_i",  1.0f / convert_width);
	set_eparam(conv, "input_height_i", 1.0f / convert_height);
	set_eparam(conv, "input_width_i_d2",  (1.0f / convert_width)  * 0.5f);
	set_eparam(conv, "input_height_i_d2", (1.0f / convert_height) * 0.5f);
	set_eparam(conv, "u_plane_offset",
			(float)source->async_plane_offset[0]);
	set_eparam(conv, "v_plane_offset",
			(float)source->async_plane_offset[1]);
904 905 906 907 908 909 910 911 912 913 914 915 916

	gs_ortho(0.f, (float)cx, 0.f, (float)cy, -100.f, 100.f);

	gs_draw_sprite(tex, 0, cx, cy);

	technique_endpass(tech);
	technique_end(tech);

	texrender_end(texrender);

	return true;
}

917 918
static bool update_async_texture(struct obs_source *source,
		const struct source_frame *frame)
919
{
920 921 922
	texture_t         tex       = source->async_texture;
	texrender_t       texrender = source->async_convert_texrender;
	enum convert_type type      = get_convert_type(frame->format);
923
	uint8_t           *ptr;
924 925
	uint32_t          linesize;

926 927 928
	source->async_format     = frame->format;
	source->async_flip       = frame->flip;
	source->async_full_range = frame->full_range;
929 930
	memcpy(source->async_color_matrix, frame->color_matrix,
			sizeof(frame->color_matrix));
931 932 933 934
	memcpy(source->async_color_range_min, frame->color_range_min,
			sizeof frame->color_range_min);
	memcpy(source->async_color_range_max, frame->color_range_max,
			sizeof frame->color_range_max);
935

936 937 938
	if (source->async_gpu_conversion && texrender)
		return update_async_texrender(source, frame);

939
	if (type == CONVERT_NONE) {
940
		texture_setimage(tex, frame->data[0], frame->linesize[0],
941
				false);
942 943 944
		return true;
	}

945
	if (!texture_map(tex, &ptr, &linesize))
946 947 948
		return false;

	if (type == CONVERT_420)
J
jp9000 已提交
949 950 951
		decompress_420((const uint8_t* const*)frame->data,
				frame->linesize,
				0, frame->height, ptr, linesize);
952 953

	else if (type == CONVERT_NV12)
J
jp9000 已提交
954 955 956
		decompress_nv12((const uint8_t* const*)frame->data,
				frame->linesize,
				0, frame->height, ptr, linesize);
957 958

	else if (type == CONVERT_422_Y)
959
		decompress_422(frame->data[0], frame->linesize[0],
J
jp9000 已提交
960
				0, frame->height, ptr, linesize, true);
961 962

	else if (type == CONVERT_422_U)
963
		decompress_422(frame->data[0], frame->linesize[0],
J
jp9000 已提交
964
				0, frame->height, ptr, linesize, false);
965 966 967 968 969

	texture_unmap(tex);
	return true;
}

970
static inline void obs_source_draw_texture(struct obs_source *source,
971 972
		effect_t effect, float *color_matrix,
		float const *color_range_min, float const *color_range_max)
973
{
974 975
	texture_t tex = source->async_texture;
	eparam_t  param;
976

977 978 979
	if (source->async_convert_texrender)
		tex = texrender_gettexture(source->async_convert_texrender);

P
Palana 已提交
980
	if (color_range_min) {
981 982
		size_t const size = sizeof(float) * 3;
		param = effect_getparambyname(effect, "color_range_min");
983
		effect_setval(param, color_range_min, size);
P
Palana 已提交
984
	}
985

P
Palana 已提交
986 987
	if (color_range_max) {
		size_t const size = sizeof(float) * 3;
988
		param = effect_getparambyname(effect, "color_range_max");
989
		effect_setval(param, color_range_max, size);
P
Palana 已提交
990
	}
991

P
Palana 已提交
992
	if (color_matrix) {
993
		param = effect_getparambyname(effect, "color_matrix");
994
		effect_setval(param, color_matrix, sizeof(float) * 16);
995 996
	}

J
jp9000 已提交
997
	param = effect_getparambyname(effect, "image");
998
	effect_settexture(param, tex);
999

1000 1001
	gs_draw_sprite(tex, source->async_flip ? GS_FLIP_V : 0, 0, 0);
}
1002

1003 1004
static void obs_source_draw_async_texture(struct obs_source *source)
{
1005 1006 1007 1008 1009
	effect_t    effect        = gs_geteffect();
	bool        yuv           = format_is_yuv(source->async_format);
	bool        limited_range = yuv && !source->async_full_range;
	const char  *type         = yuv ? "DrawMatrix" : "Draw";
	bool        def_draw      = (!effect);
1010
	technique_t tech          = NULL;
1011 1012 1013 1014 1015 1016 1017 1018 1019

	if (def_draw) {
		effect = obs_get_default_effect();
		tech = effect_gettechnique(effect, type);
		technique_begin(tech);
		technique_beginpass(tech, 0);
	}

	obs_source_draw_texture(source, effect,
1020 1021 1022
			yuv ? source->async_color_matrix : NULL,
			limited_range ? source->async_color_range_min : NULL,
			limited_range ? source->async_color_range_max : NULL);
1023 1024 1025 1026 1027

	if (def_draw) {
		technique_endpass(tech);
		technique_end(tech);
	}
1028 1029
}

1030 1031 1032
static void obs_source_render_async_video(obs_source_t source)
{
	struct source_frame *frame = obs_source_getframe(source);
1033 1034 1035 1036 1037 1038
	if (frame) {
		if (!set_async_texture_size(source, frame))
			return;
		if (!update_async_texture(source, frame))
			return;
	}
1039

1040 1041
	if (source->async_texture)
		obs_source_draw_async_texture(source);
1042 1043 1044 1045

	obs_source_releaseframe(source, frame);
}

1046 1047 1048 1049 1050 1051 1052
static inline void obs_source_render_filters(obs_source_t source)
{
	source->rendering_filter = true;
	obs_source_video_render(source->filters.array[0]);
	source->rendering_filter = false;
}

J
jp9000 已提交
1053 1054
static inline void obs_source_default_render(obs_source_t source,
		bool color_matrix)
1055 1056
{
	effect_t    effect     = obs->video.default_effect;
J
jp9000 已提交
1057
	const char  *tech_name = color_matrix ? "DrawMatrix" : "Draw";
1058 1059 1060 1061 1062 1063
	technique_t tech       = effect_gettechnique(effect, tech_name);
	size_t      passes, i;

	passes = technique_begin(tech);
	for (i = 0; i < passes; i++) {
		technique_beginpass(tech, i);
1064 1065
		if (source->context.data)
			source->info.video_render(source->context.data, effect);
1066 1067 1068 1069 1070 1071 1072
		technique_endpass(tech);
	}
	technique_end(tech);
}

static inline void obs_source_main_render(obs_source_t source)
{
1073 1074 1075
	uint32_t flags      = source->info.output_flags;
	bool color_matrix   = (flags & OBS_SOURCE_COLOR_MATRIX) != 0;
	bool custom_draw    = (flags & OBS_SOURCE_CUSTOM_DRAW) != 0;
1076 1077
	bool default_effect = !source->filter_parent &&
	                      source->filters.num == 0 &&
1078
	                      !custom_draw;
1079 1080

	if (default_effect)
J
jp9000 已提交
1081
		obs_source_default_render(source, color_matrix);
1082
	else if (source->context.data)
1083
		source->info.video_render(source->context.data,
1084
				custom_draw ? NULL : gs_geteffect());
1085 1086
}

1087
void obs_source_video_render(obs_source_t source)
J
jp9000 已提交
1088
{
1089
	if (!source_valid(source)) return;
J
jp9000 已提交
1090

1091 1092
	if (source->filters.num && !source->rendering_filter)
		obs_source_render_filters(source);
1093

1094 1095 1096 1097
	else if (source->info.video_render)
		obs_source_main_render(source);

	else if (source->filter_target)
1098 1099
		obs_source_video_render(source->filter_target);

1100
	else
1101
		obs_source_render_async_video(source);
J
jp9000 已提交
1102 1103
}

J
jp9000 已提交
1104
uint32_t obs_source_getwidth(obs_source_t source)
J
jp9000 已提交
1105
{
1106
	if (!source_valid(source)) return 0;
1107 1108

	if (source->info.getwidth)
1109
		return source->info.getwidth(source->context.data);
1110
	return source->async_width;
J
jp9000 已提交
1111 1112
}

J
jp9000 已提交
1113
uint32_t obs_source_getheight(obs_source_t source)
J
jp9000 已提交
1114
{
1115
	if (!source_valid(source)) return 0;
1116 1117

	if (source->info.getheight)
1118
		return source->info.getheight(source->context.data);
1119
	return source->async_height;
J
jp9000 已提交
1120 1121
}

1122 1123
obs_source_t obs_filter_getparent(obs_source_t filter)
{
J
jp9000 已提交
1124
	return filter ? filter->filter_parent : NULL;
1125 1126
}

1127
obs_source_t obs_filter_gettarget(obs_source_t filter)
J
jp9000 已提交
1128
{
J
jp9000 已提交
1129
	return filter ? filter->filter_target : NULL;
J
jp9000 已提交
1130 1131
}

1132
void obs_source_filter_add(obs_source_t source, obs_source_t filter)
J
jp9000 已提交
1133
{
J
jp9000 已提交
1134 1135 1136
	if (!source || !filter)
		return;

1137 1138
	pthread_mutex_lock(&source->filter_mutex);

J
jp9000 已提交
1139
	if (da_find(source->filters, &filter, 0) != DARRAY_INVALID) {
J
jp9000 已提交
1140 1141 1142 1143 1144 1145
		blog(LOG_WARNING, "Tried to add a filter that was already "
		                  "present on the source");
		return;
	}

	if (source->filters.num) {
1146
		obs_source_t *back = da_end(source->filters);
J
jp9000 已提交
1147 1148 1149 1150
		(*back)->filter_target = filter;
	}

	da_push_back(source->filters, &filter);
1151 1152 1153 1154

	pthread_mutex_unlock(&source->filter_mutex);

	filter->filter_parent = source;
J
jp9000 已提交
1155 1156 1157
	filter->filter_target = source;
}

1158
void obs_source_filter_remove(obs_source_t source, obs_source_t filter)
J
jp9000 已提交
1159
{
1160 1161
	size_t idx;

J
jp9000 已提交
1162 1163 1164
	if (!source || !filter)
		return;

1165 1166 1167
	pthread_mutex_lock(&source->filter_mutex);

	idx = da_find(source->filters, &filter, 0);
J
jp9000 已提交
1168
	if (idx == DARRAY_INVALID)
J
jp9000 已提交
1169 1170 1171
		return;

	if (idx > 0) {
1172
		obs_source_t prev = source->filters.array[idx-1];
J
jp9000 已提交
1173 1174 1175 1176
		prev->filter_target = filter->filter_target;
	}

	da_erase(source->filters, idx);
1177 1178 1179 1180

	pthread_mutex_unlock(&source->filter_mutex);

	filter->filter_parent = NULL;
J
jp9000 已提交
1181 1182 1183
	filter->filter_target = NULL;
}

1184
void obs_source_filter_setorder(obs_source_t source, obs_source_t filter,
J
jp9000 已提交
1185 1186
		enum order_movement movement)
{
J
jp9000 已提交
1187 1188 1189 1190 1191 1192
	size_t idx, i;

	if (!source || !filter)
		return;

	idx = da_find(source->filters, &filter, 0);
J
jp9000 已提交
1193
	if (idx == DARRAY_INVALID)
J
jp9000 已提交
1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216
		return;

	if (movement == ORDER_MOVE_UP) {
		if (idx == source->filters.num-1)
			return;
		da_move_item(source->filters, idx, idx+1);

	} else if (movement == ORDER_MOVE_DOWN) {
		if (idx == 0)
			return;
		da_move_item(source->filters, idx, idx-1);

	} else if (movement == ORDER_MOVE_TOP) {
		if (idx == source->filters.num-1)
			return;
		da_move_item(source->filters, idx, source->filters.num-1);

	} else if (movement == ORDER_MOVE_BOTTOM) {
		if (idx == 0)
			return;
		da_move_item(source->filters, idx, 0);
	}

1217
	/* reorder filter targets, not the nicest way of dealing with things */
J
jp9000 已提交
1218
	for (i = 0; i < source->filters.num; i++) {
		obs_source_t next_filter = (i == source->filters.num-1) ?
			source : source->filters.array[i+1];
		source->filters.array[i]->filter_target = next_filter;
	}
}

1225
obs_data_t obs_source_getsettings(obs_source_t source)
J
jp9000 已提交
1226
{
J
jp9000 已提交
1227 1228
	if (!source) return NULL;

1229 1230
	obs_data_addref(source->context.settings);
	return source->context.settings;
J
jp9000 已提交
1231 1232
}

1233 1234
static inline struct source_frame *filter_async_video(obs_source_t source,
		struct source_frame *in)
1235 1236 1237 1238
{
	size_t i;
	for (i = source->filters.num; i > 0; i--) {
		struct obs_source *filter = source->filters.array[i-1];
1239 1240

		if (filter->context.data && filter->info.filter_video) {
1241 1242
			in = filter->info.filter_video(filter->context.data,
					in);
1243 1244 1245 1246 1247 1248 1249 1250
			if (!in)
				return NULL;
		}
	}

	return in;
}

1251 1252 1253
static inline void copy_frame_data_line(struct source_frame *dst,
		const struct source_frame *src, uint32_t plane, uint32_t y)
{
1254 1255 1256 1257
	uint32_t pos_src = y * src->linesize[plane];
	uint32_t pos_dst = y * dst->linesize[plane];
	uint32_t bytes = dst->linesize[plane] < src->linesize[plane] ?
		dst->linesize[plane] : src->linesize[plane];
1258 1259 1260 1261 1262 1263 1264

	memcpy(dst->data[plane] + pos_dst, src->data[plane] + pos_src, bytes);
}

static inline void copy_frame_data_plane(struct source_frame *dst,
		const struct source_frame *src, uint32_t plane, uint32_t lines)
{
1265
	if (dst->linesize[plane] != src->linesize[plane])
1266 1267 1268 1269
		for (uint32_t y = 0; y < lines; y++)
			copy_frame_data_line(dst, src, plane, y);
	else
		memcpy(dst->data[plane], src->data[plane],
1270
				dst->linesize[plane] * lines);
1271 1272 1273 1274 1275 1276
}

static void copy_frame_data(struct source_frame *dst,
		const struct source_frame *src)
{
	dst->flip         = src->flip;
1277
	dst->full_range   = src->full_range;
1278 1279
	dst->timestamp    = src->timestamp;
	memcpy(dst->color_matrix, src->color_matrix, sizeof(float) * 16);
1280 1281 1282 1283 1284
	if (!dst->full_range) {
		size_t const size = sizeof(float) * 3;
		memcpy(dst->color_range_min, src->color_range_min, size);
		memcpy(dst->color_range_max, src->color_range_max, size);
	}
1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308

	switch (dst->format) {
	case VIDEO_FORMAT_I420:
		copy_frame_data_plane(dst, src, 0, dst->height);
		copy_frame_data_plane(dst, src, 1, dst->height/2);
		copy_frame_data_plane(dst, src, 2, dst->height/2);
		break;

	case VIDEO_FORMAT_NV12:
		copy_frame_data_plane(dst, src, 0, dst->height);
		copy_frame_data_plane(dst, src, 1, dst->height/2);
		break;

	case VIDEO_FORMAT_YVYU:
	case VIDEO_FORMAT_YUY2:
	case VIDEO_FORMAT_UYVY:
	case VIDEO_FORMAT_NONE:
	case VIDEO_FORMAT_RGBA:
	case VIDEO_FORMAT_BGRA:
	case VIDEO_FORMAT_BGRX:
		copy_frame_data_plane(dst, src, 0, dst->height);
	}
}

J
jp9000 已提交
1309
static inline struct source_frame *cache_video(const struct source_frame *frame)
1310
{
1311
	/* TODO: use an actual cache */
1312
	struct source_frame *new_frame = source_frame_create(frame->format,
1313
			frame->width, frame->height);
1314

1315
	copy_frame_data(new_frame, frame);
1316
	return new_frame;
1317 1318
}

1319
static bool ready_async_frame(obs_source_t source, uint64_t sys_time);
1320 1321 1322 1323

static inline void cycle_frames(struct obs_source *source)
{
	if (source->video_frames.num && !source->activate_refs)
1324
		ready_async_frame(source, os_gettime_ns());
1325 1326
}

1327
void obs_source_output_video(obs_source_t source,
1328
		const struct source_frame *frame)
1329
{
J
jp9000 已提交
1330 1331 1332
	if (!source || !frame)
		return;

J
jp9000 已提交
1333
	struct source_frame *output = cache_video(frame);
1334 1335 1336 1337 1338

	pthread_mutex_lock(&source->filter_mutex);
	output = filter_async_video(source, output);
	pthread_mutex_unlock(&source->filter_mutex);

1339 1340
	if (output) {
		pthread_mutex_lock(&source->video_mutex);
1341
		cycle_frames(source);
1342 1343 1344
		da_push_back(source->video_frames, &output);
		pthread_mutex_unlock(&source->video_mutex);
	}
1345 1346
}

1347 1348
static inline struct filtered_audio *filter_async_audio(obs_source_t source,
		struct filtered_audio *in)
1349 1350 1351 1352
{
	size_t i;
	for (i = source->filters.num; i > 0; i--) {
		struct obs_source *filter = source->filters.array[i-1];
1353 1354

		if (filter->context.data && filter->info.filter_audio) {
1355 1356
			in = filter->info.filter_audio(filter->context.data,
					in);
1357 1358 1359 1360 1361 1362 1363 1364
			if (!in)
				return NULL;
		}
	}

	return in;
}

1365
static inline void reset_resampler(obs_source_t source,
1366 1367
		const struct source_audio *audio)
{
J
jp9000 已提交
1368
	const struct audio_output_info *obs_info;
1369 1370
	struct resample_info output_info;

1371 1372
	obs_info = audio_output_getinfo(obs->audio.audio);

1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397
	output_info.format           = obs_info->format;
	output_info.samples_per_sec  = obs_info->samples_per_sec;
	output_info.speakers         = obs_info->speakers;

	source->sample_info.format          = audio->format;
	source->sample_info.samples_per_sec = audio->samples_per_sec;
	source->sample_info.speakers        = audio->speakers;

	if (source->sample_info.samples_per_sec == obs_info->samples_per_sec &&
	    source->sample_info.format          == obs_info->format          &&
	    source->sample_info.speakers        == obs_info->speakers) {
		source->audio_failed = false;
		return;
	}

	audio_resampler_destroy(source->resampler);
	source->resampler = audio_resampler_create(&output_info,
			&source->sample_info);

	source->audio_failed = source->resampler == NULL;
	if (source->resampler == NULL)
		blog(LOG_ERROR, "creation of resampler failed");
}

static inline void copy_audio_data(obs_source_t source,
J
jp9000 已提交
1398
		const uint8_t *const data[], uint32_t frames, uint64_t ts)
1399
{
1400
	size_t planes    = audio_output_planes(obs->audio.audio);
1401
	size_t blocksize = audio_output_blocksize(obs->audio.audio);
1402 1403
	size_t size      = (size_t)frames * blocksize;
	bool   resize    = source->audio_storage_size < size;
1404

J
jp9000 已提交
1405 1406
	source->audio_data.frames    = frames;
	source->audio_data.timestamp = ts;
1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419

	for (size_t i = 0; i < planes; i++) {
		/* ensure audio storage capacity */
		if (resize) {
			bfree(source->audio_data.data[i]);
			source->audio_data.data[i] = bmalloc(size);
		}

		memcpy(source->audio_data.data[i], data[i], size);
	}

	if (resize)
		source->audio_storage_size = size;
1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433
}

/* resamples/remixes new audio to the designated main audio output format */
static void process_audio(obs_source_t source, const struct source_audio *audio)
{
	if (source->sample_info.samples_per_sec != audio->samples_per_sec ||
	    source->sample_info.format          != audio->format          ||
	    source->sample_info.speakers        != audio->speakers)
		reset_resampler(source, audio);

	if (source->audio_failed)
		return;

	if (source->resampler) {
J
jp9000 已提交
1434
		uint8_t  *output[MAX_AV_PLANES];
1435 1436 1437
		uint32_t frames;
		uint64_t offset;

1438 1439 1440 1441 1442
		memset(output, 0, sizeof(output));

		audio_resampler_resample(source->resampler,
				output, &frames, &offset,
				audio->data, audio->frames);
1443

J
jp9000 已提交
1444
		copy_audio_data(source, (const uint8_t *const *)output, frames,
1445 1446 1447 1448 1449
				audio->timestamp - offset);
	} else {
		copy_audio_data(source, audio->data, audio->frames,
				audio->timestamp);
	}
1450 1451 1452 1453 1454
}

void obs_source_output_audio(obs_source_t source,
		const struct source_audio *audio)
{
J
jp9000 已提交
1455
	uint32_t flags;
1456
	struct filtered_audio *output;
1457

J
jp9000 已提交
1458 1459 1460 1461
	if (!source || !audio)
		return;

	flags = source->info.output_flags;
1462
	process_audio(source, audio);
1463 1464

	pthread_mutex_lock(&source->filter_mutex);
1465
	output = filter_async_audio(source, &source->audio_data);
1466 1467

	if (output) {
1468
		bool async = (flags & OBS_SOURCE_ASYNC) != 0;
J
jp9000 已提交
1469

1470 1471
		pthread_mutex_lock(&source->audio_mutex);

1472 1473
		/* wait for video to start before outputting any audio so we
		 * have a base for sync */
1474
		if (source->timing_set || !async) {
1475
			struct audio_data data;
1476

J
jp9000 已提交
1477
			for (int i = 0; i < MAX_AV_PLANES; i++)
1478 1479
				data.data[i] = output->data[i];

1480 1481 1482
			data.frames    = output->frames;
			data.timestamp = output->timestamp;
			source_output_audio_line(source, &data);
1483 1484 1485 1486 1487 1488 1489 1490
		}

		pthread_mutex_unlock(&source->audio_mutex);
	}

	pthread_mutex_unlock(&source->filter_mutex);
}

1491 1492 1493 1494 1495
static inline bool frame_out_of_bounds(obs_source_t source, uint64_t ts)
{
	return ((ts - source->last_frame_ts) > MAX_TIMESTAMP_JUMP);
}

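/* advances the async frame queue based on elapsed system time, discarding
 * frames that are already late and compensating for timestamp jumps */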
static bool ready_async_frame(obs_source_t source, uint64_t sys_time)
1497 1498 1499 1500 1501 1502 1503 1504 1505 1506
{
	struct source_frame *next_frame = source->video_frames.array[0];
	struct source_frame *frame      = NULL;
	uint64_t sys_offset = sys_time - source->last_sys_timestamp;
	uint64_t frame_time = next_frame->timestamp;
	uint64_t frame_offset = 0;

	/* account for timestamp invalidation */
	if (frame_out_of_bounds(source, frame_time)) {
		source->last_frame_ts = next_frame->timestamp;
1507
		os_atomic_inc_long(&source->av_sync_ref);
1508 1509
	} else {
		frame_offset = frame_time - source->last_frame_ts;
1510
		source->last_frame_ts += frame_offset;
1511 1512 1513 1514 1515
	}

	while (frame_offset <= sys_offset) {
		source_frame_destroy(frame);

1516 1517 1518
		if (source->video_frames.num == 1)
			return true;

1519 1520 1521 1522 1523 1524 1525 1526
		frame = next_frame;
		da_erase(source->video_frames, 0);
		next_frame = source->video_frames.array[0];

		/* more timestamp checking and compensating */
		if ((next_frame->timestamp - frame_time) > MAX_TIMESTAMP_JUMP) {
			source->last_frame_ts =
				next_frame->timestamp - frame_offset;
1527
			os_atomic_inc_long(&source->av_sync_ref);
1528 1529 1530 1531 1532 1533
		}

		frame_time   = next_frame->timestamp;
		frame_offset = frame_time - source->last_frame_ts;
	}

1534 1535
	source_frame_destroy(frame);

1536 1537 1538 1539 1540 1541
	return frame != NULL;
}

static inline struct source_frame *get_closest_frame(obs_source_t source,
		uint64_t sys_time)
{
1542
	if (ready_async_frame(source, sys_time)) {
1543 1544 1545 1546 1547 1548
		struct source_frame *frame = source->video_frames.array[0];
		da_erase(source->video_frames, 0);
		return frame;
	}

	return NULL;
1549 1550
}

1551
/*
1552 1553
 * Ensures that cached frames are displayed on time.  If multiple frames
 * were cached between renders, then releases the unnecessary frames and uses
1554 1555
 * the frame with the closest timing to ensure sync.  Also ensures that timing
 * with audio is synchronized.
1556
 */
1557
struct source_frame *obs_source_getframe(obs_source_t source)
J
jp9000 已提交
1558
{
1559 1560
	struct source_frame *frame = NULL;
	uint64_t sys_time;
1561

J
jp9000 已提交
1562 1563 1564
	if (!source)
		return NULL;

1565 1566 1567 1568 1569
	pthread_mutex_lock(&source->video_mutex);

	if (!source->video_frames.num)
		goto unlock;

1570
	sys_time = os_gettime_ns();
1571

1572 1573
	if (!source->last_frame_ts) {
		frame = source->video_frames.array[0];
1574 1575
		da_erase(source->video_frames, 0);

1576
		source->last_frame_ts = frame->timestamp;
1577
	} else {
1578
		frame = get_closest_frame(source, sys_time);
J
jp9000 已提交
1579 1580 1581 1582 1583 1584
	}

	/* reset timing to current system time */
	if (frame) {
		source->timing_adjust = sys_time - frame->timestamp;
		source->timing_set = true;
1585 1586 1587 1588 1589 1590
	}

	source->last_sys_timestamp = sys_time;

unlock:
	pthread_mutex_unlock(&source->video_mutex);
1591

1592
	if (frame)
1593 1594
		obs_source_addref(source);

1595
	return frame;
J
jp9000 已提交
1596 1597
}

1598
void obs_source_releaseframe(obs_source_t source, struct source_frame *frame)
J
jp9000 已提交
1599
{
J
jp9000 已提交
1600
	if (source && frame) {
1601 1602 1603
		source_frame_destroy(frame);
		obs_source_release(source);
	}
J
jp9000 已提交
1604
}
1605 1606 1607

const char *obs_source_getname(obs_source_t source)
{
1608
	return source ? source->context.name : NULL;
1609 1610 1611 1612
}

void obs_source_setname(obs_source_t source, const char *name)
{
J
jp9000 已提交
1613
	if (!source) return;
J
jp9000 已提交
1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628

	if (!name || !*name || strcmp(name, source->context.name) != 0) {
		struct calldata data;
		char *prev_name = bstrdup(source->context.name);
		obs_context_data_setname(&source->context, name);

		calldata_init(&data);
		calldata_setptr(&data, "source", source);
		calldata_setstring(&data, "new_name", source->context.name);
		calldata_setstring(&data, "prev_name", prev_name);
		signal_handler_signal(obs->signals, "source_rename", &data);
		signal_handler_signal(source->context.signals, "rename", &data);
		calldata_free(&data);
		bfree(prev_name);
	}
1629 1630
}

1631
void obs_source_gettype(obs_source_t source, enum obs_source_type *type,
1632 1633
		const char **id)
{
J
jp9000 已提交
1634 1635
	if (!source) return;

J
jp9000 已提交
1636
	if (type) *type = source->info.type;
J
jp9000 已提交
1637
	if (id)   *id   = source->info.id;
1638
}
1639 1640

static inline void render_filter_bypass(obs_source_t target, effect_t effect,
J
jp9000 已提交
1641
		bool use_matrix)
1642
{
J
jp9000 已提交
1643
	const char  *tech_name = use_matrix ? "DrawMatrix" : "Draw";
1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656
	technique_t tech       = effect_gettechnique(effect, tech_name);
	size_t      passes, i;

	passes = technique_begin(tech);
	for (i = 0; i < passes; i++) {
		technique_beginpass(tech, i);
		obs_source_video_render(target);
		technique_endpass(tech);
	}
	technique_end(tech);
}

static inline void render_filter_tex(texture_t tex, effect_t effect,
J
jp9000 已提交
1657
		uint32_t width, uint32_t height, bool use_matrix)
1658
{
J
jp9000 已提交
1659
	const char  *tech_name = use_matrix ? "DrawMatrix" : "Draw";
1660
	technique_t tech       = effect_gettechnique(effect, tech_name);
J
jp9000 已提交
1661
	eparam_t    image      = effect_getparambyname(effect, "image");
1662 1663
	size_t      passes, i;

1664
	effect_settexture(image, tex);
1665 1666 1667 1668 1669 1670 1671 1672 1673 1674

	passes = technique_begin(tech);
	for (i = 0; i < passes; i++) {
		technique_beginpass(tech, i);
		gs_draw_sprite(tex, width, height, 0);
		technique_endpass(tech);
	}
	technique_end(tech);
}

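/* renders the filter's target into a texture (or draws it directly when
 * direct rendering is allowed) and then draws the result with the given
 * filter effect */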
void obs_source_process_filter(obs_source_t filter, effect_t effect,
		uint32_t width, uint32_t height, enum gs_color_format format,
1677
		enum allow_direct_render allow_direct)
1678
{
J
jp9000 已提交
1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694
	obs_source_t target, parent;
	uint32_t     target_flags, parent_flags;
	int          cx, cy;
	bool         use_matrix, expects_def, can_directly;

	if (!filter) return;

	target       = obs_filter_gettarget(filter);
	parent       = obs_filter_getparent(filter);
	target_flags = target->info.output_flags;
	parent_flags = parent->info.output_flags;
	cx           = obs_source_getwidth(target);
	cy           = obs_source_getheight(target);
	use_matrix   = !!(target_flags & OBS_SOURCE_COLOR_MATRIX);
	expects_def  = !(parent_flags & OBS_SOURCE_CUSTOM_DRAW);
	can_directly = allow_direct == ALLOW_DIRECT_RENDERING;
1695 1696 1697 1698 1699

	/* if the parent does not use any custom effects, and this is the last
	 * filter in the chain for the parent, then render the parent directly
	 * using the filter effect instead of rendering to texture to reduce
	 * the total number of passes */
1700
	if (can_directly && expects_def && target == parent) {
J
jp9000 已提交
1701
		render_filter_bypass(target, effect, use_matrix);
1702 1703 1704
		return;
	}

J
jp9000 已提交
1705 1706 1707 1708 1709
	if (!filter->filter_texrender)
		filter->filter_texrender = texrender_create(format,
				GS_ZS_NONE);

	if (texrender_begin(filter->filter_texrender, cx, cy)) {
1710
		gs_ortho(0.0f, (float)cx, 0.0f, (float)cy, -100.0f, 100.0f);
1711
		if (expects_def && parent == target)
J
jp9000 已提交
1712
			obs_source_default_render(parent, use_matrix);
1713 1714
		else
			obs_source_video_render(target);
J
jp9000 已提交
1715
		texrender_end(filter->filter_texrender);
1716 1717 1718 1719
	}

	/* --------------------------- */

J
jp9000 已提交
1720 1721
	render_filter_tex(texrender_gettexture(filter->filter_texrender),
			effect, width, height, use_matrix);
1722
}
1723 1724 1725

signal_handler_t obs_source_signalhandler(obs_source_t source)
{
1726
	return source ? source->context.signals : NULL;
1727 1728 1729 1730
}

proc_handler_t obs_source_prochandler(obs_source_t source)
{
1731
	return source ? source->context.procs : NULL;
1732
}
J
jp9000 已提交
1733 1734 1735

void obs_source_setvolume(obs_source_t source, float volume)
{
J
jp9000 已提交
1736 1737 1738 1739 1740
	if (source) {
		struct calldata data = {0};
		calldata_setptr(&data, "source", source);
		calldata_setfloat(&data, "volume", volume);

1741
		signal_handler_signal(source->context.signals, "volume", &data);
1742
		signal_handler_signal(obs->signals, "source_volume", &data);
J
jp9000 已提交
1743

1744
		volume = (float)calldata_float(&data, "volume");
J
jp9000 已提交
1745 1746
		calldata_free(&data);

J
jp9000 已提交
1747
		source->user_volume = volume;
J
jp9000 已提交
1748
	}
J
jp9000 已提交
1749 1750
}

J
jp9000 已提交
1751 1752 1753 1754 1755 1756 1757 1758 1759
static void set_tree_preset_vol(obs_source_t parent, obs_source_t child,
		void *param)
{
	float *vol = param;
	child->present_volume = *vol;

	UNUSED_PARAMETER(parent);
}

J
jp9000 已提交
1760 1761
void obs_source_set_present_volume(obs_source_t source, float volume)
{
J
jp9000 已提交
1762
	if (source) {
J
jp9000 已提交
1763
		source->present_volume = volume;
J
jp9000 已提交
1764 1765 1766 1767 1768 1769 1770 1771

		/* don't set the presentation volume of the tree if a
		 * transition source, let the transition handle presentation
		 * volume for the child sources itself. */
		if (source->info.type != OBS_SOURCE_TYPE_TRANSITION)
			obs_source_enum_tree(source, set_tree_preset_vol,
					&volume);
	}
J
jp9000 已提交
1772 1773 1774 1775
}

float obs_source_getvolume(obs_source_t source)
{
J
jp9000 已提交
1776
	return source ? source->user_volume : 0.0f;
J
jp9000 已提交
1777 1778 1779 1780
}

float obs_source_get_present_volume(obs_source_t source)
{
J
jp9000 已提交
1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792
	return source ? source->present_volume : 0.0f;
}

void obs_source_set_sync_offset(obs_source_t source, int64_t offset)
{
	if (source)
		source->sync_offset = offset;
}

int64_t obs_source_get_sync_offset(obs_source_t source)
{
	return source ? source->sync_offset : 0;
J
jp9000 已提交
1793
}
1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805

struct source_enum_data {
	obs_source_enum_proc_t enum_callback;
	void *param;
};

static void enum_source_tree_callback(obs_source_t parent, obs_source_t child,
		void *param)
{
	struct source_enum_data *data = param;

	if (child->info.enum_sources && !child->enum_refs) {
J
jp9000 已提交
1806
		os_atomic_inc_long(&child->enum_refs);
1807

1808 1809 1810
		if (child->context.data)
			child->info.enum_sources(child->context.data,
					enum_source_tree_callback, data);
1811

J
jp9000 已提交
1812
		os_atomic_dec_long(&child->enum_refs);
1813 1814 1815 1816 1817 1818 1819 1820 1821
	}

	data->enum_callback(parent, child, data->param);
}

void obs_source_enum_sources(obs_source_t source,
		obs_source_enum_proc_t enum_callback,
		void *param)
{
1822 1823 1824
	if (!source_valid(source)      ||
	    !source->info.enum_sources ||
	    source->enum_refs)
1825 1826 1827 1828
		return;

	obs_source_addref(source);

J
jp9000 已提交
1829
	os_atomic_inc_long(&source->enum_refs);
1830
	source->info.enum_sources(source->context.data, enum_callback, param);
J
jp9000 已提交
1831
	os_atomic_dec_long(&source->enum_refs);
1832 1833 1834 1835 1836 1837 1838 1839 1840 1841

	obs_source_release(source);
}

void obs_source_enum_tree(obs_source_t source,
		obs_source_enum_proc_t enum_callback,
		void *param)
{
	struct source_enum_data data = {enum_callback, param};

1842 1843 1844
	if (!source_valid(source)      ||
	    !source->info.enum_sources ||
	    source->enum_refs)
1845 1846 1847 1848
		return;

	obs_source_addref(source);

J
jp9000 已提交
1849
	os_atomic_inc_long(&source->enum_refs);
1850 1851
	source->info.enum_sources(source->context.data,
			enum_source_tree_callback,
1852
			&data);
J
jp9000 已提交
1853
	os_atomic_dec_long(&source->enum_refs);
1854 1855 1856

	obs_source_release(source);
}
1857 1858 1859 1860 1861

void obs_source_add_child(obs_source_t parent, obs_source_t child)
{
	if (!parent || !child) return;

1862 1863 1864 1865 1866
	for (int i = 0; i < parent->show_refs; i++) {
		enum view_type type;
		type = (i < parent->activate_refs) ? MAIN_VIEW : AUX_VIEW;
		obs_source_activate(child, type);
	}
1867 1868 1869 1870 1871 1872
}

void obs_source_remove_child(obs_source_t parent, obs_source_t child)
{
	if (!parent || !child) return;

1873 1874 1875 1876 1877
	for (int i = 0; i < parent->show_refs; i++) {
		enum view_type type;
		type = (i < parent->activate_refs) ? MAIN_VIEW : AUX_VIEW;
		obs_source_deactivate(child, type);
	}
1878
}
J
jp9000 已提交
1879 1880 1881 1882 1883 1884 1885 1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925

static void reset_transition_vol(obs_source_t parent, obs_source_t child,
		void *param)
{
	child->transition_volume = 0.0f;

	UNUSED_PARAMETER(parent);
	UNUSED_PARAMETER(param);
}

static void add_transition_vol(obs_source_t parent, obs_source_t child,
		void *param)
{
	float *vol = param;
	child->transition_volume += *vol;

	UNUSED_PARAMETER(parent);
}

static void apply_transition_vol(obs_source_t parent, obs_source_t child,
		void *param)
{
	child->present_volume = child->transition_volume;

	UNUSED_PARAMETER(parent);
	UNUSED_PARAMETER(param);
}

void obs_transition_begin_frame(obs_source_t transition)
{
	if (!transition) return;
	obs_source_enum_tree(transition, reset_transition_vol, NULL);
}

void obs_source_set_transition_vol(obs_source_t source, float vol)
{
	if (!source) return;

	add_transition_vol(NULL, source, &vol);
	obs_source_enum_tree(source, add_transition_vol, &vol);
}

void obs_transition_end_frame(obs_source_t transition)
{
	if (!transition) return;
	obs_source_enum_tree(transition, apply_transition_vol, NULL);
}
1926 1927 1928

void obs_source_save(obs_source_t source)
{
1929
	if (!source_valid(source) || !source->info.save) return;
1930 1931 1932 1933 1934
	source->info.save(source->context.data, source->context.settings);
}

void obs_source_load(obs_source_t source)
{
1935
	if (!source_valid(source) || !source->info.load) return;
1936 1937
	source->info.load(source->context.data, source->context.settings);
}