obs-source.c 53.4 KB
Newer Older
J
jp9000 已提交
1
/******************************************************************************
2
    Copyright (C) 2013-2014 by Hugh Bailey <obs.jim@gmail.com>
J
jp9000 已提交
3 4 5

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
6
    the Free Software Foundation, either version 2 of the License, or
J
jp9000 已提交
7 8 9 10 11 12 13 14 15 16 17
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
******************************************************************************/

18 19
#include <inttypes.h>

20
#include "media-io/format-conversion.h"
21
#include "media-io/video-frame.h"
22
#include "media-io/audio-io.h"
J
jp9000 已提交
23
#include "util/threading.h"
24
#include "util/platform.h"
25
#include "callback/calldata.h"
26 27
#include "graphics/matrix3.h"
#include "graphics/vec3.h"
28

J
jp9000 已提交
29
#include "obs.h"
J
jp9000 已提交
30
#include "obs-internal.h"
J
jp9000 已提交
31

32 33 34 35 36
static inline bool source_valid(struct obs_source *source)
{
	return source && source->context.data;
}

37
const struct obs_source_info *find_source(struct darray *list, const char *id)
J
jp9000 已提交
38 39
{
	size_t i;
J
jp9000 已提交
40
	struct obs_source_info *array = list->array;
J
jp9000 已提交
41 42

	for (i = 0; i < list->num; i++) {
J
jp9000 已提交
43
		struct obs_source_info *info = array+i;
44
		if (strcmp(info->id, id) == 0)
J
jp9000 已提交
45 46 47 48 49 50
			return info;
	}

	return NULL;
}

J
jp9000 已提交
51
static const struct obs_source_info *get_source_info(enum obs_source_type type,
52 53 54 55 56
		const char *id)
{
	struct darray *list = NULL;

	switch (type) {
J
jp9000 已提交
57 58 59 60 61 62 63 64 65 66 67
	case OBS_SOURCE_TYPE_INPUT:
		list = &obs->input_types.da;
		break;

	case OBS_SOURCE_TYPE_FILTER:
		list = &obs->filter_types.da;
		break;

	case OBS_SOURCE_TYPE_TRANSITION:
		list = &obs->transition_types.da;
		break;
68 69 70 71 72
	}

	return find_source(list, id);
}

73 74 75 76 77 78 79 80
static const char *source_signals[] = {
	"void destroy(ptr source)",
	"void add(ptr source)",
	"void remove(ptr source)",
	"void activate(ptr source)",
	"void deactivate(ptr source)",
	"void show(ptr source)",
	"void hide(ptr source)",
J
jp9000 已提交
81
	"void rename(ptr source, string new_name, string prev_name)",
82
	"void volume(ptr source, in out float volume)",
83 84
	"void volume_level(ptr source, float level, float magnitude, "
		"float peak)",
85 86 87
	NULL
};

88
bool obs_source_init_context(struct obs_source *source,
89
		obs_data_t *settings, const char *name)
90
{
91
	if (!obs_context_data_init(&source->context, settings, name))
92 93
		return false;

94 95
	return signal_handler_add_array(source->context.signals,
			source_signals);
96 97
}

98 99
const char *obs_source_get_display_name(enum obs_source_type type,
		const char *id)
100
{
J
jp9000 已提交
101
	const struct obs_source_info *info = get_source_info(type, id);
102
	return (info != NULL) ? info->get_name() : NULL;
103 104
}

105
/* internal initialization */
J
jp9000 已提交
106 107
bool obs_source_init(struct obs_source *source,
		const struct obs_source_info *info)
J
jp9000 已提交
108
{
109
	source->refs = 1;
J
jp9000 已提交
110
	source->user_volume = 1.0f;
111
	source->present_volume = 0.0f;
J
jp9000 已提交
112
	source->sync_offset = 0;
113 114 115
	pthread_mutex_init_value(&source->filter_mutex);
	pthread_mutex_init_value(&source->video_mutex);
	pthread_mutex_init_value(&source->audio_mutex);
116

117 118 119 120 121 122
	if (pthread_mutex_init(&source->filter_mutex, NULL) != 0)
		return false;
	if (pthread_mutex_init(&source->audio_mutex, NULL) != 0)
		return false;
	if (pthread_mutex_init(&source->video_mutex, NULL) != 0)
		return false;
J
jp9000 已提交
123

124
	if (info && info->output_flags & OBS_SOURCE_AUDIO) {
125
		source->audio_line = audio_output_create_line(obs->audio.audio,
126
				source->context.name);
127 128
		if (!source->audio_line) {
			blog(LOG_ERROR, "Failed to create audio line for "
129
			                "source '%s'", source->context.name);
130 131 132
			return false;
		}
	}
133

134 135 136
	obs_context_data_insert(&source->context,
			&obs->data.sources_mutex,
			&obs->data.first_source);
137
	return true;
J
jp9000 已提交
138 139
}

140
static inline void obs_source_dosignal(struct obs_source *source,
141
		const char *signal_obs, const char *signal_source)
142 143 144 145
{
	struct calldata data;

	calldata_init(&data);
146
	calldata_set_ptr(&data, "source", source);
147 148 149
	if (signal_obs)
		signal_handler_signal(obs->signals, signal_obs, &data);
	if (signal_source)
150 151
		signal_handler_signal(source->context.signals, signal_source,
				&data);
152 153 154
	calldata_free(&data);
}

155 156
obs_source_t *obs_source_create(enum obs_source_type type, const char *id,
		const char *name, obs_data_t *settings)
J
jp9000 已提交
157
{
158
	struct obs_source *source = bzalloc(sizeof(struct obs_source));
J
jp9000 已提交
159

J
jp9000 已提交
160
	const struct obs_source_info *info = get_source_info(type, id);
J
jp9000 已提交
161
	if (!info) {
P
Palana 已提交
162
		blog(LOG_ERROR, "Source ID '%s' not found", id);
J
jp9000 已提交
163

164 165 166 167 168 169
		source->info.id      = bstrdup(id);
		source->info.type    = type;
		source->owns_info_id = true;
	} else {
		source->info = *info;
	}
170

171
	if (!obs_source_init_context(source, settings, name))
172 173
		goto fail;

174 175
	if (info && info->get_defaults)
		info->get_defaults(source->context.settings);
J
jp9000 已提交
176

177 178
	/* allow the source to be created even if creation fails so that the
	 * user's data doesn't become lost */
179 180 181
	if (info)
		source->context.data = info->create(source->context.settings,
				source);
182
	if (!source->context.data)
183
		blog(LOG_ERROR, "Failed to create source '%s'!", name);
184

J
jp9000 已提交
185
	if (!obs_source_init(source, info))
186
		goto fail;
J
jp9000 已提交
187

188
	blog(LOG_INFO, "source '%s' (%s) created", name, id);
189
	obs_source_dosignal(source, "source_create", NULL);
J
jp9000 已提交
190
	return source;
191 192 193 194 195

fail:
	blog(LOG_ERROR, "obs_source_create failed");
	obs_source_destroy(source);
	return NULL;
J
jp9000 已提交
196 197
}

198 199
void obs_source_frame_init(struct obs_source_frame *frame,
		enum video_format format, uint32_t width, uint32_t height)
200
{
201
	struct video_frame vid_frame;
J
jp9000 已提交
202 203 204 205

	if (!frame)
		return;

206
	video_frame_init(&vid_frame, format, width, height);
207 208 209
	frame->format = format;
	frame->width  = width;
	frame->height = height;
210

211 212 213
	for (size_t i = 0; i < MAX_AV_PLANES; i++) {
		frame->data[i]     = vid_frame.data[i];
		frame->linesize[i] = vid_frame.linesize[i];
214 215 216
	}
}

217
void obs_source_destroy(struct obs_source *source)
J
jp9000 已提交
218
{
219
	size_t i;
220

J
jp9000 已提交
221 222 223
	if (!source)
		return;

224 225
	obs_context_data_remove(&source->context);

226 227
	blog(LOG_INFO, "source '%s' destroyed", source->context.name);

228
	obs_source_dosignal(source, "source_destroy", "destroy");
229

230
	if (source->context.data) {
231
		source->info.destroy(source->context.data);
232 233
		source->context.data = NULL;
	}
234

235 236
	if (source->filter_parent)
		obs_source_filter_remove(source->filter_parent, source);
237

238 239
	for (i = 0; i < source->filters.num; i++)
		obs_source_release(source->filters.array[i]);
240

241
	for (i = 0; i < source->video_frames.num; i++)
242
		obs_source_frame_destroy(source->video_frames.array[i]);
243

244 245 246 247
	gs_enter_context(obs->video.graphics);
	gs_texrender_destroy(source->async_convert_texrender);
	gs_texture_destroy(source->async_texture);
	gs_leave_context();
J
jp9000 已提交
248

J
jp9000 已提交
249
	for (i = 0; i < MAX_AV_PLANES; i++)
250 251
		bfree(source->audio_data.data[i]);

252 253 254
	audio_line_destroy(source->audio_line);
	audio_resampler_destroy(source->resampler);

255
	gs_texrender_destroy(source->filter_texrender);
256 257 258 259 260
	da_free(source->video_frames);
	da_free(source->filters);
	pthread_mutex_destroy(&source->filter_mutex);
	pthread_mutex_destroy(&source->audio_mutex);
	pthread_mutex_destroy(&source->video_mutex);
261
	obs_context_data_free(&source->context);
262 263 264 265
	
	if (source->owns_info_id)
		bfree((void*)source->info.id);

266 267 268
	bfree(source);
}

269
void obs_source_addref(obs_source_t *source)
270
{
P
Palana 已提交
271
	if (source)
J
jp9000 已提交
272
		os_atomic_inc_long(&source->refs);
273 274
}

275
void obs_source_release(obs_source_t *source)
276
{
P
Palana 已提交
277 278
	if (!source)
		return;
279

J
jp9000 已提交
280
	if (os_atomic_dec_long(&source->refs) == 0)
P
Palana 已提交
281
		obs_source_destroy(source);
282 283
}

284
void obs_source_remove(obs_source_t *source)
285
{
286
	struct obs_core_data *data = &obs->data;
287
	size_t id;
288
	bool   exists;
289 290 291

	pthread_mutex_lock(&data->sources_mutex);

J
jp9000 已提交
292 293
	if (!source || source->removed) {
		pthread_mutex_unlock(&data->sources_mutex);
J
jp9000 已提交
294
		return;
J
jp9000 已提交
295
	}
J
jp9000 已提交
296

J
jp9000 已提交
297
	source->removed = true;
J
jp9000 已提交
298

J
jp9000 已提交
299 300
	obs_source_addref(source);

301 302 303 304
	id = da_find(data->user_sources, &source, 0);
	exists = (id != DARRAY_INVALID);
	if (exists) {
		da_erase(data->user_sources, id);
J
jp9000 已提交
305
		obs_source_release(source);
306 307 308
	}

	pthread_mutex_unlock(&data->sources_mutex);
J
jp9000 已提交
309

310 311 312
	if (exists)
		obs_source_dosignal(source, "source_remove", "remove");

J
jp9000 已提交
313
	obs_source_release(source);
314 315
}

316
bool obs_source_removed(obs_source_t *source)
317
{
J
jp9000 已提交
318
	return source ? source->removed : true;
J
jp9000 已提交
319 320
}

321
static inline obs_data_t *get_defaults(const struct obs_source_info *info)
J
jp9000 已提交
322
{
323
	obs_data_t *settings = obs_data_create();
324 325
	if (info->get_defaults)
		info->get_defaults(settings);
J
jp9000 已提交
326 327 328
	return settings;
}

329
obs_data_t *obs_source_settings(enum obs_source_type type, const char *id)
J
jp9000 已提交
330 331
{
	const struct obs_source_info *info = get_source_info(type, id);
J
jp9000 已提交
332
	return (info) ? get_defaults(info) : NULL;
J
jp9000 已提交
333 334
}

335
obs_properties_t *obs_get_source_properties(enum obs_source_type type,
336
		const char *id)
J
jp9000 已提交
337
{
J
jp9000 已提交
338
	const struct obs_source_info *info = get_source_info(type, id);
339
	if (info && info->get_properties) {
340 341
		obs_data_t       *defaults = get_defaults(info);
		obs_properties_t *properties;
J
jp9000 已提交
342

343
		properties = info->get_properties();
J
jp9000 已提交
344 345 346 347
		obs_properties_apply_settings(properties, defaults);
		obs_data_release(defaults);
		return properties;
	}
J
jp9000 已提交
348 349 350
	return NULL;
}

351
obs_properties_t *obs_source_properties(obs_source_t *source)
352
{
353
	if (source_valid(source) && source->info.get_properties) {
354
		obs_properties_t *props;
355
		props = source->info.get_properties();
356
		obs_properties_apply_settings(props, source->context.settings);
J
jp9000 已提交
357 358 359
		return props;
	}

360 361 362
	return NULL;
}

363
uint32_t obs_source_get_output_flags(obs_source_t *source)
J
jp9000 已提交
364
{
J
jp9000 已提交
365
	return source ? source->info.output_flags : 0;
J
jp9000 已提交
366 367
}

368
static void obs_source_deferred_update(obs_source_t *source)
369
{
370 371 372 373
	if (source->context.data && source->info.update)
		source->info.update(source->context.data,
				source->context.settings);

374 375 376
	source->defer_update = false;
}

377
void obs_source_update(obs_source_t *source, obs_data_t *settings)
J
jp9000 已提交
378
{
J
jp9000 已提交
379 380
	if (!source) return;

381 382 383 384 385 386 387 388
	if (settings)
		obs_data_apply(source->context.settings, settings);

	if (source->info.output_flags & OBS_SOURCE_VIDEO) {
		source->defer_update = true;
	} else if (source->context.data && source->info.update) {
		source->info.update(source->context.data,
				source->context.settings);
389
	}
J
jp9000 已提交
390 391
}

392
void obs_source_send_mouse_click(obs_source_t *source,
K
kc5nra 已提交
393 394 395 396 397 398 399 400 401 402 403 404 405 406 407
		const struct obs_mouse_event *event,
		int32_t type, bool mouse_up,
		uint32_t click_count)
{
	if (!source)
		return;

	if (source->info.output_flags & OBS_SOURCE_INTERACTION) {
		if (source->info.mouse_click) {
			source->info.mouse_click(source->context.data,
					event, type, mouse_up, click_count);
		}
	}
}

408
void obs_source_send_mouse_move(obs_source_t *source,
K
kc5nra 已提交
409 410 411 412 413 414 415 416 417 418 419 420 421
		const struct obs_mouse_event *event, bool mouse_leave)
{
	if (!source)
		return;

	if (source->info.output_flags & OBS_SOURCE_INTERACTION) {
		if (source->info.mouse_move) {
			source->info.mouse_move(source->context.data,
					event, mouse_leave);
		}
	}
}

422
void obs_source_send_mouse_wheel(obs_source_t *source,
K
kc5nra 已提交
423 424 425 426 427 428 429 430 431 432 433 434 435
		const struct obs_mouse_event *event, int x_delta, int y_delta)
{
	if (!source)
		return;

	if (source->info.output_flags & OBS_SOURCE_INTERACTION) {
		if (source->info.mouse_wheel) {
			source->info.mouse_wheel(source->context.data,
					event, x_delta, y_delta);
		}
	}
}

436
void obs_source_send_focus(obs_source_t *source, bool focus)
K
kc5nra 已提交
437 438 439 440 441 442 443 444 445 446 447
{
	if (!source)
		return;

	if (source->info.output_flags & OBS_SOURCE_INTERACTION) {
		if (source->info.focus) {
			source->info.focus(source->context.data, focus);
		}
	}
}

448
void obs_source_send_key_click(obs_source_t *source,
K
kc5nra 已提交
449 450 451 452 453 454 455 456 457 458 459 460 461
		const struct obs_key_event *event, bool key_up)
{
	if (!source)
		return;

	if (source->info.output_flags & OBS_SOURCE_INTERACTION) {
		if (source->info.key_click) {
			source->info.key_click(source->context.data, event,
					key_up);
		}
	}
}

462
static void activate_source(obs_source_t *source)
J
jp9000 已提交
463
{
464
	if (source->context.data && source->info.activate)
465
		source->info.activate(source->context.data);
466
	obs_source_dosignal(source, "source_activate", "activate");
J
jp9000 已提交
467 468
}

469
static void deactivate_source(obs_source_t *source)
J
jp9000 已提交
470
{
471
	if (source->context.data && source->info.deactivate)
472
		source->info.deactivate(source->context.data);
473
	obs_source_dosignal(source, "source_deactivate", "deactivate");
474
}
475

476
static void show_source(obs_source_t *source)
477
{
478
	if (source->context.data && source->info.show)
479
		source->info.show(source->context.data);
480
	obs_source_dosignal(source, "source_show", "show");
481 482
}

483
static void hide_source(obs_source_t *source)
484
{
485
	if (source->context.data && source->info.hide)
486
		source->info.hide(source->context.data);
487
	obs_source_dosignal(source, "source_hide", "hide");
488 489
}

490 491
static void activate_tree(obs_source_t *parent, obs_source_t *child,
		void *param)
492
{
J
jp9000 已提交
493
	if (os_atomic_inc_long(&child->activate_refs) == 1)
494
		activate_source(child);
J
jp9000 已提交
495 496 497

	UNUSED_PARAMETER(parent);
	UNUSED_PARAMETER(param);
498 499
}

500
static void deactivate_tree(obs_source_t *parent, obs_source_t *child,
501 502
		void *param)
{
J
jp9000 已提交
503
	if (os_atomic_dec_long(&child->activate_refs) == 0)
504
		deactivate_source(child);
J
jp9000 已提交
505 506 507

	UNUSED_PARAMETER(parent);
	UNUSED_PARAMETER(param);
508 509
}

510
static void show_tree(obs_source_t *parent, obs_source_t *child, void *param)
511
{
J
jp9000 已提交
512
	if (os_atomic_inc_long(&child->show_refs) == 1)
513 514 515 516 517 518
		show_source(child);

	UNUSED_PARAMETER(parent);
	UNUSED_PARAMETER(param);
}

519
static void hide_tree(obs_source_t *parent, obs_source_t *child, void *param)
520
{
J
jp9000 已提交
521
	if (os_atomic_dec_long(&child->show_refs) == 0)
522 523 524 525 526 527
		hide_source(child);

	UNUSED_PARAMETER(parent);
	UNUSED_PARAMETER(param);
}

528
void obs_source_activate(obs_source_t *source, enum view_type type)
529 530 531
{
	if (!source) return;

J
jp9000 已提交
532
	if (os_atomic_inc_long(&source->show_refs) == 1) {
533 534 535 536 537
		show_source(source);
		obs_source_enum_tree(source, show_tree, NULL);
	}

	if (type == MAIN_VIEW) {
J
jp9000 已提交
538
		if (os_atomic_inc_long(&source->activate_refs) == 1) {
539 540 541 542
			activate_source(source);
			obs_source_enum_tree(source, activate_tree, NULL);
			obs_source_set_present_volume(source, 1.0f);
		}
543 544 545
	}
}

546
void obs_source_deactivate(obs_source_t *source, enum view_type type)
547 548 549
{
	if (!source) return;

J
jp9000 已提交
550
	if (os_atomic_dec_long(&source->show_refs) == 0) {
551 552 553 554 555
		hide_source(source);
		obs_source_enum_tree(source, hide_tree, NULL);
	}

	if (type == MAIN_VIEW) {
J
jp9000 已提交
556
		if (os_atomic_dec_long(&source->activate_refs) == 0) {
557 558 559 560
			deactivate_source(source);
			obs_source_enum_tree(source, deactivate_tree, NULL);
			obs_source_set_present_volume(source, 0.0f);
		}
561
	}
J
jp9000 已提交
562 563
}

564
void obs_source_video_tick(obs_source_t *source, float seconds)
J
jp9000 已提交
565
{
J
jp9000 已提交
566 567
	if (!source) return;

568 569 570
	if (source->defer_update)
		obs_source_deferred_update(source);

J
jp9000 已提交
571 572
	/* reset the filter render texture information once every frame */
	if (source->filter_texrender)
573
		gs_texrender_reset(source->filter_texrender);
J
jp9000 已提交
574

575
	if (source->context.data && source->info.video_tick)
576
		source->info.video_tick(source->context.data, seconds);
577 578

	source->async_rendered = false;
J
jp9000 已提交
579 580
}

581
/* unless the value is 3+ hours worth of frames, this won't overflow */
J
jp9000 已提交
582
static inline uint64_t conv_frames_to_time(size_t frames)
583
{
J
jp9000 已提交
584
	const struct audio_output_info *info;
585
	info = audio_output_get_info(obs->audio.audio);
586 587 588

	return (uint64_t)frames * 1000000000ULL /
		(uint64_t)info->samples_per_sec;
589 590
}

591
/* maximum "direct" timestamp variance in nanoseconds */
592
#define MAX_TS_VAR          5000000000ULL
593
/* maximum time that timestamp can jump in nanoseconds */
594
#define MAX_TIMESTAMP_JUMP  2000000000ULL
595

596
static inline void reset_audio_timing(obs_source_t *source, uint64_t timetamp)
597 598 599 600
{
	source->timing_set    = true;
	source->timing_adjust = os_gettime_ns() - timetamp;
}
601

602
static inline void handle_ts_jump(obs_source_t *source, uint64_t expected,
603
		uint64_t ts, uint64_t diff)
604
{
J
jp9000 已提交
605
	blog(LOG_DEBUG, "Timestamp for source '%s' jumped by '%"PRIu64"', "
606
	                "expected value %"PRIu64", input value %"PRIu64,
607
	                source->context.name, diff, expected, ts);
608 609

	/* if has video, ignore audio data until reset */
610
	if (!(source->info.output_flags & OBS_SOURCE_ASYNC))
611 612 613
		reset_audio_timing(source, ts);
}

614 615 616 617 618 619 620 621 622 623
#define VOL_MIN -96.0f
#define VOL_MAX  0.0f

static inline float to_db(float val)
{
	float db = 20.0f * log10f(val);
	return isfinite(db) ? db : VOL_MIN;
}

static void calc_volume_levels(struct obs_source *source, float *array,
624
		size_t frames, float volume)
625 626 627 628 629
{
	float sum_val = 0.0f;
	float max_val = 0.0f;
	float rms_val = 0.0f;

630
	audio_t        *audio          = obs_get_audio();
631 632
	const uint32_t sample_rate    = audio_output_get_sample_rate(audio);
	const size_t   channels       = audio_output_get_channels(audio);
633 634 635 636 637 638 639 640 641 642 643 644
	const size_t   count          = frames * channels;
	const size_t   vol_peak_delay = sample_rate * 3;
	const float    alpha          = 0.15f;

	for (size_t i = 0; i < count; i++) {
		float val      = array[i];
		float val_pow2 = val * val;

		sum_val += val_pow2;
		max_val  = fmaxf(max_val, val_pow2);
	}

645 646 647 648 649 650 651 652 653
	/*
	  We want the volume meters scale linearly in respect to current
	  volume, so, no need to apply volume here.
	*/

	UNUSED_PARAMETER(volume);

	rms_val = to_db(sqrtf(sum_val / (float)count));
	max_val = to_db(sqrtf(max_val));
654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672

	if (max_val > source->vol_max)
		source->vol_max = max_val;
	else
		source->vol_max = alpha * source->vol_max +
			(1.0f - alpha) * max_val;

	if (source->vol_max > source->vol_peak ||
	    source->vol_update_count > vol_peak_delay) {
		source->vol_peak         = source->vol_max;
		source->vol_update_count = 0;
	} else {
		source->vol_update_count += count;
	}

	source->vol_mag = alpha * rms_val + source->vol_mag * (1.0f - alpha);
}

/* TODO update peak/etc later */
673
static void obs_source_update_volume_level(obs_source_t *source,
674 675 676 677 678
		struct audio_data *in)
{
	if (source && in) {
		struct calldata data = {0};

679 680
		calc_volume_levels(source, (float*)in->data[0], in->frames,
				in->volume);
681

682 683 684 685
		calldata_set_ptr  (&data, "source",    source);
		calldata_set_float(&data, "level",     source->vol_max);
		calldata_set_float(&data, "magnitude", source->vol_mag);
		calldata_set_float(&data, "peak",      source->vol_peak);
686 687 688 689 690 691 692 693 694 695

		signal_handler_signal(source->context.signals, "volume_level",
				&data);
		signal_handler_signal(obs->signals, "source_volume_level",
				&data);

		calldata_free(&data);
	}
}

696
static void source_output_audio_line(obs_source_t *source,
697 698 699
		const struct audio_data *data)
{
	struct audio_data in = *data;
700
	uint64_t diff;
701 702

	if (!source->timing_set) {
703
		reset_audio_timing(source, in.timestamp);
704 705

		/* detects 'directly' set timestamps as long as they're within
706
		 * a certain threshold */
707
		if ((source->timing_adjust + MAX_TS_VAR) < MAX_TS_VAR * 2)
708
			source->timing_adjust = 0;
709

J
jp9000 已提交
710
	} else if (source->next_audio_ts_min != 0) {
711
		bool ts_under = (in.timestamp < source->next_audio_ts_min);
712

713 714 715 716 717
		diff = ts_under ?
			(source->next_audio_ts_min - in.timestamp) :
			(in.timestamp - source->next_audio_ts_min);

		/* smooth audio if lower or within threshold */
718
		if (diff > MAX_TIMESTAMP_JUMP)
719 720 721 722
			handle_ts_jump(source, source->next_audio_ts_min,
					in.timestamp, diff);
		else if (ts_under || diff < TS_SMOOTHING_THRESHOLD)
			in.timestamp = source->next_audio_ts_min;
723 724
	}

725
	source->next_audio_ts_min = in.timestamp +
J
jp9000 已提交
726
		conv_frames_to_time(in.frames);
727

J
jp9000 已提交
728
	in.timestamp += source->timing_adjust + source->sync_offset;
J
jp9000 已提交
729 730
	in.volume = source->user_volume * source->present_volume *
		obs->audio.user_volume * obs->audio.present_volume;
731

732
	audio_line_output(source->audio_line, &in);
733
	obs_source_update_volume_level(source, &in);
734 735
}

736 737 738 739 740 741 742 743
enum convert_type {
	CONVERT_NONE,
	CONVERT_NV12,
	CONVERT_420,
	CONVERT_422_U,
	CONVERT_422_Y,
};

744
static inline enum convert_type get_convert_type(enum video_format format)
745
{
746
	switch (format) {
747 748 749 750 751 752 753 754 755 756 757
	case VIDEO_FORMAT_I420:
		return CONVERT_420;
	case VIDEO_FORMAT_NV12:
		return CONVERT_NV12;

	case VIDEO_FORMAT_YVYU:
	case VIDEO_FORMAT_YUY2:
		return CONVERT_422_Y;
	case VIDEO_FORMAT_UYVY:
		return CONVERT_422_U;

758
	case VIDEO_FORMAT_NONE:
759 760 761 762 763 764 765 766 767
	case VIDEO_FORMAT_RGBA:
	case VIDEO_FORMAT_BGRA:
	case VIDEO_FORMAT_BGRX:
		return CONVERT_NONE;
	}

	return CONVERT_NONE;
}

768
static inline bool set_packed422_sizes(struct obs_source *source,
769
		struct obs_source_frame *frame)
770 771
{
	source->async_convert_height = frame->height;
772 773 774 775 776 777
	source->async_convert_width  = frame->width / 2;
	source->async_texture_format = GS_BGRA;
	return true;
}

static inline bool set_planar420_sizes(struct obs_source *source,
778
		struct obs_source_frame *frame)
779 780 781 782 783 784 785 786 787 788
{
	uint32_t size = frame->width * frame->height;
	size += size/2;

	source->async_convert_width   = frame->width;
	source->async_convert_height  = (size / frame->width + 1) & 0xFFFFFFFE;
	source->async_texture_format  = GS_R8;
	source->async_plane_offset[0] = frame->width * frame->height;
	source->async_plane_offset[1] = source->async_plane_offset[0] +
		frame->width * frame->height / 4;
789 790 791 792
	return true;
}

static inline bool init_gpu_conversion(struct obs_source *source,
793
		struct obs_source_frame *frame)
794 795 796 797 798 799 800
{
	switch (get_convert_type(frame->format)) {
		case CONVERT_422_Y:
		case CONVERT_422_U:
			return set_packed422_sizes(source, frame);

		case CONVERT_420:
801 802 803 804
			return set_planar420_sizes(source, frame);

		case CONVERT_NV12:
			assert(false && "NV12 not yet implemented");
805 806 807 808 809 810 811 812 813 814 815
			/* TODO: implement conversion */
			break;

		case CONVERT_NONE:
			assert(false && "No conversion requested");
			break;

	}
	return false;
}

816 817 818 819 820 821 822 823 824 825 826
static inline enum gs_color_format convert_video_format(
		enum video_format format)
{
	if (format == VIDEO_FORMAT_RGBA)
		return GS_RGBA;
	else if (format == VIDEO_FORMAT_BGRA)
		return GS_BGRA;

	return GS_BGRX;
}

827
static inline bool set_async_texture_size(struct obs_source *source,
828
		struct obs_source_frame *frame)
829 830 831 832 833 834 835 836 837 838 839
{
	enum convert_type prev, cur;
	prev = get_convert_type(source->async_format);
	cur  = get_convert_type(frame->format);
	if (source->async_texture) {
		if (source->async_width  == frame->width &&
		    source->async_height == frame->height &&
		    prev == cur)
			return true;
	}

840 841
	gs_texture_destroy(source->async_texture);
	gs_texrender_destroy(source->async_convert_texrender);
842 843 844 845 846 847
	source->async_convert_texrender = NULL;

	if (cur != CONVERT_NONE && init_gpu_conversion(source, frame)) {
		source->async_gpu_conversion = true;

		source->async_convert_texrender =
848
			gs_texrender_create(GS_BGRX, GS_ZS_NONE);
849

850
		source->async_texture = gs_texture_create(
851 852
				source->async_convert_width,
				source->async_convert_height,
853 854
				source->async_texture_format,
				1, NULL, GS_DYNAMIC);
855 856

	} else {
857 858
		enum gs_color_format format = convert_video_format(
				frame->format);
859 860
		source->async_gpu_conversion = false;

861
		source->async_texture = gs_texture_create(
862
				frame->width, frame->height,
863
				format, 1, NULL, GS_DYNAMIC);
864 865 866 867 868 869 870 871 872 873
	}

	if (!source->async_texture)
		return false;

	source->async_width  = frame->width;
	source->async_height = frame->height;
	return true;
}

874
static void upload_raw_frame(gs_texture_t *tex,
875
		const struct obs_source_frame *frame)
876 877 878 879
{
	switch (get_convert_type(frame->format)) {
		case CONVERT_422_U:
		case CONVERT_422_Y:
880
			gs_texture_set_image(tex, frame->data[0],
881 882 883 884
					frame->linesize[0], false);
			break;

		case CONVERT_420:
885
			gs_texture_set_image(tex, frame->data[0],
886 887 888 889
					frame->width, false);
			break;

		case CONVERT_NV12:
890 891 892 893 894 895 896 897 898 899 900 901 902
			assert(false && "Conversion not yet implemented");
			break;

		case CONVERT_NONE:
			assert(false && "No conversion requested");
			break;
	}
}

static const char *select_conversion_technique(enum video_format format)
{
	switch (format) {
		case VIDEO_FORMAT_UYVY:
903
			return "UYVY_Reverse";
904 905 906 907 908 909 910 911

		case VIDEO_FORMAT_YUY2:
			return "YUY2_Reverse";

		case VIDEO_FORMAT_YVYU:
			return "YVYU_Reverse";

		case VIDEO_FORMAT_I420:
912 913 914
			return "I420_Reverse";

		case VIDEO_FORMAT_NV12:
915 916 917 918 919 920 921 922 923 924 925 926 927
			assert(false && "Conversion not yet implemented");
			break;

		case VIDEO_FORMAT_BGRA:
		case VIDEO_FORMAT_BGRX:
		case VIDEO_FORMAT_RGBA:
		case VIDEO_FORMAT_NONE:
			assert(false && "No conversion requested");
			break;
	}
	return NULL;
}

928
static inline void set_eparam(gs_effect_t *effect, const char *name, float val)
929
{
930
	gs_eparam_t *param = gs_effect_get_param_by_name(effect, name);
931
	gs_effect_set_float(param, val);
932 933 934
}

static bool update_async_texrender(struct obs_source *source,
935
		const struct obs_source_frame *frame)
936
{
937 938
	gs_texture_t   *tex       = source->async_texture;
	gs_texrender_t *texrender = source->async_convert_texrender;
939

940
	gs_texrender_reset(texrender);
941 942 943 944 945 946

	upload_raw_frame(tex, frame);

	uint32_t cx = source->async_width;
	uint32_t cy = source->async_height;

947 948 949
	float convert_width  = (float)source->async_convert_width;
	float convert_height = (float)source->async_convert_height;

950 951
	gs_effect_t *conv = obs->video.conversion_effect;
	gs_technique_t *tech = gs_effect_get_technique(conv,
952 953
			select_conversion_technique(frame->format));

954
	if (!gs_texrender_begin(texrender, cx, cy))
955 956
		return false;

957 958
	gs_technique_begin(tech);
	gs_technique_begin_pass(tech, 0);
959

960
	gs_effect_set_texture(gs_effect_get_param_by_name(conv, "image"), tex);
961 962 963 964
	set_eparam(conv, "width",  (float)cx);
	set_eparam(conv, "height", (float)cy);
	set_eparam(conv, "width_i",  1.0f / cx);
	set_eparam(conv, "height_i", 1.0f / cy);
965
	set_eparam(conv, "width_d2",  cx * 0.5f);
966
	set_eparam(conv, "height_d2", cy * 0.5f);
967
	set_eparam(conv, "width_d2_i",  1.0f / (cx * 0.5f));
968
	set_eparam(conv, "height_d2_i", 1.0f / (cy * 0.5f));
969 970 971 972 973 974 975 976 977 978
	set_eparam(conv, "input_width",  convert_width);
	set_eparam(conv, "input_height", convert_height);
	set_eparam(conv, "input_width_i",  1.0f / convert_width);
	set_eparam(conv, "input_height_i", 1.0f / convert_height);
	set_eparam(conv, "input_width_i_d2",  (1.0f / convert_width)  * 0.5f);
	set_eparam(conv, "input_height_i_d2", (1.0f / convert_height) * 0.5f);
	set_eparam(conv, "u_plane_offset",
			(float)source->async_plane_offset[0]);
	set_eparam(conv, "v_plane_offset",
			(float)source->async_plane_offset[1]);
979 980 981 982 983

	gs_ortho(0.f, (float)cx, 0.f, (float)cy, -100.f, 100.f);

	gs_draw_sprite(tex, 0, cx, cy);

984 985
	gs_technique_end_pass(tech);
	gs_technique_end(tech);
986

987
	gs_texrender_end(texrender);
988 989 990 991

	return true;
}

992
static bool update_async_texture(struct obs_source *source,
993
		const struct obs_source_frame *frame)
994
{
995 996
	gs_texture_t      *tex       = source->async_texture;
	gs_texrender_t    *texrender = source->async_convert_texrender;
997
	enum convert_type type      = get_convert_type(frame->format);
998
	uint8_t           *ptr;
999 1000
	uint32_t          linesize;

1001 1002 1003
	source->async_format     = frame->format;
	source->async_flip       = frame->flip;
	source->async_full_range = frame->full_range;
1004 1005
	memcpy(source->async_color_matrix, frame->color_matrix,
			sizeof(frame->color_matrix));
1006 1007 1008 1009
	memcpy(source->async_color_range_min, frame->color_range_min,
			sizeof frame->color_range_min);
	memcpy(source->async_color_range_max, frame->color_range_max,
			sizeof frame->color_range_max);
1010

1011 1012 1013
	if (source->async_gpu_conversion && texrender)
		return update_async_texrender(source, frame);

1014
	if (type == CONVERT_NONE) {
1015
		gs_texture_set_image(tex, frame->data[0], frame->linesize[0],
1016
				false);
1017 1018 1019
		return true;
	}

1020
	if (!gs_texture_map(tex, &ptr, &linesize))
1021 1022 1023
		return false;

	if (type == CONVERT_420)
J
jp9000 已提交
1024 1025 1026
		decompress_420((const uint8_t* const*)frame->data,
				frame->linesize,
				0, frame->height, ptr, linesize);
1027 1028

	else if (type == CONVERT_NV12)
J
jp9000 已提交
1029 1030 1031
		decompress_nv12((const uint8_t* const*)frame->data,
				frame->linesize,
				0, frame->height, ptr, linesize);
1032 1033

	else if (type == CONVERT_422_Y)
1034
		decompress_422(frame->data[0], frame->linesize[0],
J
jp9000 已提交
1035
				0, frame->height, ptr, linesize, true);
1036 1037

	else if (type == CONVERT_422_U)
1038
		decompress_422(frame->data[0], frame->linesize[0],
J
jp9000 已提交
1039
				0, frame->height, ptr, linesize, false);
1040

1041
	gs_texture_unmap(tex);
1042 1043 1044
	return true;
}

1045
static inline void obs_source_draw_texture(struct obs_source *source,
1046
		gs_effect_t *effect, float *color_matrix,
1047
		float const *color_range_min, float const *color_range_max)
1048
{
1049 1050
	gs_texture_t *tex = source->async_texture;
	gs_eparam_t  *param;
1051

1052
	if (source->async_convert_texrender)
1053
		tex = gs_texrender_get_texture(source->async_convert_texrender);
1054

P
Palana 已提交
1055
	if (color_range_min) {
1056
		size_t const size = sizeof(float) * 3;
1057 1058
		param = gs_effect_get_param_by_name(effect, "color_range_min");
		gs_effect_set_val(param, color_range_min, size);
P
Palana 已提交
1059
	}
1060

P
Palana 已提交
1061 1062
	if (color_range_max) {
		size_t const size = sizeof(float) * 3;
1063 1064
		param = gs_effect_get_param_by_name(effect, "color_range_max");
		gs_effect_set_val(param, color_range_max, size);
P
Palana 已提交
1065
	}
1066

P
Palana 已提交
1067
	if (color_matrix) {
1068 1069
		param = gs_effect_get_param_by_name(effect, "color_matrix");
		gs_effect_set_val(param, color_matrix, sizeof(float) * 16);
1070 1071
	}

1072 1073
	param = gs_effect_get_param_by_name(effect, "image");
	gs_effect_set_texture(param, tex);
1074

1075 1076
	gs_draw_sprite(tex, source->async_flip ? GS_FLIP_V : 0, 0, 0);
}
1077

1078 1079
static void obs_source_draw_async_texture(struct obs_source *source)
{
1080
	gs_effect_t    *effect        = gs_get_effect();
1081 1082 1083 1084
	bool           yuv           = format_is_yuv(source->async_format);
	bool           limited_range = yuv && !source->async_full_range;
	const char     *type         = yuv ? "DrawMatrix" : "Draw";
	bool           def_draw      = (!effect);
1085
	gs_technique_t *tech          = NULL;
1086 1087 1088

	if (def_draw) {
		effect = obs_get_default_effect();
1089 1090 1091
		tech = gs_effect_get_technique(effect, type);
		gs_technique_begin(tech);
		gs_technique_begin_pass(tech, 0);
1092 1093 1094
	}

	obs_source_draw_texture(source, effect,
1095 1096 1097
			yuv ? source->async_color_matrix : NULL,
			limited_range ? source->async_color_range_min : NULL,
			limited_range ? source->async_color_range_max : NULL);
1098 1099

	if (def_draw) {
1100 1101
		gs_technique_end_pass(tech);
		gs_technique_end(tech);
1102
	}
1103 1104
}

1105
static void obs_source_render_async_video(obs_source_t *source)
1106
{
1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118
	if (!source->async_rendered) {
		struct obs_source_frame *frame = obs_source_get_frame(source);

		source->async_rendered = true;
		if (frame) {
			if (!set_async_texture_size(source, frame))
				return;
			if (!update_async_texture(source, frame))
				return;
		}

		obs_source_release_frame(source, frame);
1119
	}
1120

1121 1122
	if (source->async_texture)
		obs_source_draw_async_texture(source);
1123 1124
}

1125
static inline void obs_source_render_filters(obs_source_t *source)
1126 1127 1128 1129 1130 1131
{
	source->rendering_filter = true;
	obs_source_video_render(source->filters.array[0]);
	source->rendering_filter = false;
}

1132
static inline void obs_source_default_render(obs_source_t *source,
J
jp9000 已提交
1133
		bool color_matrix)
1134
{
1135
	gs_effect_t    *effect     = obs->video.default_effect;
1136
	const char     *tech_name = color_matrix ? "DrawMatrix" : "Draw";
1137
	gs_technique_t *tech       = gs_effect_get_technique(effect, tech_name);
1138
	size_t         passes, i;
1139

1140
	passes = gs_technique_begin(tech);
1141
	for (i = 0; i < passes; i++) {
1142
		gs_technique_begin_pass(tech, i);
1143 1144
		if (source->context.data)
			source->info.video_render(source->context.data, effect);
1145
		gs_technique_end_pass(tech);
1146
	}
1147
	gs_technique_end(tech);
1148 1149
}

1150
static inline void obs_source_main_render(obs_source_t *source)
1151
{
1152 1153 1154
	uint32_t flags      = source->info.output_flags;
	bool color_matrix   = (flags & OBS_SOURCE_COLOR_MATRIX) != 0;
	bool custom_draw    = (flags & OBS_SOURCE_CUSTOM_DRAW) != 0;
1155 1156
	bool default_effect = !source->filter_parent &&
	                      source->filters.num == 0 &&
1157
	                      !custom_draw;
1158 1159

	if (default_effect)
J
jp9000 已提交
1160
		obs_source_default_render(source, color_matrix);
1161
	else if (source->context.data)
1162
		source->info.video_render(source->context.data,
1163
				custom_draw ? NULL : gs_get_effect());
1164 1165
}

1166
void obs_source_video_render(obs_source_t *source)
J
jp9000 已提交
1167
{
1168
	if (!source_valid(source)) return;
J
jp9000 已提交
1169

1170 1171
	if (source->filters.num && !source->rendering_filter)
		obs_source_render_filters(source);
1172

1173 1174 1175 1176
	else if (source->info.video_render)
		obs_source_main_render(source);

	else if (source->filter_target)
1177 1178
		obs_source_video_render(source->filter_target);

1179
	else
1180
		obs_source_render_async_video(source);
J
jp9000 已提交
1181 1182
}

1183
uint32_t obs_source_get_width(obs_source_t *source)
J
jp9000 已提交
1184
{
1185
	if (!source_valid(source)) return 0;
1186

1187 1188
	if (source->info.get_width)
		return source->info.get_width(source->context.data);
1189
	return source->async_width;
J
jp9000 已提交
1190 1191
}

1192
uint32_t obs_source_get_height(obs_source_t *source)
J
jp9000 已提交
1193
{
1194
	if (!source_valid(source)) return 0;
1195

1196 1197
	if (source->info.get_height)
		return source->info.get_height(source->context.data);
1198
	return source->async_height;
J
jp9000 已提交
1199 1200
}

1201
obs_source_t *obs_filter_get_parent(obs_source_t *filter)
1202
{
J
jp9000 已提交
1203
	return filter ? filter->filter_parent : NULL;
1204 1205
}

1206
obs_source_t *obs_filter_get_target(obs_source_t *filter)
J
jp9000 已提交
1207
{
J
jp9000 已提交
1208
	return filter ? filter->filter_target : NULL;
J
jp9000 已提交
1209 1210
}

1211
void obs_source_filter_add(obs_source_t *source, obs_source_t *filter)
J
jp9000 已提交
1212
{
J
jp9000 已提交
1213 1214 1215
	if (!source || !filter)
		return;

1216 1217
	pthread_mutex_lock(&source->filter_mutex);

J
jp9000 已提交
1218
	if (da_find(source->filters, &filter, 0) != DARRAY_INVALID) {
J
jp9000 已提交
1219 1220 1221 1222 1223 1224
		blog(LOG_WARNING, "Tried to add a filter that was already "
		                  "present on the source");
		return;
	}

	if (source->filters.num) {
1225
		obs_source_t **back = da_end(source->filters);
J
jp9000 已提交
1226 1227 1228 1229
		(*back)->filter_target = filter;
	}

	da_push_back(source->filters, &filter);
1230 1231 1232 1233

	pthread_mutex_unlock(&source->filter_mutex);

	filter->filter_parent = source;
J
jp9000 已提交
1234 1235 1236
	filter->filter_target = source;
}

1237
void obs_source_filter_remove(obs_source_t *source, obs_source_t *filter)
J
jp9000 已提交
1238
{
1239 1240
	size_t idx;

J
jp9000 已提交
1241 1242 1243
	if (!source || !filter)
		return;

1244 1245 1246
	pthread_mutex_lock(&source->filter_mutex);

	idx = da_find(source->filters, &filter, 0);
J
jp9000 已提交
1247
	if (idx == DARRAY_INVALID)
J
jp9000 已提交
1248 1249 1250
		return;

	if (idx > 0) {
1251
		obs_source_t *prev = source->filters.array[idx-1];
J
jp9000 已提交
1252 1253 1254 1255
		prev->filter_target = filter->filter_target;
	}

	da_erase(source->filters, idx);
1256 1257 1258 1259

	pthread_mutex_unlock(&source->filter_mutex);

	filter->filter_parent = NULL;
J
jp9000 已提交
1260 1261 1262
	filter->filter_target = NULL;
}

1263
void obs_source_filter_set_order(obs_source_t *source, obs_source_t *filter,
J
jp9000 已提交
1264
		enum obs_order_movement movement)
J
jp9000 已提交
1265
{
J
jp9000 已提交
1266 1267 1268 1269 1270 1271
	size_t idx, i;

	if (!source || !filter)
		return;

	idx = da_find(source->filters, &filter, 0);
J
jp9000 已提交
1272
	if (idx == DARRAY_INVALID)
J
jp9000 已提交
1273 1274
		return;

J
jp9000 已提交
1275
	if (movement == OBS_ORDER_MOVE_UP) {
J
jp9000 已提交
1276 1277 1278 1279
		if (idx == source->filters.num-1)
			return;
		da_move_item(source->filters, idx, idx+1);

J
jp9000 已提交
1280
	} else if (movement == OBS_ORDER_MOVE_DOWN) {
J
jp9000 已提交
1281 1282 1283 1284
		if (idx == 0)
			return;
		da_move_item(source->filters, idx, idx-1);

J
jp9000 已提交
1285
	} else if (movement == OBS_ORDER_MOVE_TOP) {
J
jp9000 已提交
1286 1287 1288 1289
		if (idx == source->filters.num-1)
			return;
		da_move_item(source->filters, idx, source->filters.num-1);

J
jp9000 已提交
1290
	} else if (movement == OBS_ORDER_MOVE_BOTTOM) {
J
jp9000 已提交
1291 1292 1293 1294 1295
		if (idx == 0)
			return;
		da_move_item(source->filters, idx, 0);
	}

1296
	/* reorder filter targets, not the nicest way of dealing with things */
J
jp9000 已提交
1297
	for (i = 0; i < source->filters.num; i++) {
1298
		obs_source_t *next_filter = (i == source->filters.num-1) ?
J
jp9000 已提交
1299 1300 1301 1302 1303
			source : source->filters.array[idx+1];
		source->filters.array[i]->filter_target = next_filter;
	}
}

1304
obs_data_t *obs_source_get_settings(obs_source_t *source)
J
jp9000 已提交
1305
{
J
jp9000 已提交
1306 1307
	if (!source) return NULL;

1308 1309
	obs_data_addref(source->context.settings);
	return source->context.settings;
J
jp9000 已提交
1310 1311
}

1312
static inline struct obs_source_frame *filter_async_video(obs_source_t *source,
1313
		struct obs_source_frame *in)
1314 1315 1316 1317
{
	size_t i;
	for (i = source->filters.num; i > 0; i--) {
		struct obs_source *filter = source->filters.array[i-1];
1318 1319

		if (filter->context.data && filter->info.filter_video) {
1320 1321
			in = filter->info.filter_video(filter->context.data,
					in);
1322 1323 1324 1325 1326 1327 1328 1329
			if (!in)
				return NULL;
		}
	}

	return in;
}

1330 1331
static inline void copy_frame_data_line(struct obs_source_frame *dst,
		const struct obs_source_frame *src, uint32_t plane, uint32_t y)
1332
{
1333 1334 1335 1336
	uint32_t pos_src = y * src->linesize[plane];
	uint32_t pos_dst = y * dst->linesize[plane];
	uint32_t bytes = dst->linesize[plane] < src->linesize[plane] ?
		dst->linesize[plane] : src->linesize[plane];
1337 1338 1339 1340

	memcpy(dst->data[plane] + pos_dst, src->data[plane] + pos_src, bytes);
}

1341 1342 1343
static inline void copy_frame_data_plane(struct obs_source_frame *dst,
		const struct obs_source_frame *src,
		uint32_t plane, uint32_t lines)
1344
{
1345
	if (dst->linesize[plane] != src->linesize[plane])
1346 1347 1348 1349
		for (uint32_t y = 0; y < lines; y++)
			copy_frame_data_line(dst, src, plane, y);
	else
		memcpy(dst->data[plane], src->data[plane],
1350
				dst->linesize[plane] * lines);
1351 1352
}

1353 1354
static void copy_frame_data(struct obs_source_frame *dst,
		const struct obs_source_frame *src)
1355 1356
{
	dst->flip         = src->flip;
1357
	dst->full_range   = src->full_range;
1358 1359
	dst->timestamp    = src->timestamp;
	memcpy(dst->color_matrix, src->color_matrix, sizeof(float) * 16);
1360 1361 1362 1363 1364
	if (!dst->full_range) {
		size_t const size = sizeof(float) * 3;
		memcpy(dst->color_range_min, src->color_range_min, size);
		memcpy(dst->color_range_max, src->color_range_max, size);
	}
1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388

	switch (dst->format) {
	case VIDEO_FORMAT_I420:
		copy_frame_data_plane(dst, src, 0, dst->height);
		copy_frame_data_plane(dst, src, 1, dst->height/2);
		copy_frame_data_plane(dst, src, 2, dst->height/2);
		break;

	case VIDEO_FORMAT_NV12:
		copy_frame_data_plane(dst, src, 0, dst->height);
		copy_frame_data_plane(dst, src, 1, dst->height/2);
		break;

	case VIDEO_FORMAT_YVYU:
	case VIDEO_FORMAT_YUY2:
	case VIDEO_FORMAT_UYVY:
	case VIDEO_FORMAT_NONE:
	case VIDEO_FORMAT_RGBA:
	case VIDEO_FORMAT_BGRA:
	case VIDEO_FORMAT_BGRX:
		copy_frame_data_plane(dst, src, 0, dst->height);
	}
}

1389 1390
static inline struct obs_source_frame *cache_video(
		const struct obs_source_frame *frame)
1391
{
1392
	/* TODO: use an actual cache */
1393 1394
	struct obs_source_frame *new_frame = obs_source_frame_create(
			frame->format, frame->width, frame->height);
1395

1396
	copy_frame_data(new_frame, frame);
1397
	return new_frame;
1398 1399
}

1400
static bool ready_async_frame(obs_source_t *source, uint64_t sys_time);
1401 1402 1403 1404

static inline void cycle_frames(struct obs_source *source)
{
	if (source->video_frames.num && !source->activate_refs)
1405
		ready_async_frame(source, os_gettime_ns());
1406 1407
}

1408
void obs_source_output_video(obs_source_t *source,
1409
		const struct obs_source_frame *frame)
1410
{
J
jp9000 已提交
1411 1412 1413
	if (!source || !frame)
		return;

1414
	struct obs_source_frame *output = cache_video(frame);
1415 1416 1417 1418 1419

	pthread_mutex_lock(&source->filter_mutex);
	output = filter_async_video(source, output);
	pthread_mutex_unlock(&source->filter_mutex);

1420 1421
	if (output) {
		pthread_mutex_lock(&source->video_mutex);
1422
		cycle_frames(source);
1423 1424 1425
		da_push_back(source->video_frames, &output);
		pthread_mutex_unlock(&source->video_mutex);
	}
1426 1427
}

1428
static inline struct obs_audio_data *filter_async_audio(obs_source_t *source,
1429
		struct obs_audio_data *in)
1430 1431 1432 1433
{
	size_t i;
	for (i = source->filters.num; i > 0; i--) {
		struct obs_source *filter = source->filters.array[i-1];
1434 1435

		if (filter->context.data && filter->info.filter_audio) {
1436 1437
			in = filter->info.filter_audio(filter->context.data,
					in);
1438 1439 1440 1441 1442 1443 1444 1445
			if (!in)
				return NULL;
		}
	}

	return in;
}

1446
static inline void reset_resampler(obs_source_t *source,
1447
		const struct obs_source_audio *audio)
1448
{
J
jp9000 已提交
1449
	const struct audio_output_info *obs_info;
1450 1451
	struct resample_info output_info;

1452
	obs_info = audio_output_get_info(obs->audio.audio);
1453

1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477
	output_info.format           = obs_info->format;
	output_info.samples_per_sec  = obs_info->samples_per_sec;
	output_info.speakers         = obs_info->speakers;

	source->sample_info.format          = audio->format;
	source->sample_info.samples_per_sec = audio->samples_per_sec;
	source->sample_info.speakers        = audio->speakers;

	if (source->sample_info.samples_per_sec == obs_info->samples_per_sec &&
	    source->sample_info.format          == obs_info->format          &&
	    source->sample_info.speakers        == obs_info->speakers) {
		source->audio_failed = false;
		return;
	}

	audio_resampler_destroy(source->resampler);
	source->resampler = audio_resampler_create(&output_info,
			&source->sample_info);

	source->audio_failed = source->resampler == NULL;
	if (source->resampler == NULL)
		blog(LOG_ERROR, "creation of resampler failed");
}

1478
static inline void copy_audio_data(obs_source_t *source,
J
jp9000 已提交
1479
		const uint8_t *const data[], uint32_t frames, uint64_t ts)
1480
{
1481 1482
	size_t planes    = audio_output_get_planes(obs->audio.audio);
	size_t blocksize = audio_output_get_block_size(obs->audio.audio);
1483 1484
	size_t size      = (size_t)frames * blocksize;
	bool   resize    = source->audio_storage_size < size;
1485

J
jp9000 已提交
1486 1487
	source->audio_data.frames    = frames;
	source->audio_data.timestamp = ts;
1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500

	for (size_t i = 0; i < planes; i++) {
		/* ensure audio storage capacity */
		if (resize) {
			bfree(source->audio_data.data[i]);
			source->audio_data.data[i] = bmalloc(size);
		}

		memcpy(source->audio_data.data[i], data[i], size);
	}

	if (resize)
		source->audio_storage_size = size;
1501 1502 1503
}

/* resamples/remixes new audio to the designated main audio output format */
1504
static void process_audio(obs_source_t *source,
1505
		const struct obs_source_audio *audio)
1506 1507 1508 1509 1510 1511 1512 1513 1514 1515
{
	if (source->sample_info.samples_per_sec != audio->samples_per_sec ||
	    source->sample_info.format          != audio->format          ||
	    source->sample_info.speakers        != audio->speakers)
		reset_resampler(source, audio);

	if (source->audio_failed)
		return;

	if (source->resampler) {
J
jp9000 已提交
1516
		uint8_t  *output[MAX_AV_PLANES];
1517 1518 1519
		uint32_t frames;
		uint64_t offset;

1520 1521 1522 1523 1524
		memset(output, 0, sizeof(output));

		audio_resampler_resample(source->resampler,
				output, &frames, &offset,
				audio->data, audio->frames);
1525

J
jp9000 已提交
1526
		copy_audio_data(source, (const uint8_t *const *)output, frames,
1527 1528 1529 1530 1531
				audio->timestamp - offset);
	} else {
		copy_audio_data(source, audio->data, audio->frames,
				audio->timestamp);
	}
1532 1533
}

1534
void obs_source_output_audio(obs_source_t *source,
1535
		const struct obs_source_audio *audio)
1536
{
J
jp9000 已提交
1537
	uint32_t flags;
1538
	struct obs_audio_data *output;
1539

J
jp9000 已提交
1540 1541 1542 1543
	if (!source || !audio)
		return;

	flags = source->info.output_flags;
1544
	process_audio(source, audio);
1545 1546

	pthread_mutex_lock(&source->filter_mutex);
1547
	output = filter_async_audio(source, &source->audio_data);
1548 1549

	if (output) {
1550
		bool async = (flags & OBS_SOURCE_ASYNC) != 0;
J
jp9000 已提交
1551

1552 1553
		pthread_mutex_lock(&source->audio_mutex);

1554 1555
		/* wait for video to start before outputting any audio so we
		 * have a base for sync */
1556
		if (source->timing_set || !async) {
1557
			struct audio_data data;
1558

J
jp9000 已提交
1559
			for (int i = 0; i < MAX_AV_PLANES; i++)
1560 1561
				data.data[i] = output->data[i];

1562 1563 1564
			data.frames    = output->frames;
			data.timestamp = output->timestamp;
			source_output_audio_line(source, &data);
1565 1566 1567 1568 1569 1570 1571 1572
		}

		pthread_mutex_unlock(&source->audio_mutex);
	}

	pthread_mutex_unlock(&source->filter_mutex);
}

1573
static inline bool frame_out_of_bounds(obs_source_t *source, uint64_t ts)
1574
{
J
jp9000 已提交
1575 1576 1577 1578
	if (ts < source->last_frame_ts)
		return ((source->last_frame_ts - ts) > MAX_TIMESTAMP_JUMP);
	else
		return ((ts - source->last_frame_ts) > MAX_TIMESTAMP_JUMP);
1579 1580
}

J
jp9000 已提交
1581 1582
/* #define DEBUG_ASYNC_FRAMES 1 */

1583
static bool ready_async_frame(obs_source_t *source, uint64_t sys_time)
1584
{
1585 1586
	struct obs_source_frame *next_frame = source->video_frames.array[0];
	struct obs_source_frame *frame      = NULL;
1587 1588 1589 1590
	uint64_t sys_offset = sys_time - source->last_sys_timestamp;
	uint64_t frame_time = next_frame->timestamp;
	uint64_t frame_offset = 0;

J
jp9000 已提交
1591 1592 1593 1594 1595 1596 1597 1598 1599
#if DEBUG_ASYNC_FRAMES
	blog(LOG_DEBUG, "source->last_frame_ts: %llu, frame_time: %llu, "
			"sys_offset: %llu, frame_offset: %llu, "
			"number of frames: %lu",
			source->last_frame_ts, frame_time, sys_offset,
			frame_time - source->last_frame_ts,
			(unsigned long)source->video_frames.num);
#endif

1600 1601
	/* account for timestamp invalidation */
	if (frame_out_of_bounds(source, frame_time)) {
J
jp9000 已提交
1602 1603 1604
#if DEBUG_ASYNC_FRAMES
		blog(LOG_DEBUG, "timing jump");
#endif
1605
		source->last_frame_ts = next_frame->timestamp;
J
jp9000 已提交
1606
		return true;
1607 1608
	} else {
		frame_offset = frame_time - source->last_frame_ts;
J
jp9000 已提交
1609
		source->last_frame_ts += sys_offset;
1610 1611
	}

J
jp9000 已提交
1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631
	while (source->last_frame_ts > next_frame->timestamp) {

		/* this tries to reduce the needless frame duplication, also
		 * helps smooth out async rendering to frame boundaries.  In
		 * other words, tries to keep the framerate as smooth as
		 * possible */
		if ((source->last_frame_ts - next_frame->timestamp) < 1000000)
			break;

		if (frame)
			da_erase(source->video_frames, 0);

#if DEBUG_ASYNC_FRAMES
		blog(LOG_DEBUG, "new frame, "
				"source->last_frame_ts: %llu, "
				"next_frame->timestamp: %llu",
				source->last_frame_ts,
				next_frame->timestamp);
#endif

1632
		obs_source_frame_destroy(frame);
1633

1634 1635 1636
		if (source->video_frames.num == 1)
			return true;

1637
		frame = next_frame;
J
jp9000 已提交
1638
		next_frame = source->video_frames.array[1];
1639 1640 1641

		/* more timestamp checking and compensating */
		if ((next_frame->timestamp - frame_time) > MAX_TIMESTAMP_JUMP) {
J
jp9000 已提交
1642 1643 1644
#if DEBUG_ASYNC_FRAMES
			blog(LOG_DEBUG, "timing jump");
#endif
1645 1646 1647 1648 1649 1650 1651 1652
			source->last_frame_ts =
				next_frame->timestamp - frame_offset;
		}

		frame_time   = next_frame->timestamp;
		frame_offset = frame_time - source->last_frame_ts;
	}

J
jp9000 已提交
1653 1654 1655 1656
#if DEBUG_ASYNC_FRAMES
	if (!frame)
		blog(LOG_DEBUG, "no frame!");
#endif
1657

1658 1659 1660
	return frame != NULL;
}

1661
static inline struct obs_source_frame *get_closest_frame(obs_source_t *source,
1662 1663
		uint64_t sys_time)
{
1664
	if (ready_async_frame(source, sys_time)) {
1665
		struct obs_source_frame *frame = source->video_frames.array[0];
1666 1667 1668 1669 1670
		da_erase(source->video_frames, 0);
		return frame;
	}

	return NULL;
1671 1672
}

1673
/*
1674 1675
 * Ensures that cached frames are displayed on time.  If multiple frames
 * were cached between renders, then releases the unnecessary frames and uses
1676 1677
 * the frame with the closest timing to ensure sync.  Also ensures that timing
 * with audio is synchronized.
1678
 */
1679
struct obs_source_frame *obs_source_get_frame(obs_source_t *source)
J
jp9000 已提交
1680
{
1681
	struct obs_source_frame *frame = NULL;
1682
	uint64_t sys_time;
1683

J
jp9000 已提交
1684 1685 1686
	if (!source)
		return NULL;

1687 1688
	pthread_mutex_lock(&source->video_mutex);

J
jp9000 已提交
1689 1690
	sys_time = os_gettime_ns();

1691 1692 1693
	if (!source->video_frames.num)
		goto unlock;

1694 1695
	if (!source->last_frame_ts) {
		frame = source->video_frames.array[0];
1696 1697
		da_erase(source->video_frames, 0);

1698
		source->last_frame_ts = frame->timestamp;
1699
	} else {
1700
		frame = get_closest_frame(source, sys_time);
J
jp9000 已提交
1701 1702 1703 1704 1705 1706
	}

	/* reset timing to current system time */
	if (frame) {
		source->timing_adjust = sys_time - frame->timestamp;
		source->timing_set = true;
1707 1708
	}

J
jp9000 已提交
1709
unlock:
1710 1711 1712
	source->last_sys_timestamp = sys_time;

	pthread_mutex_unlock(&source->video_mutex);
1713

1714
	if (frame)
1715 1716
		obs_source_addref(source);

1717
	return frame;
J
jp9000 已提交
1718 1719
}

1720
void obs_source_release_frame(obs_source_t *source,
1721
		struct obs_source_frame *frame)
J
jp9000 已提交
1722
{
J
jp9000 已提交
1723
	if (source && frame) {
1724
		obs_source_frame_destroy(frame);
1725 1726
		obs_source_release(source);
	}
J
jp9000 已提交
1727
}
1728

1729
const char *obs_source_get_name(obs_source_t *source)
1730
{
1731
	return source ? source->context.name : NULL;
1732 1733
}

1734
void obs_source_set_name(obs_source_t *source, const char *name)
1735
{
J
jp9000 已提交
1736
	if (!source) return;
J
jp9000 已提交
1737 1738 1739 1740 1741 1742 1743

	if (!name || !*name || strcmp(name, source->context.name) != 0) {
		struct calldata data;
		char *prev_name = bstrdup(source->context.name);
		obs_context_data_setname(&source->context, name);

		calldata_init(&data);
1744 1745 1746
		calldata_set_ptr(&data, "source", source);
		calldata_set_string(&data, "new_name", source->context.name);
		calldata_set_string(&data, "prev_name", prev_name);
J
jp9000 已提交
1747 1748 1749 1750 1751
		signal_handler_signal(obs->signals, "source_rename", &data);
		signal_handler_signal(source->context.signals, "rename", &data);
		calldata_free(&data);
		bfree(prev_name);
	}
1752 1753
}

enum obs_source_type obs_source_get_type(obs_source_t *source)
{
	return source ? source->info.type : OBS_SOURCE_TYPE_INPUT;
}

const char *obs_source_get_id(obs_source_t *source)
{
	return source ? source->info.id : NULL;
}

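/* Draws the filter target directly with the given effect, skipping the
 * intermediate texture pass (see obs_source_process_filter() below). */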
static inline void render_filter_bypass(obs_source_t *target,
		gs_effect_t *effect, bool use_matrix)
{
	const char  *tech_name = use_matrix ? "DrawMatrix" : "Draw";
	gs_technique_t *tech    = gs_effect_get_technique(effect, tech_name);
	size_t      passes, i;

	passes = gs_technique_begin(tech);
	for (i = 0; i < passes; i++) {
		gs_technique_begin_pass(tech, i);
		obs_source_video_render(target);
		gs_technique_end_pass(tech);
	}
	gs_technique_end(tech);
}

static inline void render_filter_tex(gs_texture_t *tex, gs_effect_t *effect,
		uint32_t width, uint32_t height, bool use_matrix)
{
	const char  *tech_name = use_matrix ? "DrawMatrix" : "Draw";
	gs_technique_t *tech    = gs_effect_get_technique(effect, tech_name);
	gs_eparam_t    *image   = gs_effect_get_param_by_name(effect, "image");
	size_t      passes, i;

	gs_effect_set_texture(image, tex);

	passes = gs_technique_begin(tech);
	for (i = 0; i < passes; i++) {
		gs_technique_begin_pass(tech, i);
		gs_draw_sprite(tex, width, height, 0);
		gs_technique_end_pass(tech);
	}
	gs_technique_end(tech);
}

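/* Renders a filter's output.  When direct rendering is allowed, the parent
 * does not use custom drawing, and this filter targets the parent directly
 * (last in the chain), the target is drawn directly with the filter's
 * effect; otherwise the target is rendered into an intermediate texture
 * which is then drawn with the effect. */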
void obs_source_process_filter(obs_source_t *filter, gs_effect_t *effect,
		uint32_t width, uint32_t height, enum gs_color_format format,
		enum obs_allow_direct_render allow_direct)
{
	obs_source_t *target, *parent;
	uint32_t     target_flags, parent_flags;
	int          cx, cy;
	bool         use_matrix, expects_def, can_directly;

	if (!filter) return;

	target       = obs_filter_get_target(filter);
	parent       = obs_filter_get_parent(filter);
	target_flags = target->info.output_flags;
	parent_flags = parent->info.output_flags;
	cx           = obs_source_get_width(target);
	cy           = obs_source_get_height(target);
	use_matrix   = !!(target_flags & OBS_SOURCE_COLOR_MATRIX);
	expects_def  = !(parent_flags & OBS_SOURCE_CUSTOM_DRAW);
	can_directly = allow_direct == OBS_ALLOW_DIRECT_RENDERING;

	/* if the parent does not use any custom effects, and this is the last
	 * filter in the chain for the parent, then render the parent directly
	 * using the filter effect instead of rendering to texture to reduce
	 * the total number of passes */
	if (can_directly && expects_def && target == parent) {
		render_filter_bypass(target, effect, use_matrix);
		return;
	}

	if (!filter->filter_texrender)
		filter->filter_texrender = gs_texrender_create(format,
				GS_ZS_NONE);

	if (gs_texrender_begin(filter->filter_texrender, cx, cy)) {
		gs_ortho(0.0f, (float)cx, 0.0f, (float)cy, -100.0f, 100.0f);
		if (expects_def && parent == target)
			obs_source_default_render(parent, use_matrix);
		else
			obs_source_video_render(target);
		gs_texrender_end(filter->filter_texrender);
	}

	/* --------------------------- */

	render_filter_tex(gs_texrender_get_texture(filter->filter_texrender),
			effect, width, height, use_matrix);
}
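
/* Illustrative sketch of how a filter source's video_render callback might
 * call obs_source_process_filter(); the struct, its fields and the callback
 * signature below are assumptions, not part of this file:
 *
 *     static void my_filter_render(void *data, gs_effect_t *effect)
 *     {
 *             struct my_filter *f = data;
 *             obs_source_t *target = obs_filter_get_target(f->self);
 *
 *             obs_source_process_filter(f->self, f->effect,
 *                             obs_source_get_width(target),
 *                             obs_source_get_height(target),
 *                             GS_RGBA, OBS_ALLOW_DIRECT_RENDERING);
 *
 *             UNUSED_PARAMETER(effect);
 *     }
 */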

signal_handler_t *obs_source_get_signal_handler(obs_source_t *source)
{
	return source ? source->context.signals : NULL;
}

proc_handler_t *obs_source_get_proc_handler(obs_source_t *source)
{
	return source ? source->context.procs : NULL;
}

void obs_source_set_volume(obs_source_t *source, float volume)
{
	if (source) {
		struct calldata data = {0};
		calldata_set_ptr(&data, "source", source);
		calldata_set_float(&data, "volume", volume);

		signal_handler_signal(source->context.signals, "volume", &data);
		signal_handler_signal(obs->signals, "source_volume", &data);

		volume = (float)calldata_float(&data, "volume");
		calldata_free(&data);

		source->user_volume = volume;
	}
}
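
/* The "volume" signal above is in/out: handlers may overwrite the value
 * before it is stored.  A hypothetical handler that clamps the volume to
 * 1.0 (names and callback signature illustrative only):
 *
 *     static void clamp_volume(void *data, calldata_t *cd)
 *     {
 *             double vol = calldata_float(cd, "volume");
 *             if (vol > 1.0)
 *                     calldata_set_float(cd, "volume", 1.0);
 *             UNUSED_PARAMETER(data);
 *     }
 */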

static void set_tree_preset_vol(obs_source_t *parent, obs_source_t *child,
		void *param)
{
	float *vol = param;
	child->present_volume = *vol;

	UNUSED_PARAMETER(parent);
}

void obs_source_set_present_volume(obs_source_t *source, float volume)
{
	if (source) {
		source->present_volume = volume;

		/* don't set the presentation volume of the tree if a
		 * transition source, let the transition handle presentation
		 * volume for the child sources itself. */
		if (source->info.type != OBS_SOURCE_TYPE_TRANSITION)
			obs_source_enum_tree(source, set_tree_preset_vol,
					&volume);
	}
}

float obs_source_get_volume(obs_source_t *source)
{
	return source ? source->user_volume : 0.0f;
}

float obs_source_get_present_volume(obs_source_t *source)
{
	return source ? source->present_volume : 0.0f;
}

void obs_source_set_sync_offset(obs_source_t *source, int64_t offset)
{
	if (source)
		source->sync_offset = offset;
}

int64_t obs_source_get_sync_offset(obs_source_t *source)
{
	return source ? source->sync_offset : 0;
}

struct source_enum_data {
	obs_source_enum_proc_t enum_callback;
	void *param;
};

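/* Recursively enumerates a child's own children before passing the child to
 * the user callback; enum_refs guards against re-entering a source that is
 * already being enumerated (cyclic references). */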
static void enum_source_tree_callback(obs_source_t *parent, obs_source_t *child,
		void *param)
{
	struct source_enum_data *data = param;

	if (child->info.enum_sources && !child->enum_refs) {
		os_atomic_inc_long(&child->enum_refs);

		if (child->context.data)
			child->info.enum_sources(child->context.data,
					enum_source_tree_callback, data);

		os_atomic_dec_long(&child->enum_refs);
	}

	data->enum_callback(parent, child, data->param);
}

void obs_source_enum_sources(obs_source_t *source,
		obs_source_enum_proc_t enum_callback,
		void *param)
{
	if (!source_valid(source)      ||
	    !source->info.enum_sources ||
	    source->enum_refs)
		return;

	obs_source_addref(source);

	os_atomic_inc_long(&source->enum_refs);
	source->info.enum_sources(source->context.data, enum_callback, param);
	os_atomic_dec_long(&source->enum_refs);

	obs_source_release(source);
}

void obs_source_enum_tree(obs_source_t *source,
		obs_source_enum_proc_t enum_callback,
		void *param)
{
	struct source_enum_data data = {enum_callback, param};

	if (!source_valid(source)      ||
	    !source->info.enum_sources ||
	    source->enum_refs)
		return;

	obs_source_addref(source);

	os_atomic_inc_long(&source->enum_refs);
	source->info.enum_sources(source->context.data,
			enum_source_tree_callback,
			&data);
	os_atomic_dec_long(&source->enum_refs);

	obs_source_release(source);
}
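
/* Illustrative sketch of a caller that counts every source reachable from a
 * source tree; count_cb and scene_source are hypothetical:
 *
 *     static void count_cb(obs_source_t *parent, obs_source_t *child,
 *                     void *param)
 *     {
 *             size_t *count = param;
 *             (*count)++;
 *             UNUSED_PARAMETER(parent);
 *     }
 *
 *     size_t count = 0;
 *     obs_source_enum_tree(scene_source, count_cb, &count);
 */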

void obs_source_add_child(obs_source_t *parent, obs_source_t *child)
{
	if (!parent || !child) return;

	for (int i = 0; i < parent->show_refs; i++) {
		enum view_type type;
		type = (i < parent->activate_refs) ? MAIN_VIEW : AUX_VIEW;
		obs_source_activate(child, type);
	}
}

void obs_source_remove_child(obs_source_t *parent, obs_source_t *child)
{
	if (!parent || !child) return;

	for (int i = 0; i < parent->show_refs; i++) {
		enum view_type type;
		type = (i < parent->activate_refs) ? MAIN_VIEW : AUX_VIEW;
		obs_source_deactivate(child, type);
	}
}

static void reset_transition_vol(obs_source_t *parent, obs_source_t *child,
		void *param)
{
	child->transition_volume = 0.0f;

	UNUSED_PARAMETER(parent);
	UNUSED_PARAMETER(param);
}

static void add_transition_vol(obs_source_t *parent, obs_source_t *child,
		void *param)
{
	float *vol = param;
	child->transition_volume += *vol;

	UNUSED_PARAMETER(parent);
}

static void apply_transition_vol(obs_source_t *parent, obs_source_t *child,
		void *param)
{
	child->present_volume = child->transition_volume;

	UNUSED_PARAMETER(parent);
	UNUSED_PARAMETER(param);
}

void obs_transition_begin_frame(obs_source_t *transition)
{
	if (!transition) return;
	obs_source_enum_tree(transition, reset_transition_vol, NULL);
}

void obs_source_set_transition_vol(obs_source_t *source, float vol)
{
	if (!source) return;

	add_transition_vol(NULL, source, &vol);
	obs_source_enum_tree(source, add_transition_vol, &vol);
}

void obs_transition_end_frame(obs_source_t *transition)
{
	if (!transition) return;
	obs_source_enum_tree(transition, apply_transition_vol, NULL);
}
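
/* Illustrative sketch (names hypothetical) of how a transition could use the
 * three helpers above while rendering a frame, with t as its progress in
 * [0, 1]:
 *
 *     obs_transition_begin_frame(transition);
 *     obs_source_set_transition_vol(old_child, 1.0f - t);
 *     obs_source_set_transition_vol(new_child, t);
 *     obs_transition_end_frame(transition);
 */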

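/* save/load forward to the source's optional info callbacks, passing the
 * source's private data and its settings; sources that do not implement the
 * callbacks are silently skipped. */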
void obs_source_save(obs_source_t *source)
{
	if (!source_valid(source) || !source->info.save) return;
	source->info.save(source->context.data, source->context.settings);
}

void obs_source_load(obs_source_t *source)
{
	if (!source_valid(source) || !source->info.load) return;
	source->info.load(source->context.data, source->context.settings);
}