/******************************************************************************
    Copyright (C) 2013-2014 by Hugh Bailey <obs.jim@gmail.com>

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
******************************************************************************/

#include <inttypes.h>

#include "media-io/format-conversion.h"
#include "media-io/video-frame.h"
#include "media-io/audio-io.h"
#include "util/threading.h"
#include "util/platform.h"
#include "callback/calldata.h"
#include "graphics/matrix3.h"
#include "graphics/vec3.h"

#include "obs.h"
#include "obs-internal.h"

static inline bool source_valid(struct obs_source *source)
{
	return source && source->context.data;
}

const struct obs_source_info *find_source(struct darray *list, const char *id)
{
	size_t i;
	struct obs_source_info *array = list->array;

	for (i = 0; i < list->num; i++) {
		struct obs_source_info *info = array+i;
		if (strcmp(info->id, id) == 0)
			return info;
	}

	return NULL;
}

static const struct obs_source_info *get_source_info(enum obs_source_type type,
		const char *id)
{
	struct darray *list = NULL;

	switch (type) {
	case OBS_SOURCE_TYPE_INPUT:
		list = &obs->input_types.da;
		break;

	case OBS_SOURCE_TYPE_FILTER:
		list = &obs->filter_types.da;
		break;

	case OBS_SOURCE_TYPE_TRANSITION:
		list = &obs->transition_types.da;
		break;
	}

	return find_source(list, id);
}

static const char *source_signals[] = {
	"void destroy(ptr source)",
	"void add(ptr source)",
	"void remove(ptr source)",
	"void activate(ptr source)",
	"void deactivate(ptr source)",
	"void show(ptr source)",
	"void hide(ptr source)",
	"void rename(ptr source, string new_name, string prev_name)",
	"void volume(ptr source, in out float volume)",
	"void volume_level(ptr source, float level, float magnitude, "
		"float peak)",
	NULL
};

bool obs_source_init_context(struct obs_source *source,
		obs_data_t settings, const char *name)
{
	if (!obs_context_data_init(&source->context, settings, name))
		return false;

	return signal_handler_add_array(source->context.signals,
			source_signals);
}

const char *obs_source_getdisplayname(enum obs_source_type type, const char *id)
{
	const struct obs_source_info *info = get_source_info(type, id);
	return (info != NULL) ? info->getname() : NULL;
}

/* internal initialization */
bool obs_source_init(struct obs_source *source,
		const struct obs_source_info *info)
{
	source->refs = 1;
	source->user_volume = 1.0f;
	source->present_volume = 0.0f;
	source->sync_offset = 0;
	pthread_mutex_init_value(&source->filter_mutex);
	pthread_mutex_init_value(&source->video_mutex);
	pthread_mutex_init_value(&source->audio_mutex);

	if (pthread_mutex_init(&source->filter_mutex, NULL) != 0)
		return false;
	if (pthread_mutex_init(&source->audio_mutex, NULL) != 0)
		return false;
	if (pthread_mutex_init(&source->video_mutex, NULL) != 0)
		return false;

	if (info && info->output_flags & OBS_SOURCE_AUDIO) {
		source->audio_line = audio_output_createline(obs->audio.audio,
				source->context.name);
		if (!source->audio_line) {
			blog(LOG_ERROR, "Failed to create audio line for "
			                "source '%s'", source->context.name);
			return false;
		}
	}

	obs_context_data_insert(&source->context,
			&obs->data.sources_mutex,
			&obs->data.first_source);
	return true;
}

static inline void obs_source_dosignal(struct obs_source *source,
		const char *signal_obs, const char *signal_source)
{
	struct calldata data;

	calldata_init(&data);
	calldata_setptr(&data, "source", source);
	if (signal_obs)
		signal_handler_signal(obs->signals, signal_obs, &data);
	if (signal_source)
		signal_handler_signal(source->context.signals, signal_source,
				&data);
	calldata_free(&data);
}

obs_source_t obs_source_create(enum obs_source_type type, const char *id,
		const char *name, obs_data_t settings)
{
	struct obs_source *source = bzalloc(sizeof(struct obs_source));

	const struct obs_source_info *info = get_source_info(type, id);
	if (!info) {
		blog(LOG_ERROR, "Source ID '%s' not found", id);

		source->info.id      = bstrdup(id);
		source->info.type    = type;
		source->owns_info_id = true;
	} else {
		source->info = *info;
	}

	if (!obs_source_init_context(source, settings, name))
		goto fail;

	if (info && info->defaults)
		info->defaults(source->context.settings);

	/* allow the source to be created even if creation fails so that the
	 * user's data doesn't become lost */
	if (info)
		source->context.data = info->create(source->context.settings,
				source);
	if (!source->context.data)
		blog(LOG_ERROR, "Failed to create source '%s'!", name);

	if (!obs_source_init(source, info))
		goto fail;

	blog(LOG_INFO, "source '%s' (%s) created", name, id);
	obs_source_dosignal(source, "source_create", NULL);
	return source;

fail:
	blog(LOG_ERROR, "obs_source_create failed");
	obs_source_destroy(source);
	return NULL;
}

void source_frame_init(struct source_frame *frame, enum video_format format,
		uint32_t width, uint32_t height)
{
	struct video_frame vid_frame;

	if (!frame)
		return;

	video_frame_init(&vid_frame, format, width, height);
	frame->format = format;
	frame->width  = width;
	frame->height = height;

	for (size_t i = 0; i < MAX_AV_PLANES; i++) {
		frame->data[i]     = vid_frame.data[i];
		frame->linesize[i] = vid_frame.linesize[i];
	}
}

void obs_source_destroy(struct obs_source *source)
{
	size_t i;

	if (!source)
		return;

	obs_context_data_remove(&source->context);

	blog(LOG_INFO, "source '%s' destroyed", source->context.name);

	obs_source_dosignal(source, "source_destroy", "destroy");

	if (source->context.data) {
		source->info.destroy(source->context.data);
		source->context.data = NULL;
	}

	if (source->filter_parent)
		obs_source_filter_remove(source->filter_parent, source);

	for (i = 0; i < source->filters.num; i++)
		obs_source_release(source->filters.array[i]);

	for (i = 0; i < source->video_frames.num; i++)
		source_frame_destroy(source->video_frames.array[i]);

	gs_entercontext(obs->video.graphics);
	texrender_destroy(source->async_convert_texrender);
	texture_destroy(source->async_texture);
	gs_leavecontext();

	for (i = 0; i < MAX_AV_PLANES; i++)
		bfree(source->audio_data.data[i]);

	audio_line_destroy(source->audio_line);
	audio_resampler_destroy(source->resampler);

	texrender_destroy(source->filter_texrender);
	da_free(source->video_frames);
	da_free(source->filters);
	pthread_mutex_destroy(&source->filter_mutex);
	pthread_mutex_destroy(&source->audio_mutex);
	pthread_mutex_destroy(&source->video_mutex);
	obs_context_data_free(&source->context);
	
	if (source->owns_info_id)
		bfree((void*)source->info.id);

	bfree(source);
}

void obs_source_addref(obs_source_t source)
{
	if (source)
		os_atomic_inc_long(&source->refs);
}

void obs_source_release(obs_source_t source)
{
	if (!source)
		return;

	if (os_atomic_dec_long(&source->refs) == 0)
		obs_source_destroy(source);
}

void obs_source_remove(obs_source_t source)
{
	struct obs_core_data *data = &obs->data;
	size_t id;
	bool   exists;

	pthread_mutex_lock(&data->sources_mutex);

	if (!source || source->removed) {
		pthread_mutex_unlock(&data->sources_mutex);
		return;
	}

	source->removed = true;

	obs_source_addref(source);

	id = da_find(data->user_sources, &source, 0);
	exists = (id != DARRAY_INVALID);
	if (exists) {
		da_erase(data->user_sources, id);
		obs_source_release(source);
	}

	pthread_mutex_unlock(&data->sources_mutex);

	if (exists)
		obs_source_dosignal(source, "source_remove", "remove");

	obs_source_release(source);
}

bool obs_source_removed(obs_source_t source)
{
	return source ? source->removed : true;
}

static inline obs_data_t get_defaults(const struct obs_source_info *info)
{
	obs_data_t settings = obs_data_create();
	if (info->defaults)
		info->defaults(settings);
	return settings;
}

obs_data_t obs_source_settings(enum obs_source_type type, const char *id)
{
	const struct obs_source_info *info = get_source_info(type, id);
	return (info) ? get_defaults(info) : NULL;
}

obs_properties_t obs_get_source_properties(enum obs_source_type type,
		const char *id)
{
	const struct obs_source_info *info = get_source_info(type, id);
	if (info && info->properties) {
		obs_data_t       defaults = get_defaults(info);
		obs_properties_t properties;

		properties = info->properties();
		obs_properties_apply_settings(properties, defaults);
		obs_data_release(defaults);
		return properties;
	}
	return NULL;
}

obs_properties_t obs_source_properties(obs_source_t source)
{
	if (source_valid(source) && source->info.properties) {
		obs_properties_t props;
		props = source->info.properties();
		obs_properties_apply_settings(props, source->context.settings);
		return props;
	}

	return NULL;
}

uint32_t obs_source_get_output_flags(obs_source_t source)
{
	return source ? source->info.output_flags : 0;
}

static void obs_source_deferred_update(obs_source_t source)
{
	if (source->context.data && source->info.update)
		source->info.update(source->context.data,
				source->context.settings);

	source->defer_update = false;
}

void obs_source_update(obs_source_t source, obs_data_t settings)
{
	if (!source) return;

	if (settings)
		obs_data_apply(source->context.settings, settings);

	if (source->info.output_flags & OBS_SOURCE_VIDEO) {
		source->defer_update = true;
	} else if (source->context.data && source->info.update) {
		source->info.update(source->context.data,
				source->context.settings);
	}
}

static void activate_source(obs_source_t source)
{
	if (source->context.data && source->info.activate)
		source->info.activate(source->context.data);
	obs_source_dosignal(source, "source_activate", "activate");
}

static void deactivate_source(obs_source_t source)
{
	if (source->context.data && source->info.deactivate)
		source->info.deactivate(source->context.data);
	obs_source_dosignal(source, "source_deactivate", "deactivate");
}

static void show_source(obs_source_t source)
{
	if (source->context.data && source->info.show)
		source->info.show(source->context.data);
	obs_source_dosignal(source, "source_show", "show");
}

static void hide_source(obs_source_t source)
{
	if (source->context.data && source->info.hide)
		source->info.hide(source->context.data);
	obs_source_dosignal(source, "source_hide", "hide");
}

static void activate_tree(obs_source_t parent, obs_source_t child, void *param)
{
	if (os_atomic_inc_long(&child->activate_refs) == 1)
		activate_source(child);

	UNUSED_PARAMETER(parent);
	UNUSED_PARAMETER(param);
}

static void deactivate_tree(obs_source_t parent, obs_source_t child,
		void *param)
{
	if (os_atomic_dec_long(&child->activate_refs) == 0)
		deactivate_source(child);

	UNUSED_PARAMETER(parent);
	UNUSED_PARAMETER(param);
}

static void show_tree(obs_source_t parent, obs_source_t child, void *param)
{
	if (os_atomic_inc_long(&child->show_refs) == 1)
		show_source(child);

	UNUSED_PARAMETER(parent);
	UNUSED_PARAMETER(param);
}

static void hide_tree(obs_source_t parent, obs_source_t child, void *param)
{
	if (os_atomic_dec_long(&child->show_refs) == 0)
		hide_source(child);

	UNUSED_PARAMETER(parent);
	UNUSED_PARAMETER(param);
}

void obs_source_activate(obs_source_t source, enum view_type type)
{
	if (!source) return;

	if (os_atomic_inc_long(&source->show_refs) == 1) {
		show_source(source);
		obs_source_enum_tree(source, show_tree, NULL);
	}

	if (type == MAIN_VIEW) {
		if (os_atomic_inc_long(&source->activate_refs) == 1) {
			activate_source(source);
			obs_source_enum_tree(source, activate_tree, NULL);
			obs_source_set_present_volume(source, 1.0f);
		}
	}
}

void obs_source_deactivate(obs_source_t source, enum view_type type)
{
	if (!source) return;

	if (os_atomic_dec_long(&source->show_refs) == 0) {
		hide_source(source);
		obs_source_enum_tree(source, hide_tree, NULL);
	}

	if (type == MAIN_VIEW) {
		if (os_atomic_dec_long(&source->activate_refs) == 0) {
			deactivate_source(source);
			obs_source_enum_tree(source, deactivate_tree, NULL);
			obs_source_set_present_volume(source, 0.0f);
		}
	}
}

void obs_source_video_tick(obs_source_t source, float seconds)
{
	if (!source) return;

	if (source->defer_update)
		obs_source_deferred_update(source);

	/* reset the filter render texture information once every frame */
	if (source->filter_texrender)
		texrender_reset(source->filter_texrender);

	if (source->context.data && source->info.video_tick)
		source->info.video_tick(source->context.data, seconds);
}

/* unless the value is 3+ hours worth of frames, this won't overflow */
static inline uint64_t conv_frames_to_time(size_t frames)
{
	const struct audio_output_info *info;
	info = audio_output_getinfo(obs->audio.audio);

	return (uint64_t)frames * 1000000000ULL /
		(uint64_t)info->samples_per_sec;
}

/* maximum "direct" timestamp variance in nanoseconds */
#define MAX_TS_VAR          5000000000ULL
/* maximum time that timestamp can jump in nanoseconds */
#define MAX_TIMESTAMP_JUMP  2000000000ULL
/* time threshold in nanoseconds to ensure audio timing is as seamless as
 * possible */
#define TS_SMOOTHING_THRESHOLD 70000000ULL

static inline void reset_audio_timing(obs_source_t source, uint64_t timestamp)
{
	source->timing_set    = true;
	source->timing_adjust = os_gettime_ns() - timestamp;
}

static inline void handle_ts_jump(obs_source_t source, uint64_t expected,
		uint64_t ts, uint64_t diff)
{
	blog(LOG_DEBUG, "Timestamp for source '%s' jumped by '%"PRIu64"', "
	                "expected value %"PRIu64", input value %"PRIu64,
	                source->context.name, diff, expected, ts);

	/* if the source has video, ignore audio data until the timing is
	 * reset */
	if (source->info.output_flags & OBS_SOURCE_ASYNC)
		os_atomic_dec_long(&source->av_sync_ref);
	else
		reset_audio_timing(source, ts);
}

#define VOL_MIN -96.0f
#define VOL_MAX  0.0f

static inline float to_db(float val)
{
	float db = 20.0f * log10f(val);
	return isfinite(db) ? db : VOL_MIN;
}

static void calc_volume_levels(struct obs_source *source, float *array,
		size_t frames, float volume)
{
	float sum_val = 0.0f;
	float max_val = 0.0f;
	float rms_val = 0.0f;

	const uint32_t sample_rate    = audio_output_samplerate(obs_audio());
	const size_t   channels       = audio_output_channels(obs_audio());
	const size_t   count          = frames * channels;
	const size_t   vol_peak_delay = sample_rate * 3;
	const float    alpha          = 0.15f;

	for (size_t i = 0; i < count; i++) {
		float val      = array[i];
		float val_pow2 = val * val;

		sum_val += val_pow2;
		max_val  = fmaxf(max_val, val_pow2);
	}

	/*
	  We want the volume meters to scale linearly with respect to the
	  current volume, so there is no need to apply the volume here.
	*/

	UNUSED_PARAMETER(volume);

	rms_val = to_db(sqrtf(sum_val / (float)count));
	max_val = to_db(sqrtf(max_val));

	if (max_val > source->vol_max)
		source->vol_max = max_val;
	else
		source->vol_max = alpha * source->vol_max +
			(1.0f - alpha) * max_val;

	if (source->vol_max > source->vol_peak ||
	    source->vol_update_count > vol_peak_delay) {
		source->vol_peak         = source->vol_max;
		source->vol_update_count = 0;
	} else {
		source->vol_update_count += count;
	}

	source->vol_mag = alpha * rms_val + source->vol_mag * (1.0f - alpha);
}

/* TODO update peak/etc later */
static void obs_source_update_volume_level(obs_source_t source,
		struct audio_data *in)
{
	if (source && in) {
		struct calldata data = {0};

		calc_volume_levels(source, (float*)in->data[0], in->frames,
				in->volume);

		calldata_setptr  (&data, "source",    source);
		calldata_setfloat(&data, "level",     source->vol_max);
		calldata_setfloat(&data, "magnitude", source->vol_mag);
		calldata_setfloat(&data, "peak",      source->vol_peak);

		signal_handler_signal(source->context.signals, "volume_level",
				&data);
		signal_handler_signal(obs->signals, "source_volume_level",
				&data);

		calldata_free(&data);
	}
}

static void source_output_audio_line(obs_source_t source,
		const struct audio_data *data)
{
	struct audio_data in = *data;
	uint64_t diff;

	if (!source->timing_set) {
		reset_audio_timing(source, in.timestamp);

		/* detects 'directly' set timestamps as long as they're within
		 * a certain threshold */
		if ((source->timing_adjust + MAX_TS_VAR) < MAX_TS_VAR * 2)
			source->timing_adjust = 0;

	} else {
		bool ts_under = (in.timestamp < source->next_audio_ts_min);

		diff = ts_under ?
			(source->next_audio_ts_min - in.timestamp) :
			(in.timestamp - source->next_audio_ts_min);

		/* smooth audio if lower or within threshold */
		if (diff > MAX_TIMESTAMP_JUMP)
			handle_ts_jump(source, source->next_audio_ts_min,
					in.timestamp, diff);
		else if (ts_under || diff < TS_SMOOTHING_THRESHOLD)
			in.timestamp = source->next_audio_ts_min;
	}

	source->next_audio_ts_min = in.timestamp +
		conv_frames_to_time(in.frames);

	if (source->av_sync_ref != 0)
		return;

	in.timestamp += source->timing_adjust + source->sync_offset;
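	/* final line volume: combine the source's own user/presentation
	 * volume with the global user/presentation volume */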
	in.volume = source->user_volume * source->present_volume *
		obs->audio.user_volume * obs->audio.present_volume;

	audio_line_output(source->audio_line, &in);
	obs_source_update_volume_level(source, &in);
}

enum convert_type {
	CONVERT_NONE,
	CONVERT_NV12,
	CONVERT_420,
	CONVERT_422_U,
	CONVERT_422_Y,
};

static inline enum convert_type get_convert_type(enum video_format format)
{
	switch (format) {
	case VIDEO_FORMAT_I420:
		return CONVERT_420;
	case VIDEO_FORMAT_NV12:
		return CONVERT_NV12;

	case VIDEO_FORMAT_YVYU:
	case VIDEO_FORMAT_YUY2:
		return CONVERT_422_Y;
	case VIDEO_FORMAT_UYVY:
		return CONVERT_422_U;

	case VIDEO_FORMAT_NONE:
	case VIDEO_FORMAT_RGBA:
	case VIDEO_FORMAT_BGRA:
	case VIDEO_FORMAT_BGRX:
		return CONVERT_NONE;
	}

	return CONVERT_NONE;
}

static inline bool set_packed422_sizes(struct obs_source *source,
		struct source_frame *frame)
{
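	/* packed 4:2:2 is uploaded as a BGRA texture at half width, so each
	 * texel carries two horizontally adjacent pixels */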
	source->async_convert_height = frame->height;
	source->async_convert_width  = frame->width / 2;
	source->async_texture_format = GS_BGRA;
	return true;
}

static inline bool set_planar420_sizes(struct obs_source *source,
		struct source_frame *frame)
{
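	/* pack all three I420 planes into a single R8 texture: the luma plane
	 * plus the two quarter-size chroma planes take 1.5x the luma height,
	 * rounded up to an even number of rows */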
	uint32_t size = frame->width * frame->height;
	size += size/2;

	source->async_convert_width   = frame->width;
	source->async_convert_height  = (size / frame->width + 1) & 0xFFFFFFFE;
	source->async_texture_format  = GS_R8;
	source->async_plane_offset[0] = frame->width * frame->height;
	source->async_plane_offset[1] = source->async_plane_offset[0] +
		frame->width * frame->height / 4;
	return true;
}

static inline bool init_gpu_conversion(struct obs_source *source,
		struct source_frame *frame)
{
	switch (get_convert_type(frame->format)) {
		case CONVERT_422_Y:
		case CONVERT_422_U:
			return set_packed422_sizes(source, frame);

		case CONVERT_420:
			return set_planar420_sizes(source, frame);

		case CONVERT_NV12:
			assert(false && "NV12 not yet implemented");
			/* TODO: implement conversion */
			break;

		case CONVERT_NONE:
			assert(false && "No conversion requested");
			break;

	}
	return false;
}

static inline enum gs_color_format convert_video_format(
		enum video_format format)
{
	if (format == VIDEO_FORMAT_RGBA)
		return GS_RGBA;
	else if (format == VIDEO_FORMAT_BGRA)
		return GS_BGRA;

	return GS_BGRX;
}

static inline bool set_async_texture_size(struct obs_source *source,
		struct source_frame *frame)
{
	enum convert_type prev, cur;
	prev = get_convert_type(source->async_format);
	cur  = get_convert_type(frame->format);
	if (source->async_texture) {
		if (source->async_width  == frame->width &&
		    source->async_height == frame->height &&
		    prev == cur)
			return true;
	}

	texture_destroy(source->async_texture);
	texrender_destroy(source->async_convert_texrender);
	source->async_convert_texrender = NULL;

	if (cur != CONVERT_NONE && init_gpu_conversion(source, frame)) {
		source->async_gpu_conversion = true;

		source->async_convert_texrender =
			texrender_create(GS_BGRX, GS_ZS_NONE);

		source->async_texture = gs_create_texture(
				source->async_convert_width,
				source->async_convert_height,
				source->async_texture_format,
				1, NULL, GS_DYNAMIC);

	} else {
		enum gs_color_format format = convert_video_format(
				frame->format);
		source->async_gpu_conversion = false;

		source->async_texture = gs_create_texture(
				frame->width, frame->height,
				format, 1, NULL, GS_DYNAMIC);
	}

	if (!source->async_texture)
		return false;

	source->async_width  = frame->width;
	source->async_height = frame->height;
	return true;
}

static void upload_raw_frame(texture_t tex, const struct source_frame *frame)
{
	switch (get_convert_type(frame->format)) {
		case CONVERT_422_U:
		case CONVERT_422_Y:
			texture_setimage(tex, frame->data[0],
					frame->linesize[0], false);
			break;

		case CONVERT_420:
			texture_setimage(tex, frame->data[0],
					frame->width, false);
			break;

		case CONVERT_NV12:
			assert(false && "Conversion not yet implemented");
			break;

		case CONVERT_NONE:
			assert(false && "No conversion requested");
			break;
	}
}

static const char *select_conversion_technique(enum video_format format)
{
	switch (format) {
		case VIDEO_FORMAT_UYVY:
			return "UYVY_Reverse";

		case VIDEO_FORMAT_YUY2:
			return "YUY2_Reverse";

		case VIDEO_FORMAT_YVYU:
			return "YVYU_Reverse";

		case VIDEO_FORMAT_I420:
			return "I420_Reverse";

		case VIDEO_FORMAT_NV12:
847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862
			assert(false && "Conversion not yet implemented");
			break;

		case VIDEO_FORMAT_BGRA:
		case VIDEO_FORMAT_BGRX:
		case VIDEO_FORMAT_RGBA:
		case VIDEO_FORMAT_NONE:
			assert(false && "No conversion requested");
			break;
	}
	return NULL;
}

static inline void set_eparam(effect_t effect, const char *name, float val)
{
	eparam_t param = effect_getparambyname(effect, name);
	effect_setfloat(param, val);
}

static bool update_async_texrender(struct obs_source *source,
		const struct source_frame *frame)
{
	texture_t   tex       = source->async_texture;
	texrender_t texrender = source->async_convert_texrender;

	texrender_reset(texrender);

	upload_raw_frame(tex, frame);

	uint32_t cx = source->async_width;
	uint32_t cy = source->async_height;

	float convert_width  = (float)source->async_convert_width;
	float convert_height = (float)source->async_convert_height;

	effect_t conv = obs->video.conversion_effect;
	technique_t tech = effect_gettechnique(conv,
			select_conversion_technique(frame->format));

	if (!texrender_begin(texrender, cx, cy))
		return false;

	technique_begin(tech);
	technique_beginpass(tech, 0);

	effect_settexture(effect_getparambyname(conv, "image"), tex);
	set_eparam(conv, "width",  (float)cx);
	set_eparam(conv, "height", (float)cy);
	set_eparam(conv, "width_i",  1.0f / cx);
	set_eparam(conv, "height_i", 1.0f / cy);
	set_eparam(conv, "width_d2",  cx * 0.5f);
	set_eparam(conv, "height_d2", cy * 0.5f);
	set_eparam(conv, "width_d2_i",  1.0f / (cx * 0.5f));
	set_eparam(conv, "height_d2_i", 1.0f / (cy * 0.5f));
	set_eparam(conv, "input_width",  convert_width);
	set_eparam(conv, "input_height", convert_height);
	set_eparam(conv, "input_width_i",  1.0f / convert_width);
	set_eparam(conv, "input_height_i", 1.0f / convert_height);
	set_eparam(conv, "input_width_i_d2",  (1.0f / convert_width)  * 0.5f);
	set_eparam(conv, "input_height_i_d2", (1.0f / convert_height) * 0.5f);
	set_eparam(conv, "u_plane_offset",
			(float)source->async_plane_offset[0]);
	set_eparam(conv, "v_plane_offset",
			(float)source->async_plane_offset[1]);

	gs_ortho(0.f, (float)cx, 0.f, (float)cy, -100.f, 100.f);

	gs_draw_sprite(tex, 0, cx, cy);

	technique_endpass(tech);
	technique_end(tech);

	texrender_end(texrender);

	return true;
}

static bool update_async_texture(struct obs_source *source,
		const struct source_frame *frame)
{
	texture_t         tex       = source->async_texture;
	texrender_t       texrender = source->async_convert_texrender;
	enum convert_type type      = get_convert_type(frame->format);
	uint8_t           *ptr;
	uint32_t          linesize;

	source->async_format     = frame->format;
	source->async_flip       = frame->flip;
	source->async_full_range = frame->full_range;
	memcpy(source->async_color_matrix, frame->color_matrix,
			sizeof(frame->color_matrix));
	memcpy(source->async_color_range_min, frame->color_range_min,
			sizeof frame->color_range_min);
	memcpy(source->async_color_range_max, frame->color_range_max,
			sizeof frame->color_range_max);

	if (source->async_gpu_conversion && texrender)
		return update_async_texrender(source, frame);

	if (type == CONVERT_NONE) {
		texture_setimage(tex, frame->data[0], frame->linesize[0],
				false);
		return true;
	}

	if (!texture_map(tex, &ptr, &linesize))
		return false;

	if (type == CONVERT_420)
		decompress_420((const uint8_t* const*)frame->data,
				frame->linesize,
				0, frame->height, ptr, linesize);

	else if (type == CONVERT_NV12)
		decompress_nv12((const uint8_t* const*)frame->data,
				frame->linesize,
				0, frame->height, ptr, linesize);

	else if (type == CONVERT_422_Y)
		decompress_422(frame->data[0], frame->linesize[0],
				0, frame->height, ptr, linesize, true);

	else if (type == CONVERT_422_U)
		decompress_422(frame->data[0], frame->linesize[0],
				0, frame->height, ptr, linesize, false);

	texture_unmap(tex);
	return true;
}

static inline void obs_source_draw_texture(struct obs_source *source,
		effect_t effect, float *color_matrix,
		float const *color_range_min, float const *color_range_max)
{
	texture_t tex = source->async_texture;
	eparam_t  param;

	if (source->async_convert_texrender)
		tex = texrender_gettexture(source->async_convert_texrender);

	if (color_range_min) {
		size_t const size = sizeof(float) * 3;
		param = effect_getparambyname(effect, "color_range_min");
		effect_setval(param, color_range_min, size);
	}

	if (color_range_max) {
		size_t const size = sizeof(float) * 3;
		param = effect_getparambyname(effect, "color_range_max");
		effect_setval(param, color_range_max, size);
	}

	if (color_matrix) {
		param = effect_getparambyname(effect, "color_matrix");
		effect_setval(param, color_matrix, sizeof(float) * 16);
	}

	param = effect_getparambyname(effect, "image");
	effect_settexture(param, tex);

	gs_draw_sprite(tex, source->async_flip ? GS_FLIP_V : 0, 0, 0);
}

static void obs_source_draw_async_texture(struct obs_source *source)
{
	effect_t    effect        = gs_geteffect();
	bool        yuv           = format_is_yuv(source->async_format);
	bool        limited_range = yuv && !source->async_full_range;
	const char  *type         = yuv ? "DrawMatrix" : "Draw";
	bool        def_draw      = (!effect);
	technique_t tech          = NULL;

	if (def_draw) {
		effect = obs_get_default_effect();
		tech = effect_gettechnique(effect, type);
		technique_begin(tech);
		technique_beginpass(tech, 0);
	}

	obs_source_draw_texture(source, effect,
			yuv ? source->async_color_matrix : NULL,
			limited_range ? source->async_color_range_min : NULL,
			limited_range ? source->async_color_range_max : NULL);

	if (def_draw) {
		technique_endpass(tech);
		technique_end(tech);
	}
}

static void obs_source_render_async_video(obs_source_t source)
{
	struct source_frame *frame = obs_source_get_frame(source);
	if (frame) {
		if (!set_async_texture_size(source, frame))
			return;
		if (!update_async_texture(source, frame))
			return;
	}

	if (source->async_texture)
		obs_source_draw_async_texture(source);

	obs_source_release_frame(source, frame);
}

static inline void obs_source_render_filters(obs_source_t source)
{
	source->rendering_filter = true;
	obs_source_video_render(source->filters.array[0]);
	source->rendering_filter = false;
}

static inline void obs_source_default_render(obs_source_t source,
		bool color_matrix)
{
	effect_t    effect     = obs->video.default_effect;
	const char  *tech_name = color_matrix ? "DrawMatrix" : "Draw";
	technique_t tech       = effect_gettechnique(effect, tech_name);
	size_t      passes, i;

	passes = technique_begin(tech);
	for (i = 0; i < passes; i++) {
		technique_beginpass(tech, i);
		if (source->context.data)
			source->info.video_render(source->context.data, effect);
		technique_endpass(tech);
	}
	technique_end(tech);
}

static inline void obs_source_main_render(obs_source_t source)
{
	uint32_t flags      = source->info.output_flags;
	bool color_matrix   = (flags & OBS_SOURCE_COLOR_MATRIX) != 0;
	bool custom_draw    = (flags & OBS_SOURCE_CUSTOM_DRAW) != 0;
	bool default_effect = !source->filter_parent &&
	                      source->filters.num == 0 &&
	                      !custom_draw;

	if (default_effect)
		obs_source_default_render(source, color_matrix);
	else if (source->context.data)
		source->info.video_render(source->context.data,
				custom_draw ? NULL : gs_geteffect());
}

void obs_source_video_render(obs_source_t source)
{
	if (!source_valid(source)) return;

	if (source->filters.num && !source->rendering_filter)
		obs_source_render_filters(source);

	else if (source->info.video_render)
		obs_source_main_render(source);

	else if (source->filter_target)
		obs_source_video_render(source->filter_target);

	else
		obs_source_render_async_video(source);
}

uint32_t obs_source_getwidth(obs_source_t source)
{
	if (!source_valid(source)) return 0;

	if (source->info.getwidth)
		return source->info.getwidth(source->context.data);
	return source->async_width;
}

uint32_t obs_source_getheight(obs_source_t source)
{
	if (!source_valid(source)) return 0;

	if (source->info.getheight)
		return source->info.getheight(source->context.data);
	return source->async_height;
}

obs_source_t obs_filter_getparent(obs_source_t filter)
{
	return filter ? filter->filter_parent : NULL;
}

obs_source_t obs_filter_gettarget(obs_source_t filter)
{
	return filter ? filter->filter_target : NULL;
}

void obs_source_filter_add(obs_source_t source, obs_source_t filter)
{
	if (!source || !filter)
		return;

	pthread_mutex_lock(&source->filter_mutex);

	if (da_find(source->filters, &filter, 0) != DARRAY_INVALID) {
		blog(LOG_WARNING, "Tried to add a filter that was already "
		                  "present on the source");
		return;
	}

	if (source->filters.num) {
		obs_source_t *back = da_end(source->filters);
		(*back)->filter_target = filter;
	}

	da_push_back(source->filters, &filter);

	pthread_mutex_unlock(&source->filter_mutex);

	filter->filter_parent = source;
	filter->filter_target = source;
}

void obs_source_filter_remove(obs_source_t source, obs_source_t filter)
{
	size_t idx;

	if (!source || !filter)
		return;

	pthread_mutex_lock(&source->filter_mutex);

	idx = da_find(source->filters, &filter, 0);
	if (idx == DARRAY_INVALID)
		return;

	if (idx > 0) {
		obs_source_t prev = source->filters.array[idx-1];
		prev->filter_target = filter->filter_target;
	}

	da_erase(source->filters, idx);

	pthread_mutex_unlock(&source->filter_mutex);

	filter->filter_parent = NULL;
	filter->filter_target = NULL;
}

void obs_source_filter_setorder(obs_source_t source, obs_source_t filter,
		enum order_movement movement)
{
	size_t idx, i;

	if (!source || !filter)
		return;

	idx = da_find(source->filters, &filter, 0);
J
jp9000 已提交
1200
	if (idx == DARRAY_INVALID)
J
jp9000 已提交
1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223
		return;

	if (movement == ORDER_MOVE_UP) {
		if (idx == source->filters.num-1)
			return;
		da_move_item(source->filters, idx, idx+1);

	} else if (movement == ORDER_MOVE_DOWN) {
		if (idx == 0)
			return;
		da_move_item(source->filters, idx, idx-1);

	} else if (movement == ORDER_MOVE_TOP) {
		if (idx == source->filters.num-1)
			return;
		da_move_item(source->filters, idx, source->filters.num-1);

	} else if (movement == ORDER_MOVE_BOTTOM) {
		if (idx == 0)
			return;
		da_move_item(source->filters, idx, 0);
	}

	/* reorder filter targets, not the nicest way of dealing with things */
	for (i = 0; i < source->filters.num; i++) {
		obs_source_t next_filter = (i == source->filters.num-1) ?
			source : source->filters.array[i+1];
		source->filters.array[i]->filter_target = next_filter;
	}
}

obs_data_t obs_source_getsettings(obs_source_t source)
{
	if (!source) return NULL;

	obs_data_addref(source->context.settings);
	return source->context.settings;
}

static inline struct source_frame *filter_async_video(obs_source_t source,
		struct source_frame *in)
{
	size_t i;
	for (i = source->filters.num; i > 0; i--) {
		struct obs_source *filter = source->filters.array[i-1];

		if (filter->context.data && filter->info.filter_video) {
			in = filter->info.filter_video(filter->context.data,
					in);
			if (!in)
				return NULL;
		}
	}

	return in;
}

static inline void copy_frame_data_line(struct source_frame *dst,
		const struct source_frame *src, uint32_t plane, uint32_t y)
{
	uint32_t pos_src = y * src->linesize[plane];
	uint32_t pos_dst = y * dst->linesize[plane];
	uint32_t bytes = dst->linesize[plane] < src->linesize[plane] ?
		dst->linesize[plane] : src->linesize[plane];

	memcpy(dst->data[plane] + pos_dst, src->data[plane] + pos_src, bytes);
}

static inline void copy_frame_data_plane(struct source_frame *dst,
		const struct source_frame *src, uint32_t plane, uint32_t lines)
{
	if (dst->linesize[plane] != src->linesize[plane])
		for (uint32_t y = 0; y < lines; y++)
			copy_frame_data_line(dst, src, plane, y);
	else
		memcpy(dst->data[plane], src->data[plane],
				dst->linesize[plane] * lines);
}

static void copy_frame_data(struct source_frame *dst,
		const struct source_frame *src)
{
	dst->flip         = src->flip;
	dst->full_range   = src->full_range;
	dst->timestamp    = src->timestamp;
	memcpy(dst->color_matrix, src->color_matrix, sizeof(float) * 16);
	if (!dst->full_range) {
		size_t const size = sizeof(float) * 3;
		memcpy(dst->color_range_min, src->color_range_min, size);
		memcpy(dst->color_range_max, src->color_range_max, size);
	}

	switch (dst->format) {
	case VIDEO_FORMAT_I420:
		copy_frame_data_plane(dst, src, 0, dst->height);
		copy_frame_data_plane(dst, src, 1, dst->height/2);
		copy_frame_data_plane(dst, src, 2, dst->height/2);
		break;

	case VIDEO_FORMAT_NV12:
		copy_frame_data_plane(dst, src, 0, dst->height);
		copy_frame_data_plane(dst, src, 1, dst->height/2);
		break;

	case VIDEO_FORMAT_YVYU:
	case VIDEO_FORMAT_YUY2:
	case VIDEO_FORMAT_UYVY:
	case VIDEO_FORMAT_NONE:
	case VIDEO_FORMAT_RGBA:
	case VIDEO_FORMAT_BGRA:
	case VIDEO_FORMAT_BGRX:
		copy_frame_data_plane(dst, src, 0, dst->height);
	}
}

static inline struct source_frame *cache_video(const struct source_frame *frame)
{
	/* TODO: use an actual cache */
	struct source_frame *new_frame = source_frame_create(frame->format,
			frame->width, frame->height);

	copy_frame_data(new_frame, frame);
	return new_frame;
}

static bool ready_async_frame(obs_source_t source, uint64_t sys_time);

static inline void cycle_frames(struct obs_source *source)
{
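	/* if the source is not being rendered, old cached frames still need
	 * to be cycled out so the async frame queue does not grow unbounded */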
	if (source->video_frames.num && !source->activate_refs)
		ready_async_frame(source, os_gettime_ns());
}

void obs_source_output_video(obs_source_t source,
		const struct source_frame *frame)
{
	if (!source || !frame)
		return;

	struct source_frame *output = cache_video(frame);

	pthread_mutex_lock(&source->filter_mutex);
	output = filter_async_video(source, output);
	pthread_mutex_unlock(&source->filter_mutex);

	if (output) {
		pthread_mutex_lock(&source->video_mutex);
		cycle_frames(source);
		da_push_back(source->video_frames, &output);
		pthread_mutex_unlock(&source->video_mutex);
	}
}

static inline struct filtered_audio *filter_async_audio(obs_source_t source,
		struct filtered_audio *in)
{
	size_t i;
	for (i = source->filters.num; i > 0; i--) {
		struct obs_source *filter = source->filters.array[i-1];

		if (filter->context.data && filter->info.filter_audio) {
			in = filter->info.filter_audio(filter->context.data,
					in);
			if (!in)
				return NULL;
		}
	}

	return in;
}

static inline void reset_resampler(obs_source_t source,
		const struct source_audio *audio)
{
	const struct audio_output_info *obs_info;
	struct resample_info output_info;

	obs_info = audio_output_getinfo(obs->audio.audio);

	output_info.format           = obs_info->format;
	output_info.samples_per_sec  = obs_info->samples_per_sec;
	output_info.speakers         = obs_info->speakers;

	source->sample_info.format          = audio->format;
	source->sample_info.samples_per_sec = audio->samples_per_sec;
	source->sample_info.speakers        = audio->speakers;

	if (source->sample_info.samples_per_sec == obs_info->samples_per_sec &&
	    source->sample_info.format          == obs_info->format          &&
	    source->sample_info.speakers        == obs_info->speakers) {
		source->audio_failed = false;
		return;
	}

	audio_resampler_destroy(source->resampler);
	source->resampler = audio_resampler_create(&output_info,
			&source->sample_info);

	source->audio_failed = source->resampler == NULL;
	if (source->resampler == NULL)
		blog(LOG_ERROR, "creation of resampler failed");
}

static inline void copy_audio_data(obs_source_t source,
		const uint8_t *const data[], uint32_t frames, uint64_t ts)
{
	size_t planes    = audio_output_planes(obs->audio.audio);
	size_t blocksize = audio_output_blocksize(obs->audio.audio);
	size_t size      = (size_t)frames * blocksize;
	bool   resize    = source->audio_storage_size < size;

	source->audio_data.frames    = frames;
	source->audio_data.timestamp = ts;

	for (size_t i = 0; i < planes; i++) {
		/* ensure audio storage capacity */
		if (resize) {
			bfree(source->audio_data.data[i]);
			source->audio_data.data[i] = bmalloc(size);
		}

		memcpy(source->audio_data.data[i], data[i], size);
	}

	if (resize)
		source->audio_storage_size = size;
}

/* resamples/remixes new audio to the designated main audio output format */
static void process_audio(obs_source_t source, const struct source_audio *audio)
{
	if (source->sample_info.samples_per_sec != audio->samples_per_sec ||
	    source->sample_info.format          != audio->format          ||
	    source->sample_info.speakers        != audio->speakers)
		reset_resampler(source, audio);

	if (source->audio_failed)
		return;

	if (source->resampler) {
		uint8_t  *output[MAX_AV_PLANES];
		uint32_t frames;
		uint64_t offset;

		memset(output, 0, sizeof(output));

		audio_resampler_resample(source->resampler,
				output, &frames, &offset,
				audio->data, audio->frames);

		copy_audio_data(source, (const uint8_t *const *)output, frames,
				audio->timestamp - offset);
	} else {
		copy_audio_data(source, audio->data, audio->frames,
				audio->timestamp);
	}
}

void obs_source_output_audio(obs_source_t source,
		const struct source_audio *audio)
{
	uint32_t flags;
	struct filtered_audio *output;

	if (!source || !audio)
		return;

	flags = source->info.output_flags;
	process_audio(source, audio);

	pthread_mutex_lock(&source->filter_mutex);
	output = filter_async_audio(source, &source->audio_data);

	if (output) {
		bool async = (flags & OBS_SOURCE_ASYNC) != 0;

		pthread_mutex_lock(&source->audio_mutex);

		/* wait for video to start before outputting any audio so we
		 * have a base for sync */
		if (source->timing_set || !async) {
			struct audio_data data;

			for (int i = 0; i < MAX_AV_PLANES; i++)
				data.data[i] = output->data[i];

			data.frames    = output->frames;
			data.timestamp = output->timestamp;
			source_output_audio_line(source, &data);
		}

		pthread_mutex_unlock(&source->audio_mutex);
	}

	pthread_mutex_unlock(&source->filter_mutex);
}

static inline bool frame_out_of_bounds(obs_source_t source, uint64_t ts)
{
	return ((ts - source->last_frame_ts) > MAX_TIMESTAMP_JUMP);
}

static bool ready_async_frame(obs_source_t source, uint64_t sys_time)
{
	struct source_frame *next_frame = source->video_frames.array[0];
	struct source_frame *frame      = NULL;
	uint64_t sys_offset = sys_time - source->last_sys_timestamp;
	uint64_t frame_time = next_frame->timestamp;
	uint64_t frame_offset = 0;

	/* account for timestamp invalidation */
	if (frame_out_of_bounds(source, frame_time)) {
		source->last_frame_ts = next_frame->timestamp;
		os_atomic_inc_long(&source->av_sync_ref);
	} else {
		frame_offset = frame_time - source->last_frame_ts;
		source->last_frame_ts += frame_offset;
	}

	while (frame_offset <= sys_offset) {
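		/* free each frame that should already have been displayed and
		 * advance to the next queued frame until we catch up with the
		 * elapsed system time */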
		source_frame_destroy(frame);

		if (source->video_frames.num == 1)
			return true;

		frame = next_frame;
		da_erase(source->video_frames, 0);
		next_frame = source->video_frames.array[0];

		/* more timestamp checking and compensating */
		if ((next_frame->timestamp - frame_time) > MAX_TIMESTAMP_JUMP) {
			source->last_frame_ts =
				next_frame->timestamp - frame_offset;
			os_atomic_inc_long(&source->av_sync_ref);
		}

		frame_time   = next_frame->timestamp;
		frame_offset = frame_time - source->last_frame_ts;
	}

	source_frame_destroy(frame);

	return frame != NULL;
}

static inline struct source_frame *get_closest_frame(obs_source_t source,
		uint64_t sys_time)
{
	if (ready_async_frame(source, sys_time)) {
		struct source_frame *frame = source->video_frames.array[0];
		da_erase(source->video_frames, 0);
		return frame;
	}

	return NULL;
}

/*
 * Ensures that cached frames are displayed on time.  If multiple frames
 * were cached between renders, then releases the unnecessary frames and uses
 * the frame with the closest timing to ensure sync.  Also ensures that timing
 * with audio is synchronized.
 */
struct source_frame *obs_source_get_frame(obs_source_t source)
{
	struct source_frame *frame = NULL;
	uint64_t sys_time;

	if (!source)
		return NULL;

	pthread_mutex_lock(&source->video_mutex);

	if (!source->video_frames.num)
		goto unlock;

	sys_time = os_gettime_ns();

	if (!source->last_frame_ts) {
		frame = source->video_frames.array[0];
		da_erase(source->video_frames, 0);

		source->last_frame_ts = frame->timestamp;
	} else {
		frame = get_closest_frame(source, sys_time);
	}

	/* reset timing to current system time */
	if (frame) {
		source->timing_adjust = sys_time - frame->timestamp;
		source->timing_set = true;
	}

	source->last_sys_timestamp = sys_time;

unlock:
	pthread_mutex_unlock(&source->video_mutex);

	if (frame)
		obs_source_addref(source);

	return frame;
}

void obs_source_release_frame(obs_source_t source, struct source_frame *frame)
{
	if (source && frame) {
		source_frame_destroy(frame);
		obs_source_release(source);
	}
}

const char *obs_source_getname(obs_source_t source)
{
	return source ? source->context.name : NULL;
}

void obs_source_setname(obs_source_t source, const char *name)
{
	if (!source) return;

	if (!name || !*name || strcmp(name, source->context.name) != 0) {
		struct calldata data;
		char *prev_name = bstrdup(source->context.name);
		obs_context_data_setname(&source->context, name);

		calldata_init(&data);
		calldata_setptr(&data, "source", source);
		calldata_setstring(&data, "new_name", source->context.name);
		calldata_setstring(&data, "prev_name", prev_name);
		signal_handler_signal(obs->signals, "source_rename", &data);
		signal_handler_signal(source->context.signals, "rename", &data);
		calldata_free(&data);
		bfree(prev_name);
	}
}

void obs_source_gettype(obs_source_t source, enum obs_source_type *type,
		const char **id)
{
	if (!source) return;

	if (type) *type = source->info.type;
	if (id)   *id   = source->info.id;
}

static inline void render_filter_bypass(obs_source_t target, effect_t effect,
		bool use_matrix)
{
	const char  *tech_name = use_matrix ? "DrawMatrix" : "Draw";
	technique_t tech       = effect_gettechnique(effect, tech_name);
	size_t      passes, i;

	passes = technique_begin(tech);
	for (i = 0; i < passes; i++) {
		technique_beginpass(tech, i);
		obs_source_video_render(target);
		technique_endpass(tech);
	}
	technique_end(tech);
}

static inline void render_filter_tex(texture_t tex, effect_t effect,
		uint32_t width, uint32_t height, bool use_matrix)
{
	const char  *tech_name = use_matrix ? "DrawMatrix" : "Draw";
	technique_t tech       = effect_gettechnique(effect, tech_name);
	eparam_t    image      = effect_getparambyname(effect, "image");
	size_t      passes, i;

	effect_settexture(image, tex);

	passes = technique_begin(tech);
	for (i = 0; i < passes; i++) {
		technique_beginpass(tech, i);
		gs_draw_sprite(tex, width, height, 0);
		technique_endpass(tech);
	}
	technique_end(tech);
}

void obs_source_process_filter(obs_source_t filter, effect_t effect,
		uint32_t width, uint32_t height, enum gs_color_format format,
		enum allow_direct_render allow_direct)
{
	obs_source_t target, parent;
	uint32_t     target_flags, parent_flags;
	int          cx, cy;
	bool         use_matrix, expects_def, can_directly;

	if (!filter) return;

	target       = obs_filter_gettarget(filter);
	parent       = obs_filter_getparent(filter);
	target_flags = target->info.output_flags;
	parent_flags = parent->info.output_flags;
	cx           = obs_source_getwidth(target);
	cy           = obs_source_getheight(target);
	use_matrix   = !!(target_flags & OBS_SOURCE_COLOR_MATRIX);
	expects_def  = !(parent_flags & OBS_SOURCE_CUSTOM_DRAW);
	can_directly = allow_direct == ALLOW_DIRECT_RENDERING;

	/* if the parent does not use any custom effects, and this is the last
	 * filter in the chain for the parent, then render the parent directly
	 * using the filter effect instead of rendering to texture to reduce
	 * the total number of passes */
	if (can_directly && expects_def && target == parent) {
		render_filter_bypass(target, effect, use_matrix);
		return;
	}

	if (!filter->filter_texrender)
		filter->filter_texrender = texrender_create(format,
				GS_ZS_NONE);

	if (texrender_begin(filter->filter_texrender, cx, cy)) {
		gs_ortho(0.0f, (float)cx, 0.0f, (float)cy, -100.0f, 100.0f);
		if (expects_def && parent == target)
			obs_source_default_render(parent, use_matrix);
		else
			obs_source_video_render(target);
		texrender_end(filter->filter_texrender);
	}

	/* --------------------------- */

	render_filter_tex(texrender_gettexture(filter->filter_texrender),
			effect, width, height, use_matrix);
}

signal_handler_t obs_source_signalhandler(obs_source_t source)
{
	return source ? source->context.signals : NULL;
}

proc_handler_t obs_source_prochandler(obs_source_t source)
{
	return source ? source->context.procs : NULL;
}

void obs_source_setvolume(obs_source_t source, float volume)
{
	if (source) {
		struct calldata data = {0};
		calldata_setptr(&data, "source", source);
		calldata_setfloat(&data, "volume", volume);

		signal_handler_signal(source->context.signals, "volume", &data);
		signal_handler_signal(obs->signals, "source_volume", &data);

		volume = (float)calldata_float(&data, "volume");
		calldata_free(&data);

		source->user_volume = volume;
	}
}

static void set_tree_preset_vol(obs_source_t parent, obs_source_t child,
		void *param)
{
	float *vol = param;
	child->present_volume = *vol;

	UNUSED_PARAMETER(parent);
}

void obs_source_set_present_volume(obs_source_t source, float volume)
{
	if (source) {
		source->present_volume = volume;

		/* don't set the presentation volume of the tree if a
		 * transition source, let the transition handle presentation
		 * volume for the child sources itself. */
		if (source->info.type != OBS_SOURCE_TYPE_TRANSITION)
			obs_source_enum_tree(source, set_tree_preset_vol,
					&volume);
	}
}

float obs_source_getvolume(obs_source_t source)
{
	return source ? source->user_volume : 0.0f;
}

float obs_source_get_present_volume(obs_source_t source)
{
	return source ? source->present_volume : 0.0f;
}

void obs_source_set_sync_offset(obs_source_t source, int64_t offset)
{
	if (source)
		source->sync_offset = offset;
}

int64_t obs_source_get_sync_offset(obs_source_t source)
{
	return source ? source->sync_offset : 0;
}

struct source_enum_data {
	obs_source_enum_proc_t enum_callback;
	void *param;
};

static void enum_source_tree_callback(obs_source_t parent, obs_source_t child,
		void *param)
{
	struct source_enum_data *data = param;

	if (child->info.enum_sources && !child->enum_refs) {
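		/* enum_refs guards against infinite recursion if a child is
		 * (directly or indirectly) its own descendant */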
		os_atomic_inc_long(&child->enum_refs);

		if (child->context.data)
			child->info.enum_sources(child->context.data,
					enum_source_tree_callback, data);

		os_atomic_dec_long(&child->enum_refs);
	}

	data->enum_callback(parent, child, data->param);
}

void obs_source_enum_sources(obs_source_t source,
		obs_source_enum_proc_t enum_callback,
		void *param)
{
	if (!source_valid(source)      ||
	    !source->info.enum_sources ||
	    source->enum_refs)
		return;

	obs_source_addref(source);

	os_atomic_inc_long(&source->enum_refs);
	source->info.enum_sources(source->context.data, enum_callback, param);
	os_atomic_dec_long(&source->enum_refs);

	obs_source_release(source);
}

void obs_source_enum_tree(obs_source_t source,
		obs_source_enum_proc_t enum_callback,
		void *param)
{
	struct source_enum_data data = {enum_callback, param};

	if (!source_valid(source)      ||
	    !source->info.enum_sources ||
	    source->enum_refs)
		return;

	obs_source_addref(source);

	os_atomic_inc_long(&source->enum_refs);
	source->info.enum_sources(source->context.data,
			enum_source_tree_callback,
			&data);
	os_atomic_dec_long(&source->enum_refs);

	obs_source_release(source);
}

void obs_source_add_child(obs_source_t parent, obs_source_t child)
{
	if (!parent || !child) return;

	for (int i = 0; i < parent->show_refs; i++) {
		enum view_type type;
		type = (i < parent->activate_refs) ? MAIN_VIEW : AUX_VIEW;
		obs_source_activate(child, type);
	}
}

void obs_source_remove_child(obs_source_t parent, obs_source_t child)
{
	if (!parent || !child) return;

	for (int i = 0; i < parent->show_refs; i++) {
		enum view_type type;
		type = (i < parent->activate_refs) ? MAIN_VIEW : AUX_VIEW;
		obs_source_deactivate(child, type);
	}
}

static void reset_transition_vol(obs_source_t parent, obs_source_t child,
		void *param)
{
	child->transition_volume = 0.0f;

	UNUSED_PARAMETER(parent);
	UNUSED_PARAMETER(param);
}

static void add_transition_vol(obs_source_t parent, obs_source_t child,
		void *param)
{
	float *vol = param;
	child->transition_volume += *vol;

	UNUSED_PARAMETER(parent);
}

static void apply_transition_vol(obs_source_t parent, obs_source_t child,
		void *param)
{
	child->present_volume = child->transition_volume;

	UNUSED_PARAMETER(parent);
	UNUSED_PARAMETER(param);
}

void obs_transition_begin_frame(obs_source_t transition)
{
	if (!transition) return;
	obs_source_enum_tree(transition, reset_transition_vol, NULL);
}

void obs_source_set_transition_vol(obs_source_t source, float vol)
{
	if (!source) return;

	add_transition_vol(NULL, source, &vol);
	obs_source_enum_tree(source, add_transition_vol, &vol);
}

void obs_transition_end_frame(obs_source_t transition)
{
	if (!transition) return;
	obs_source_enum_tree(transition, apply_transition_vol, NULL);
}

void obs_source_save(obs_source_t source)
{
	if (!source_valid(source) || !source->info.save) return;
	source->info.save(source->context.data, source->context.settings);
}

void obs_source_load(obs_source_t source)
{
	if (!source_valid(source) || !source->info.load) return;
	source->info.load(source->context.data, source->context.settings);
}