openanolis / dragonwell8_jdk, commit ca2d21df
Commit ca2d21df, authored Nov 27, 2009 by kalli
6833357: Improve time-stamp support in Gervill to reduce jitter
Reviewed-by: amenkov
Parent: 48523ecf
Showing 10 changed files with 585 additions and 86 deletions (+585, -86)
src/share/classes/com/sun/media/sound/MidiDeviceReceiver.java    +41 / -0
src/share/classes/com/sun/media/sound/SoftAudioBuffer.java       +24 / -0
src/share/classes/com/sun/media/sound/SoftChannel.java           +16 / -5
src/share/classes/com/sun/media/sound/SoftLimiter.java            +1 / -1
src/share/classes/com/sun/media/sound/SoftMainMixer.java        +167 / -51
src/share/classes/com/sun/media/sound/SoftReceiver.java           +7 / -2
src/share/classes/com/sun/media/sound/SoftSynthesizer.java        +5 / -0
src/share/classes/com/sun/media/sound/SoftVoice.java             +68 / -27
test/javax/sound/midi/Gervill/SoftReceiver/GetMidiDevice.java    +48 / -0
test/javax/sound/midi/Gervill/SoftSynthesizer/TestPreciseTimestampRendering.java  +208 / -0
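Note on the API being exercised (illustrative sketch, not part of the commit): a client renders Gervill in pull mode through AudioSynthesizer.openStream() and schedules events with Receiver.send(message, timeStamp), where the time stamp is in microseconds. Before this change the mixer only applied such messages at control-buffer boundaries; the diffs below turn the time stamp into a per-buffer sample offset. The class name and constants in the sketch are made up for the example; the calls themselves are the ones the new regression test uses.

import javax.sound.midi.Receiver;
import javax.sound.midi.ShortMessage;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;

import com.sun.media.sound.AudioSynthesizer;
import com.sun.media.sound.SoftSynthesizer;

public class TimestampedNoteSketch {
    public static void main(String[] args) throws Exception {
        AudioFormat format = new AudioFormat(44100, 16, 1, true, false);
        AudioSynthesizer synth = new SoftSynthesizer();
        // Pull-mode rendering: the caller reads the audio instead of opening a line.
        AudioInputStream stream = synth.openStream(format, null);
        Receiver recv = synth.getReceiver();

        // Schedule a note-on half a second into the rendered stream.
        ShortMessage noteOn = new ShortMessage();
        noteOn.setMessage(ShortMessage.NOTE_ON, 0, 69, 127);
        recv.send(noteOn, 500000L);        // time stamp in microseconds

        // Read roughly one second of audio; with this change the note starts on
        // the sample that corresponds to the 500 ms time stamp (a real caller
        // would loop until the buffer is full).
        byte[] buffer = new byte[44100 * format.getFrameSize()];
        stream.read(buffer);
        synth.close();
    }
}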
src/share/classes/com/sun/media/sound/MidiDeviceReceiver.java (new file, 0 → 100644)
/*
* Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Sun designates this
* particular file as subject to the "Classpath" exception as provided
* by Sun in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*/
package com.sun.media.sound;

import javax.sound.midi.MidiDevice;
import javax.sound.midi.Receiver;

/**
 * A Receiver with reference to it's MidiDevice object.
 *
 * @author Karl Helgason
 */
public interface MidiDeviceReceiver extends Receiver {

    /** Obtains the MidiDevice object associated with this Receiver.
     */
    public MidiDevice getMidiDevice();
}
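With the new interface, the owning device can be recovered from a plain Receiver without casting to the concrete SoftReceiver. A small sketch (the helper class below is hypothetical, not part of the commit):

import javax.sound.midi.MidiDevice;
import javax.sound.midi.Receiver;

import com.sun.media.sound.MidiDeviceReceiver;

final class ReceiverUtil {
    // Hypothetical helper: returns the owning device when the receiver exposes it.
    static MidiDevice deviceOf(Receiver recv) {
        if (recv instanceof MidiDeviceReceiver)
            return ((MidiDeviceReceiver) recv).getMidiDevice();
        return null;
    }
}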
src/share/classes/com/sun/media/sound/SoftAudioBuffer.java
@@ -48,6 +48,30 @@ public class SoftAudioBuffer {
         converter = AudioFloatConverter.getConverter(format);
     }
 
+    public void swap(SoftAudioBuffer swap)
+    {
+        int bak_size = size;
+        float[] bak_buffer = buffer;
+        boolean bak_empty = empty;
+        AudioFormat bak_format = format;
+        AudioFloatConverter bak_converter = converter;
+        byte[] bak_converter_buffer = converter_buffer;
+
+        size = swap.size;
+        buffer = swap.buffer;
+        empty = swap.empty;
+        format = swap.format;
+        converter = swap.converter;
+        converter_buffer = swap.converter_buffer;
+
+        swap.size = bak_size;
+        swap.buffer = bak_buffer;
+        swap.empty = bak_empty;
+        swap.format = bak_format;
+        swap.converter = bak_converter;
+        swap.converter_buffer = bak_converter_buffer;
+    }
+
     public AudioFormat getFormat() {
         return format;
     }
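SoftAudioBuffer.swap() exchanges the two buffers' internal state by reference, which is what lets SoftMainMixer.processAudioBuffers() promote each CHANNEL_DELAY_* buffer to its live counterpart at the start of a render cycle without copying samples. A sketch of that hand-off, assuming the class and its (size, format) constructor are accessible outside the package as in this source tree; it only illustrates the pattern, not the mixer's real wiring:

import javax.sound.sampled.AudioFormat;

import com.sun.media.sound.SoftAudioBuffer;

public class SwapSketch {
    public static void main(String[] args) {
        AudioFormat fmt = new AudioFormat(44100, 16, 1, true, false);
        SoftAudioBuffer live = new SoftAudioBuffer(300, fmt);
        SoftAudioBuffer delayed = new SoftAudioBuffer(300, fmt);

        // A voice that started mid-buffer wrote its overflow into 'delayed'.
        delayed.array()[0] = 1.0f;

        // Next cycle: whatever landed in the delay buffer becomes the start of
        // the live buffer at zero copy cost, the same way the mixer treats
        // CHANNEL_DELAY_LEFT/RIGHT/MONO/EFFECT1/EFFECT2.
        if (!delayed.isSilent())
            live.swap(delayed);
    }
}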
src/share/classes/com/sun/media/sound/SoftChannel.java
@@ -328,7 +328,7 @@ public class SoftChannel implements MidiChannel, ModelDirectedPlayer {
     }
 
     protected void initVoice(SoftVoice voice, SoftPerformer p, int voiceID,
-            int noteNumber, int velocity, ModelConnectionBlock[] connectionBlocks,
+            int noteNumber, int velocity, int delay, ModelConnectionBlock[] connectionBlocks,
             ModelChannelMixer channelmixer, boolean releaseTriggered) {
         if (voice.active) {
             // Voice is active , we must steal the voice
@@ -363,7 +363,7 @@ public class SoftChannel implements MidiChannel, ModelDirectedPlayer {
         voice.objects.put("midi_cc", co_midi_cc);
         voice.objects.put("midi_rpn", co_midi_rpn);
         voice.objects.put("midi_nrpn", co_midi_nrpn);
-        voice.noteOn(noteNumber, velocity);
+        voice.noteOn(noteNumber, velocity, delay);
         voice.setMute(mute);
         voice.setSoloMute(solomute);
         if (releaseTriggered)
@@ -399,14 +399,21 @@ public class SoftChannel implements MidiChannel, ModelDirectedPlayer {
     }
 
     public void noteOn(int noteNumber, int velocity) {
+        noteOn(noteNumber, velocity, 0);
+    }
+
+    /* A special noteOn with delay parameter, which is used to
+     * start note within control buffers.
+     */
+    protected void noteOn(int noteNumber, int velocity, int delay) {
         noteNumber = restrict7Bit(noteNumber);
         velocity = restrict7Bit(velocity);
-        noteOn_internal(noteNumber, velocity);
+        noteOn_internal(noteNumber, velocity, delay);
         if (current_mixer != null)
             current_mixer.noteOn(noteNumber, velocity);
     }
 
-    private void noteOn_internal(int noteNumber, int velocity) {
+    private void noteOn_internal(int noteNumber, int velocity, int delay) {
         if (velocity == 0) {
             noteOff_internal(noteNumber, 64);
@@ -490,6 +497,7 @@ public class SoftChannel implements MidiChannel, ModelDirectedPlayer {
         int tunedKey = (int)(Math.round(tuning.getTuning()[noteNumber]/100.0));
         play_noteNumber = noteNumber;
         play_velocity = velocity;
+        play_delay = delay;
         play_releasetriggered = false;
         lastVelocity[noteNumber] = velocity;
         current_director.noteOn(tunedKey, velocity);
@@ -594,6 +602,7 @@ public class SoftChannel implements MidiChannel, ModelDirectedPlayer {
         play_noteNumber = noteNumber;
         play_velocity = lastVelocity[noteNumber];
         play_releasetriggered = true;
+        play_delay = 0;
         current_director.noteOff(tunedKey, velocity);
     }
 
@@ -604,12 +613,14 @@ public class SoftChannel implements MidiChannel, ModelDirectedPlayer {
     private int voiceNo = 0;
 
     private int play_noteNumber = 0;
     private int play_velocity = 0;
+    private int play_delay = 0;
     private boolean play_releasetriggered = false;
 
     public void play(int performerIndex, ModelConnectionBlock[] connectionBlocks) {
 
         int noteNumber = play_noteNumber;
         int velocity = play_velocity;
+        int delay = play_delay;
         boolean releasetriggered = play_releasetriggered;
         SoftPerformer p = current_instrument.getPerformers()[performerIndex];
@@ -633,7 +644,7 @@ public class SoftChannel implements MidiChannel, ModelDirectedPlayer {
         if (voiceNo == -1)
             return;
 
-        initVoice(voices[voiceNo], p, prevVoiceID, noteNumber, velocity,
+        initVoice(voices[voiceNo], p, prevVoiceID, noteNumber, velocity, delay,
                 connectionBlocks, current_mixer, releasetriggered);
     }
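The new delay argument is a sample offset inside the current control buffer. SoftMainMixer.processMessages() derives it from the message's microsecond time stamp before calling noteOn(noteNumber, velocity, delay); the standalone method below mirrors that arithmetic (class and method names are illustrative only):

public class DelaySketch {

    // Mirrors the conversion added in SoftMainMixer.processMessages():
    // microseconds past the buffer start become samples into the buffer,
    // clamped to the control-buffer length.
    static int delayInSamples(long eventMicros, long bufferStartMicros,
                              float samplerate, int bufferLenSamples) {
        long msec_delay = eventMicros - bufferStartMicros;
        int delay = (int) (msec_delay * (samplerate / 1000000.0) + 0.5);
        if (delay > bufferLenSamples)
            delay = bufferLenSamples;
        if (delay < 0)
            delay = 0;
        return delay;
    }

    public static void main(String[] args) {
        // A 300-sample control buffer at 44100 Hz starting at t = 1 000 000 us:
        // an event stamped 1 003 400 us lands 150 samples into that buffer.
        System.out.println(delayInSamples(1003400L, 1000000L, 44100f, 300));
    }
}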
src/share/classes/com/sun/media/sound/SoftLimiter.java
@@ -79,7 +79,7 @@ public class SoftLimiter implements SoftAudioProcessor {
         if (silentcounter > 60) {
             if (!mix) {
                 bufferLout.clear();
-                bufferRout.clear();
+                if (bufferRout != null) bufferRout.clear();
             }
             return;
         }
src/share/classes/com/sun/media/sound/SoftMainMixer.java
@@ -26,7 +26,6 @@ package com.sun.media.sound;
 
 import java.io.IOException;
 import java.io.InputStream;
-import java.util.Arrays;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.Set;
@@ -46,28 +45,37 @@ import javax.sound.sampled.AudioSystem;
  */
 public class SoftMainMixer {
 
+    // A private class thats contains a ModelChannelMixer and it's private buffers.
+    // This becomes necessary when we want to have separate delay buffers for each channel mixer.
+    private class SoftChannelMixerContainer
+    {
+        ModelChannelMixer mixer;
+        SoftAudioBuffer[] buffers;
+    }
+
     public final static int CHANNEL_LEFT = 0;
     public final static int CHANNEL_RIGHT = 1;
     public final static int CHANNEL_MONO = 2;
-    public final static int CHANNEL_EFFECT1 = 3;
-    public final static int CHANNEL_EFFECT2 = 4;
-    public final static int CHANNEL_EFFECT3 = 5;
-    public final static int CHANNEL_EFFECT4 = 6;
+    public final static int CHANNEL_DELAY_LEFT = 3;
+    public final static int CHANNEL_DELAY_RIGHT = 4;
+    public final static int CHANNEL_DELAY_MONO = 5;
+    public final static int CHANNEL_EFFECT1 = 6;
+    public final static int CHANNEL_EFFECT2 = 7;
+    public final static int CHANNEL_DELAY_EFFECT1 = 8;
+    public final static int CHANNEL_DELAY_EFFECT2 = 9;
     public final static int CHANNEL_LEFT_DRY = 10;
     public final static int CHANNEL_RIGHT_DRY = 11;
     public final static int CHANNEL_SCRATCH1 = 12;
     public final static int CHANNEL_SCRATCH2 = 13;
-    public final static int CHANNEL_CHANNELMIXER_LEFT = 14;
-    public final static int CHANNEL_CHANNELMIXER_RIGHT = 15;
-    public final static int CHANNEL_CHANNELMIXER_MONO = 16;
     protected boolean active_sensing_on = false;
     private long msec_last_activity = -1;
     private boolean pusher_silent = false;
     private int pusher_silent_count = 0;
-    private long msec_pos = 0;
+    private long sample_pos = 0;
     protected boolean readfully = true;
     private Object control_mutex;
     private SoftSynthesizer synth;
+    private float samplerate = 44100;
     private int nrofchannels = 2;
     private SoftVoice[] voicestatus = null;
     private SoftAudioBuffer[] buffers;
@@ -75,7 +83,10 @@ public class SoftMainMixer {
     private SoftAudioProcessor chorus;
     private SoftAudioProcessor agc;
     private long msec_buffer_len = 0;
+    private int buffer_len = 0;
     protected TreeMap<Long, Object> midimessages = new TreeMap<Long, Object>();
+    private int delay_midievent = 0;
+    private int max_delay_midievent = 0;
     double last_volume_left = 1.0;
     double last_volume_right = 1.0;
     private double[] co_master_balance = new double[1];
@@ -83,9 +94,9 @@ public class SoftMainMixer {
     private double[] co_master_coarse_tuning = new double[1];
     private double[] co_master_fine_tuning = new double[1];
     private AudioInputStream ais;
-    private Set<ModelChannelMixer> registeredMixers = null;
+    private Set<SoftChannelMixerContainer> registeredMixers = null;
     private Set<ModelChannelMixer> stoppedMixers = null;
-    private ModelChannelMixer[] cur_registeredMixers = null;
+    private SoftChannelMixerContainer[] cur_registeredMixers = null;
     protected SoftControl co_master = new SoftControl() {
         double[] balance = co_master_balance;
@@ -413,26 +424,68 @@ public class SoftMainMixer {
         Iterator<Entry<Long, Object>> iter = midimessages.entrySet().iterator();
         while (iter.hasNext()) {
             Entry<Long, Object> entry = iter.next();
-            if (entry.getKey() > (timeStamp + 100))
+            if (entry.getKey() >= (timeStamp + msec_buffer_len))
                 return;
+            long msec_delay = entry.getKey() - timeStamp;
+            delay_midievent = (int)(msec_delay * (samplerate / 1000000.0) + 0.5);
+            if (delay_midievent > max_delay_midievent)
+                delay_midievent = max_delay_midievent;
+            if (delay_midievent < 0)
+                delay_midievent = 0;
             processMessage(entry.getValue());
             iter.remove();
         }
+        delay_midievent = 0;
     }
 
     protected void processAudioBuffers() {
+
+        if(synth.weakstream != null && synth.weakstream.silent_samples != 0) {
+            sample_pos += synth.weakstream.silent_samples;
+            synth.weakstream.silent_samples = 0;
+        }
+
         for (int i = 0; i < buffers.length; i++) {
-            buffers[i].clear();
+            if(i != CHANNEL_DELAY_LEFT &&
+                    i != CHANNEL_DELAY_RIGHT &&
+                    i != CHANNEL_DELAY_MONO &&
+                    i != CHANNEL_DELAY_EFFECT1 &&
+                    i != CHANNEL_DELAY_EFFECT2)
+                buffers[i].clear();
         }
 
+        if(!buffers[CHANNEL_DELAY_LEFT].isSilent())
+        {
+            buffers[CHANNEL_LEFT].swap(buffers[CHANNEL_DELAY_LEFT]);
+        }
+        if(!buffers[CHANNEL_DELAY_RIGHT].isSilent())
+        {
+            buffers[CHANNEL_RIGHT].swap(buffers[CHANNEL_DELAY_RIGHT]);
+        }
+        if(!buffers[CHANNEL_DELAY_MONO].isSilent())
+        {
+            buffers[CHANNEL_MONO].swap(buffers[CHANNEL_DELAY_MONO]);
+        }
+        if(!buffers[CHANNEL_DELAY_EFFECT1].isSilent())
+        {
+            buffers[CHANNEL_EFFECT1].swap(buffers[CHANNEL_DELAY_EFFECT1]);
+        }
+        if(!buffers[CHANNEL_DELAY_EFFECT2].isSilent())
+        {
+            buffers[CHANNEL_EFFECT2].swap(buffers[CHANNEL_DELAY_EFFECT2]);
+        }
+
         double volume_left;
         double volume_right;
 
-        ModelChannelMixer[] act_registeredMixers;
+        SoftChannelMixerContainer[] act_registeredMixers;
 
         // perform control logic
         synchronized (control_mutex) {
 
+            long msec_pos = (long)(sample_pos * (1000000.0 / samplerate));
+
             processMessages(msec_pos);
 
             if (active_sensing_on) {
@@ -450,7 +503,7 @@ public class SoftMainMixer {
             for (int i = 0; i < voicestatus.length; i++)
                 if (voicestatus[i].active)
                     voicestatus[i].processControlLogic();
-            msec_pos += msec_buffer_len;
+            sample_pos += buffer_len;
 
             double volume = co_master_volume[0];
             volume_left = volume;
@@ -469,7 +522,7 @@ public class SoftMainMixer {
             if (cur_registeredMixers == null) {
                 if (registeredMixers != null) {
                     cur_registeredMixers =
-                            new ModelChannelMixer[registeredMixers.size()];
+                            new SoftChannelMixerContainer[registeredMixers.size()];
                     registeredMixers.toArray(cur_registeredMixers);
                 }
             }
@@ -483,44 +536,61 @@ public class SoftMainMixer {
             if (act_registeredMixers != null) {
 
-                // Reroute default left,right output
-                // to channelmixer left,right input/output
+                // Make backup of left,right,mono channels
                 SoftAudioBuffer leftbak = buffers[CHANNEL_LEFT];
                 SoftAudioBuffer rightbak = buffers[CHANNEL_RIGHT];
                 SoftAudioBuffer monobak = buffers[CHANNEL_MONO];
-                buffers[CHANNEL_LEFT] = buffers[CHANNEL_CHANNELMIXER_LEFT];
-                buffers[CHANNEL_RIGHT] = buffers[CHANNEL_CHANNELMIXER_RIGHT];
-                buffers[CHANNEL_MONO] = buffers[CHANNEL_CHANNELMIXER_MONO];
+                SoftAudioBuffer delayleftbak = buffers[CHANNEL_DELAY_LEFT];
+                SoftAudioBuffer delayrightbak = buffers[CHANNEL_DELAY_RIGHT];
+                SoftAudioBuffer delaymonobak = buffers[CHANNEL_DELAY_MONO];
 
                 int bufferlen = buffers[CHANNEL_LEFT].getSize();
 
                 float[][] cbuffer = new float[nrofchannels][];
-                cbuffer[0] = buffers[CHANNEL_LEFT].array();
-                if (nrofchannels != 1)
-                    cbuffer[1] = buffers[CHANNEL_RIGHT].array();
-
                 float[][] obuffer = new float[nrofchannels][];
                 obuffer[0] = leftbak.array();
                 if (nrofchannels != 1)
                     obuffer[1] = rightbak.array();
 
-                for (ModelChannelMixer cmixer : act_registeredMixers) {
-                    for (int i = 0; i < cbuffer.length; i++)
-                        Arrays.fill(cbuffer[i], 0);
+                for (SoftChannelMixerContainer cmixer : act_registeredMixers) {
+
+                    // Reroute default left,right output
+                    // to channelmixer left,right input/output
+                    buffers[CHANNEL_LEFT] = cmixer.buffers[CHANNEL_LEFT];
+                    buffers[CHANNEL_RIGHT] = cmixer.buffers[CHANNEL_RIGHT];
+                    buffers[CHANNEL_MONO] = cmixer.buffers[CHANNEL_MONO];
+                    buffers[CHANNEL_DELAY_LEFT] = cmixer.buffers[CHANNEL_DELAY_LEFT];
+                    buffers[CHANNEL_DELAY_RIGHT] = cmixer.buffers[CHANNEL_DELAY_RIGHT];
+                    buffers[CHANNEL_DELAY_MONO] = cmixer.buffers[CHANNEL_DELAY_MONO];
+
+                    buffers[CHANNEL_LEFT].clear();
+                    buffers[CHANNEL_RIGHT].clear();
+                    buffers[CHANNEL_MONO].clear();
+                    buffers[CHANNEL_MONO].clear();
+
+                    if(!buffers[CHANNEL_DELAY_LEFT].isSilent())
+                    {
+                        buffers[CHANNEL_LEFT].swap(buffers[CHANNEL_DELAY_LEFT]);
+                    }
+                    if(!buffers[CHANNEL_DELAY_RIGHT].isSilent())
+                    {
+                        buffers[CHANNEL_RIGHT].swap(buffers[CHANNEL_DELAY_RIGHT]);
+                    }
+                    if(!buffers[CHANNEL_DELAY_MONO].isSilent())
+                    {
+                        buffers[CHANNEL_MONO].swap(buffers[CHANNEL_DELAY_MONO]);
+                    }
+
+                    cbuffer[0] = buffers[CHANNEL_LEFT].array();
+                    if (nrofchannels != 1)
+                        cbuffer[1] = buffers[CHANNEL_RIGHT].array();
 
                     boolean hasactivevoices = false;
                     for (int i = 0; i < voicestatus.length; i++)
                         if (voicestatus[i].active)
-                            if (voicestatus[i].channelmixer == cmixer) {
+                            if (voicestatus[i].channelmixer == cmixer.mixer) {
                                 voicestatus[i].processAudioLogic(buffers);
                                 hasactivevoices = true;
                             }
-                    if(!cmixer.process(cbuffer, 0, bufferlen)) {
-                        synchronized (control_mutex) {
-                            registeredMixers.remove(cmixer);
-                            cur_registeredMixers = null;
-                        }
-                    }
 
                     if(!buffers[CHANNEL_MONO].isSilent())
                     {
@@ -542,6 +612,13 @@ public class SoftMainMixer {
                         }
                     }
 
+                    if(!cmixer.mixer.process(cbuffer, 0, bufferlen)) {
+                        synchronized (control_mutex) {
+                            registeredMixers.remove(cmixer);
+                            cur_registeredMixers = null;
+                        }
+                    }
+
                     for (int i = 0; i < cbuffer.length; i++) {
                         float[] cbuff = cbuffer[i];
                         float[] obuff = obuffer[i];
@@ -554,7 +631,7 @@ public class SoftMainMixer {
                     if (stoppedMixers != null) {
                         if (stoppedMixers.contains(cmixer)) {
                             stoppedMixers.remove(cmixer);
-                            cmixer.stop();
+                            cmixer.mixer.stop();
                         }
                     }
                 }
@@ -565,6 +642,9 @@ public class SoftMainMixer {
                 buffers[CHANNEL_LEFT] = leftbak;
                 buffers[CHANNEL_RIGHT] = rightbak;
                 buffers[CHANNEL_MONO] = monobak;
+                buffers[CHANNEL_DELAY_LEFT] = delayleftbak;
+                buffers[CHANNEL_DELAY_RIGHT] = delayrightbak;
+                buffers[CHANNEL_DELAY_MONO] = delaymonobak;
 
             }
@@ -650,14 +730,23 @@ public class SoftMainMixer {
             if (buffers[CHANNEL_LEFT].isSilent()
                     && buffers[CHANNEL_RIGHT].isSilent())
             {
-                pusher_silent_count++;
-                if (pusher_silent_count > 5)
+                int midimessages_size;
+                synchronized (control_mutex) {
+                    midimessages_size = midimessages.size();
+                }
+
+                if (midimessages_size == 0)
                 {
-                    pusher_silent_count = 0;
-                    synchronized (control_mutex) {
-                        pusher_silent = true;
-                        if (synth.weakstream != null)
-                            synth.weakstream.setInputStream(null);
+                    pusher_silent_count++;
+                    if (pusher_silent_count > 5)
+                    {
+                        pusher_silent_count = 0;
+                        synchronized (control_mutex) {
+                            pusher_silent = true;
+                            if (synth.weakstream != null)
+                                synth.weakstream.setInputStream(null);
+                        }
                     }
                 }
             }
@@ -672,13 +761,18 @@ public class SoftMainMixer {
     // Must only we called within control_mutex synchronization
     public void activity()
     {
-        msec_last_activity = msec_pos;
+        long silent_samples = 0;
         if (pusher_silent)
         {
             pusher_silent = false;
             if (synth.weakstream != null)
+            {
                 synth.weakstream.setInputStream(ais);
+                silent_samples = synth.weakstream.silent_samples;
+            }
         }
+        msec_last_activity = (long)((sample_pos + silent_samples)
+                * (1000000.0 / samplerate));
     }
 
     public void stopMixer(ModelChannelMixer mixer) {
@@ -689,15 +783,22 @@ public class SoftMainMixer {
     public void registerMixer(ModelChannelMixer mixer) {
         if (registeredMixers == null)
-            registeredMixers = new HashSet<ModelChannelMixer>();
-        registeredMixers.add(mixer);
+            registeredMixers = new HashSet<SoftChannelMixerContainer>();
+
+        SoftChannelMixerContainer mixercontainer = new SoftChannelMixerContainer();
+        mixercontainer.buffers = new SoftAudioBuffer[6];
+        for (int i = 0; i < mixercontainer.buffers.length; i++) {
+            mixercontainer.buffers[i] =
+                new SoftAudioBuffer(buffer_len, synth.getFormat());
+        }
+        mixercontainer.mixer = mixer;
+        registeredMixers.add(mixercontainer);
         cur_registeredMixers = null;
     }
 
     public SoftMainMixer(SoftSynthesizer synth) {
         this.synth = synth;
 
-        msec_pos = 0;
+        sample_pos = 0;
 
         co_master_balance[0] = 0.5;
         co_master_volume[0] = 1;
@@ -705,14 +806,18 @@ public class SoftMainMixer {
         co_master_fine_tuning[0] = 0.5;
 
         msec_buffer_len = (long) (1000000.0 / synth.getControlRate());
+        samplerate = synth.getFormat().getSampleRate();
         nrofchannels = synth.getFormat().getChannels();
 
        int buffersize = (int) (synth.getFormat().getSampleRate()
                / synth.getControlRate());
 
+        buffer_len = buffersize;
+        max_delay_midievent = buffersize;
+
        control_mutex = synth.control_mutex;
-        buffers = new SoftAudioBuffer[17];
+        buffers = new SoftAudioBuffer[14];
         for (int i = 0; i < buffers.length; i++) {
             buffers[i] = new SoftAudioBuffer(buffersize, synth.getFormat());
         }
@@ -994,7 +1099,10 @@ public class SoftMainMixer {
         switch (cmd) {
         case ShortMessage.NOTE_ON:
-            softchannel.noteOn(data1, data2);
+            if(delay_midievent != 0)
+                softchannel.noteOn(data1, data2, delay_midievent);
+            else
+                softchannel.noteOn(data1, data2);
             break;
         case ShortMessage.NOTE_OFF:
             softchannel.noteOff(data1, data2);
@@ -1021,7 +1129,15 @@ public class SoftMainMixer {
     }
 
     public long getMicrosecondPosition() {
-        return msec_pos;
+        if(pusher_silent)
+        {
+            if(synth.weakstream != null)
+            {
+                return (long)((sample_pos + synth.weakstream.silent_samples)
+                        * (1000000.0 / samplerate));
+            }
+        }
+        return (long)(sample_pos * (1000000.0 / samplerate));
     }
 
     public void close() {
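The mixer's clock is now kept in samples (sample_pos) and converted to microseconds on demand, as getMicrosecondPosition() above does. That removes the drift of the old scheme, which added a truncated msec_buffer_len to msec_pos every control cycle. An isolated illustration, assuming Gervill's default 147 Hz control rate at 44100 Hz (300-sample buffers); class and method names are for the example only:

public class MixerClockSketch {

    static final float SAMPLERATE = 44100f;

    // Same expression the patched SoftMainMixer uses.
    static long microsecondPosition(long samplePos) {
        return (long) (samplePos * (1000000.0 / SAMPLERATE));
    }

    // Old per-cycle increment: the (long) cast truncates the fractional part.
    static long microsecondsPerBuffer(float controlRate) {
        return (long) (1000000.0 / controlRate);
    }

    public static void main(String[] args) {
        // One second of audio = 147 control cycles of 300 samples.
        long samples = 147L * 300L;
        System.out.println(microsecondPosition(samples));        // ~1 000 000 us
        System.out.println(147L * microsecondsPerBuffer(147f));   // 999 894 us, about 106 us short
    }
}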
src/share/classes/com/sun/media/sound/SoftReceiver.java
@@ -26,8 +26,8 @@ package com.sun.media.sound;
 
 import java.util.TreeMap;
 
+import javax.sound.midi.MidiDevice;
 import javax.sound.midi.MidiMessage;
-import javax.sound.midi.Receiver;
 import javax.sound.midi.ShortMessage;
 
 /**
@@ -35,7 +35,7 @@ import javax.sound.midi.ShortMessage;
  *
  * @author Karl Helgason
  */
-public class SoftReceiver implements Receiver {
+public class SoftReceiver implements MidiDeviceReceiver {
 
     protected boolean open = true;
     private Object control_mutex;
@@ -51,6 +51,10 @@ public class SoftReceiver implements Receiver {
         this.midimessages = mainmixer.midimessages;
     }
 
+    public MidiDevice getMidiDevice() {
+        return synth;
+    }
+
     public void send(MidiMessage message, long timeStamp) {
 
         synchronized (control_mutex) {
@@ -60,6 +64,7 @@ public class SoftReceiver implements Receiver {
         if (timeStamp != -1) {
             synchronized (control_mutex) {
+                mainmixer.activity();
                 while (midimessages.get(timeStamp) != null)
                     timeStamp++;
                 if (message instanceof ShortMessage
src/share/classes/com/sun/media/sound/SoftSynthesizer.java
@@ -66,6 +66,8 @@ public class SoftSynthesizer implements AudioSynthesizer,
         public SoftAudioPusher pusher = null;
         public AudioInputStream jitter_stream = null;
         public SourceDataLine sourceDataLine = null;
+        public volatile long silent_samples = 0;
+        private int framesize = 0;
         private WeakReference<AudioInputStream> weak_stream_link;
         private AudioFloatConverter converter;
         private float[] silentbuffer = null;
@@ -101,6 +103,8 @@ public class SoftSynthesizer implements AudioSynthesizer,
                 silentbuffer = new float[flen];
             converter.toByteArray(silentbuffer, flen, b, off);
 
+            silent_samples += (long)((len / framesize));
+
             if (pusher != null)
                 if (weak_stream_link.get() == null) {
@@ -136,6 +140,7 @@ public class SoftSynthesizer implements AudioSynthesizer,
             weak_stream_link = new WeakReference<AudioInputStream>(stream);
             converter = AudioFloatConverter.getConverter(stream.getFormat());
             samplesize = stream.getFormat().getFrameSize() / stream.getFormat().getChannels();
+            framesize = stream.getFormat().getFrameSize();
         }
 
         public AudioInputStream getAudioInputStream()
src/share/classes/com/sun/media/sound/SoftVoice.java
@@ -43,6 +43,7 @@ public class SoftVoice extends VoiceStatus {
     private int noteOn_noteNumber = 0;
     private int noteOn_velocity = 0;
     private int noteOff_velocity = 0;
+    private int delay = 0;
     protected ModelChannelMixer channelmixer = null;
     protected double tunedKey = 0;
     protected SoftTuning tuning = null;
@@ -294,7 +295,7 @@ public class SoftVoice extends VoiceStatus {
         tunedKey = tuning.getTuning(noteNumber) / 100.0;
     }
 
-    protected void noteOn(int noteNumber, int velocity) {
+    protected void noteOn(int noteNumber, int velocity, int delay) {
         sustain = false;
         sostenuto = false;
@@ -308,6 +309,7 @@ public class SoftVoice extends VoiceStatus {
         noteOn_noteNumber = noteNumber;
         noteOn_velocity = velocity;
+        this.delay = delay;
         lastMuteValue = 0;
         lastSoloMuteValue = 0;
@@ -562,7 +564,7 @@ public class SoftVoice extends VoiceStatus {
         if (stealer_channel != null) {
             stealer_channel.initVoice(this, stealer_performer,
-                    stealer_voiceID, stealer_noteNumber, stealer_velocity,
+                    stealer_voiceID, stealer_noteNumber, stealer_velocity, 0,
                     stealer_extendedConnectionBlocks, stealer_channelmixer,
                     stealer_releaseTriggered);
             stealer_releaseTriggered = false;
@@ -733,23 +735,55 @@ public class SoftVoice extends VoiceStatus {
     }
 
     protected void mixAudioStream(SoftAudioBuffer in, SoftAudioBuffer out,
-            float amp_from, float amp_to) {
+            SoftAudioBuffer dout, float amp_from, float amp_to) {
         int bufferlen = in.getSize();
         if (amp_from < 0.000000001 && amp_to < 0.000000001)
             return;
-        if (amp_from == amp_to) {
-            float[] fout = out.array();
-            float[] fin = in.array();
-            for (int i = 0; i < bufferlen; i++)
-                fout[i] += fin[i] * amp_to;
-        } else {
-            float amp = amp_from;
-            float amp_delta = (amp_to - amp_from) / bufferlen;
-            float[] fout = out.array();
-            float[] fin = in.array();
-            for (int i = 0; i < bufferlen; i++) {
-                amp += amp_delta;
-                fout[i] += fin[i] * amp;
-            }
-        }
+        if (dout != null && delay != 0)
+        {
+            if (amp_from == amp_to) {
+                float[] fout = out.array();
+                float[] fin = in.array();
+                int j = 0;
+                for (int i = delay; i < bufferlen; i++)
+                    fout[i] += fin[j++] * amp_to;
+                fout = dout.array();
+                for (int i = 0; i < delay; i++)
+                    fout[i] += fin[j++] * amp_to;
+            } else {
+                float amp = amp_from;
+                float amp_delta = (amp_to - amp_from) / bufferlen;
+                float[] fout = out.array();
+                float[] fin = in.array();
+                int j = 0;
+                for (int i = delay; i < bufferlen; i++) {
+                    amp += amp_delta;
+                    fout[i] += fin[j++] * amp;
+                }
+                fout = dout.array();
+                for (int i = 0; i < delay; i++) {
+                    amp += amp_delta;
+                    fout[i] += fin[j++] * amp;
+                }
+            }
+        }
+        else
+        {
+            if (amp_from == amp_to) {
+                float[] fout = out.array();
+                float[] fin = in.array();
+                for (int i = 0; i < bufferlen; i++)
+                    fout[i] += fin[i] * amp_to;
+            } else {
+                float amp = amp_from;
+                float amp_delta = (amp_to - amp_from) / bufferlen;
+                float[] fout = out.array();
+                float[] fin = in.array();
+                for (int i = 0; i < bufferlen; i++) {
+                    amp += amp_delta;
+                    fout[i] += fin[i] * amp;
+                }
+            }
+        }
     }
@@ -785,6 +819,13 @@ public class SoftVoice extends VoiceStatus {
         SoftAudioBuffer mono = buffer[SoftMainMixer.CHANNEL_MONO];
         SoftAudioBuffer eff1 = buffer[SoftMainMixer.CHANNEL_EFFECT1];
         SoftAudioBuffer eff2 = buffer[SoftMainMixer.CHANNEL_EFFECT2];
+
+        SoftAudioBuffer dleft = buffer[SoftMainMixer.CHANNEL_DELAY_LEFT];
+        SoftAudioBuffer dright = buffer[SoftMainMixer.CHANNEL_DELAY_RIGHT];
+        SoftAudioBuffer dmono = buffer[SoftMainMixer.CHANNEL_DELAY_MONO];
+        SoftAudioBuffer deff1 = buffer[SoftMainMixer.CHANNEL_DELAY_EFFECT1];
+        SoftAudioBuffer deff2 = buffer[SoftMainMixer.CHANNEL_DELAY_EFFECT2];
+
         SoftAudioBuffer leftdry = buffer[SoftMainMixer.CHANNEL_LEFT_DRY];
         SoftAudioBuffer rightdry = buffer[SoftMainMixer.CHANNEL_RIGHT_DRY];
@@ -799,42 +840,42 @@ public class SoftVoice extends VoiceStatus {
         if (nrofchannels == 1) {
             out_mixer_left = (out_mixer_left + out_mixer_right) / 2;
-            mixAudioStream(leftdry, left, last_out_mixer_left, out_mixer_left);
+            mixAudioStream(leftdry, left, dleft, last_out_mixer_left, out_mixer_left);
             if (rightdry != null)
-                mixAudioStream(rightdry, left, last_out_mixer_left,
+                mixAudioStream(rightdry, left, dleft, last_out_mixer_left,
                         out_mixer_left);
         } else {
             if (rightdry == null &&
                     last_out_mixer_left == last_out_mixer_right &&
                     out_mixer_left == out_mixer_right)
             {
-                mixAudioStream(leftdry, mono, last_out_mixer_left, out_mixer_left);
+                mixAudioStream(leftdry, mono, dmono, last_out_mixer_left, out_mixer_left);
             }
             else
             {
-                mixAudioStream(leftdry, left, last_out_mixer_left, out_mixer_left);
+                mixAudioStream(leftdry, left, dleft, last_out_mixer_left, out_mixer_left);
                 if (rightdry != null)
-                    mixAudioStream(rightdry, right, last_out_mixer_right,
+                    mixAudioStream(rightdry, right, dright, last_out_mixer_right,
                             out_mixer_right);
                 else
-                    mixAudioStream(leftdry, right, last_out_mixer_right,
+                    mixAudioStream(leftdry, right, dright, last_out_mixer_right,
                            out_mixer_right);
            }
        }
 
         if (rightdry == null) {
-            mixAudioStream(leftdry, eff1, last_out_mixer_effect1,
+            mixAudioStream(leftdry, eff1, deff1, last_out_mixer_effect1,
                     out_mixer_effect1);
-            mixAudioStream(leftdry, eff2, last_out_mixer_effect2,
+            mixAudioStream(leftdry, eff2, deff2, last_out_mixer_effect2,
                     out_mixer_effect2);
         } else {
-            mixAudioStream(leftdry, eff1, last_out_mixer_effect1 * 0.5f,
+            mixAudioStream(leftdry, eff1, deff1, last_out_mixer_effect1 * 0.5f,
                     out_mixer_effect1 * 0.5f);
-            mixAudioStream(leftdry, eff2, last_out_mixer_effect2 * 0.5f,
+            mixAudioStream(leftdry, eff2, deff2, last_out_mixer_effect2 * 0.5f,
                     out_mixer_effect2 * 0.5f);
-            mixAudioStream(rightdry, eff1, last_out_mixer_effect1 * 0.5f,
+            mixAudioStream(rightdry, eff1, deff1, last_out_mixer_effect1 * 0.5f,
                     out_mixer_effect1 * 0.5f);
-            mixAudioStream(rightdry, eff2, last_out_mixer_effect2 * 0.5f,
+            mixAudioStream(rightdry, eff2, deff2, last_out_mixer_effect2 * 0.5f,
                     out_mixer_effect2 * 0.5f);
         }
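When a voice starts delay samples into a control buffer, the reworked mixAudioStream() above writes its output starting at offset delay in the live buffer and spills the final delay samples into the companion delay buffer, which processAudioBuffers() swaps back in on the next cycle. A stripped-down sketch of that split for the constant-gain case, using plain arrays and invented names instead of SoftAudioBuffer:

public class DelayedMixSketch {

    // Mirrors the delay branch of SoftVoice.mixAudioStream() when
    // amp_from == amp_to, with plain float arrays standing in for buffers.
    static void mixWithDelay(float[] in, float[] out, float[] delayOut,
                             int delay, float amp) {
        int bufferlen = in.length;
        int j = 0;
        // Samples that still fit in the current buffer, shifted by 'delay'.
        for (int i = delay; i < bufferlen; i++)
            out[i] += in[j++] * amp;
        // The overflow goes to the head of the delay buffer and is heard at
        // the very start of the next render cycle.
        for (int i = 0; i < delay; i++)
            delayOut[i] += in[j++] * amp;
    }

    public static void main(String[] args) {
        float[] in = new float[300];
        in[0] = 1.0f;                      // impulse at the voice's local time zero
        float[] out = new float[300];
        float[] delayOut = new float[300];
        mixWithDelay(in, out, delayOut, 150, 1.0f);
        System.out.println(out[150]);      // 1.0: the impulse lands 150 samples in
    }
}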
test/javax/sound/midi/Gervill/SoftReceiver/GetMidiDevice.java (new file, 0 → 100644)
/*
* Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Sun designates this
* particular file as subject to the "Classpath" exception as provided
* by Sun in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*/
/* @test
@summary Test SoftReceiver getMidiDevice method */
import javax.sound.midi.Receiver;

import com.sun.media.sound.AudioSynthesizer;
import com.sun.media.sound.SoftReceiver;
import com.sun.media.sound.SoftSynthesizer;

public class GetMidiDevice {

    public static void main(String[] args) throws Exception {
        AudioSynthesizer synth = new SoftSynthesizer();
        synth.openStream(null, null);
        Receiver recv = synth.getReceiver();
        if (((SoftReceiver) recv).getMidiDevice() != synth) {
            throw new Exception("SoftReceiver.getMidiDevice() doesn't return "
                    + "instance of the synthesizer");
        }
        synth.close();
    }
}
\ No newline at end of file
test/javax/sound/midi/Gervill/SoftSynthesizer/TestPreciseTimestampRendering.java (new file, 0 → 100644)
/*
* Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Sun designates this
* particular file as subject to the "Classpath" exception as provided
* by Sun in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*/
/* @test
@summary Test rendering when using precise timestamps */
import java.util.Arrays;
import java.util.Random;

import javax.sound.midi.MidiChannel;
import javax.sound.midi.Receiver;
import javax.sound.midi.ShortMessage;
import javax.sound.midi.Soundbank;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;

import com.sun.media.sound.AudioFloatConverter;
import com.sun.media.sound.AudioSynthesizer;
import com.sun.media.sound.ModelAbstractChannelMixer;
import com.sun.media.sound.ModelChannelMixer;
import com.sun.media.sound.SF2Instrument;
import com.sun.media.sound.SF2InstrumentRegion;
import com.sun.media.sound.SF2Layer;
import com.sun.media.sound.SF2LayerRegion;
import com.sun.media.sound.SF2Sample;
import com.sun.media.sound.SF2Soundbank;
import com.sun.media.sound.SimpleInstrument;
import com.sun.media.sound.SimpleSoundbank;
import com.sun.media.sound.SoftSynthesizer;

public class TestPreciseTimestampRendering {

    public static AudioFormat format = new AudioFormat(44100, 16, 1, true, false);

    public static SF2Soundbank createTestSoundbank() {
        // Create impulse instrument
        // used to measure timing of note-on playback
        SF2Soundbank soundbank = new SF2Soundbank();
        float[] data = new float[100];
        Arrays.fill(data, 0);
        data[0] = 1.0f;
        byte[] bdata = new byte[data.length * format.getFrameSize()];
        AudioFloatConverter.getConverter(format).toByteArray(data, bdata);

        SF2Sample sample = new SF2Sample(soundbank);
        sample.setName("Test Sample");
        sample.setData(bdata);
        sample.setSampleRate((long) format.getSampleRate());
        sample.setOriginalPitch(69);
        soundbank.addResource(sample);

        SF2Layer layer = new SF2Layer(soundbank);
        layer.setName("Test Layer");
        soundbank.addResource(layer);

        SF2LayerRegion region = new SF2LayerRegion();
        region.setSample(sample);
        layer.getRegions().add(region);

        SF2Instrument ins = new SF2Instrument(soundbank);
        ins.setName("Test Instrument");
        soundbank.addInstrument(ins);

        SF2InstrumentRegion insregion = new SF2InstrumentRegion();
        insregion.setLayer(layer);
        ins.getRegions().add(insregion);

        return soundbank;
    }

    public static Soundbank createTestSoundbankWithChannelMixer() {
        SF2Soundbank soundbank = createTestSoundbank();

        SimpleSoundbank simplesoundbank = new SimpleSoundbank();
        SimpleInstrument simpleinstrument = new SimpleInstrument() {

            public ModelChannelMixer getChannelMixer(MidiChannel channel,
                    AudioFormat format) {
                return new ModelAbstractChannelMixer() {
                    boolean active = true;

                    public boolean process(float[][] buffer, int offset, int len) {
                        for (int i = 0; i < buffer.length; i++) {
                            float[] cbuffer = buffer[i];
                            for (int j = 0; j < cbuffer.length; j++) {
                                cbuffer[j] = -cbuffer[j];
                            }
                        }
                        return active;
                    }

                    public void stop() {
                        active = false;
                    }
                };
            }
        };
        simpleinstrument.add(soundbank.getInstruments()[0]);
        simplesoundbank.addInstrument(simpleinstrument);

        return simplesoundbank;
    }

    public static void main(String[] args) throws Exception {
        test(createTestSoundbank());
        test(createTestSoundbankWithChannelMixer());
    }

    public static void test(Soundbank soundbank) throws Exception {

        // Create instance of synthesizer using the testing soundbank above
        AudioSynthesizer synth = new SoftSynthesizer();
        AudioInputStream stream = synth.openStream(format, null);
        synth.unloadAllInstruments(synth.getDefaultSoundbank());
        synth.loadAllInstruments(soundbank);
        Receiver recv = synth.getReceiver();

        // Set volume to max and turn reverb off
        ShortMessage reverb_off = new ShortMessage();
        reverb_off.setMessage(ShortMessage.CONTROL_CHANGE, 91, 0);
        recv.send(reverb_off, -1);
        ShortMessage full_volume = new ShortMessage();
        full_volume.setMessage(ShortMessage.CONTROL_CHANGE, 7, 127);
        recv.send(full_volume, -1);

        Random random = new Random(3485934583945l);

        // Create random timestamps
        long[] test_timestamps = new long[30];
        for (int i = 1; i < test_timestamps.length; i++) {
            test_timestamps[i] = i * 44100
                    + (int) (random.nextDouble() * 22050.0);
        }

        // Send midi note on message to synthesizer
        for (int i = 0; i < test_timestamps.length; i++) {
            ShortMessage midi_on = new ShortMessage();
            midi_on.setMessage(ShortMessage.NOTE_ON, 69, 127);
            recv.send(midi_on,
                    (long) ((test_timestamps[i] / 44100.0) * 1000000.0));
        }

        // Measure timing from rendered audio
        float[] fbuffer = new float[100];
        byte[] buffer = new byte[fbuffer.length * format.getFrameSize()];
        long firsts = -1;
        int counter = 0;
        long s = 0;
        long max_jitter = 0;
        outerloop: for (int k = 0; k < 10000000; k++) {
            stream.read(buffer);
            AudioFloatConverter.getConverter(format).toFloatArray(buffer,
                    fbuffer);
            for (int i = 0; i < fbuffer.length; i++) {
                if (fbuffer[i] != 0) {
                    if (firsts == -1)
                        firsts = s;

                    long measure_time = (s - firsts);
                    long predicted_time = test_timestamps[counter];

                    long jitter = Math.abs(measure_time - predicted_time);

                    if (jitter > 10)
                        max_jitter = jitter;

                    counter++;
                    if (counter == test_timestamps.length)
                        break outerloop;
                }
                s++;
            }
        }
        synth.close();

        if (counter == 0)
            throw new Exception("Nothing was measured!");

        if (max_jitter != 0) {
            throw new Exception("Jitter has occurred! "
                    + "(max jitter = " + max_jitter + ")");
        }
    }
}