In principle it should be possible, because the sound library's README on GitHub says:
(But I still don’t have it working.)
How to build

git clone git@github.com:processing/processing-sound.git

- (optional: copy (or soft-link) processing-core.zip from your local Processing for Android mode, as well as your Android SDK's android.jar, API level 26 or higher, into the library/ folder. If you don't do this, these will be downloaded from GitHub instead. Note that as of version 2.2 the sound library is compiled against Processing's Android mode rather than the normal Processing core.jar in order to more smoothly support AudioIn on Android. Other dependencies, in particular Phil Burk's JSyn engine on which this library is based, are also all downloaded automatically by ant.)

ant dist

(or, alternatively, run build.xml from within Eclipse)

The resulting sound.zip can be extracted into your Processing installation's libraries/ folder.
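Once the build succeeds and sound.zip is extracted, I'd expect a minimal test sketch like the following to play a tone. This is just a sketch based on the library's documented SinOsc class; I haven't gotten it running on Android myself yet:

import processing.sound.*;

SinOsc sine;

void setup() {
  // Create a sine oscillator at 440 Hz (concert A) and start it
  sine = new SinOsc(this);
  sine.freq(440);
  sine.play();
}

void draw() {
}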
In the meantime, I'm trying to understand how sound frequency is generated on Android.
So far I have the code below working on my Android 10 device, but strangely it doesn't work on my Lollipop device. Maybe you can help me by running it on your device and telling me whether there are problems, and whether you like my song.
import android.media.AudioTrack;
import android.media.AudioManager;
import android.media.AudioFormat;

// Indices of notes in the note_frequencies array
// (Gl = low G; Ah, Bh, Ch = the high A, B and C an octave up)
int Gl = 0, A = 1, B = 2, C = 3, D = 4, E = 5, G = 6, Ah = 7, Bh = 8, Ch = 9;
// Frequencies in Hz:     G4   A4   B4   C5   D5   E5   G5   A5   B5   C6
int[] note_frequencies = {392, 440, 494, 523, 587, 659, 784, 880, 988, 1046};
int[] song_notes = {A, A, E, D, C, B, B, B, D, C, B, A, A, Ch, Bh, Ch, Bh, Ch, A, A, C, B, C, B, C}; // The song
int[] final_notes = {C, C, C, C, E, E, E, E, D, D, D, D, G, G, G, G, Ah, Ah, Ah, Ah, Ah, Ah, Ah, D, C, B, Gl, A, A, A};
// 1 = long note, 0 = short note, one entry per note in song_notes
int[] note = {1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0};

int sample_rate = 44100; // samples per second
// One pre-rendered AudioTrack per note frequency
AudioTrack[] audio_track = new AudioTrack[note_frequencies.length];
void setup() {
  background(0, 0, 200);
  fill(255);
  textAlign(CENTER, CENTER);
  String str = "Click to start song.";
  textSize(12);
  float twf = width/textWidth(str);
  textSize(9*twf);
  text(str, width/2, height/2);

  // Pre-render one fifth of a second of each note as 8-bit PCM.
  // Android's ENCODING_PCM_8BIT is unsigned, so the wave is centered
  // on 128 with an amplitude of 127 (multiplying by 255 as before
  // overflows the byte range and distorts the tone).
  int note_duration = sample_rate/5;
  byte[][] long_note = new byte[note_frequencies.length][note_duration];
  for (int ln = 0; ln < note_frequencies.length; ln++) {
    for (int i = 0; i < long_note[ln].length; i++) {
      long_note[ln][i] = (byte)(128 + 127*sin(TWO_PI*note_frequencies[ln]*i/sample_rate));
    }
  }
  // This AudioTrack constructor is deprecated since API 26 but still
  // works; MODE_STATIC keeps the whole sample in the track's buffer.
  for (int ln = 0; ln < note_frequencies.length; ln++) {
    audio_track[ln] = new AudioTrack(AudioManager.STREAM_MUSIC, sample_rate,
      AudioFormat.CHANNEL_OUT_DEFAULT, AudioFormat.ENCODING_PCM_8BIT,
      long_note[ln].length, AudioTrack.MODE_STATIC);
    audio_track[ln].write(long_note[ln], 0, long_note[ln].length);
  }
}
void draw() {
}
void mousePressed() {
  // Play the song twice; the second time through, break out early at
  // note 18 (the break skips the stop(), so that note finishes on its own)
  for (int i = 0; i < 2; i++) {
    for (int sn = 0; sn < song_notes.length; sn++) {
      audio_track[song_notes[sn]].play();
      int start_time = millis();
      // Long notes get 350 ms, short notes 150 ms
      int pause_between_notes = (note[sn] == 1) ? 350 : 150;
      // Busy-wait until the note's time is up
      while (millis()-start_time < pause_between_notes) {
      }
      if (i == 1 && sn == 18) break;
      audio_track[song_notes[sn]].stop();
    }
  }
  // The finale: every note lasts 200 ms
  for (int fn = 0; fn < final_notes.length; fn++) {
    int start_time = millis();
    int pause_between_notes = 200;
    audio_track[final_notes[fn]].play();
    while (millis()-start_time < pause_between_notes) {
    }
    audio_track[final_notes[fn]].stop();
  }
}
// Release the native AudioTrack resources when the sketch is paused or
// stopped; the null check keeps the two callbacks from releasing the
// same track twice.
void releaseTracks() {
  for (int i = 0; i < audio_track.length; i++) {
    if (audio_track[i] != null) {
      audio_track[i].release();
      audio_track[i] = null;
    }
  }
}

void onStop() {
  releaseTracks();
}

void onPause() {
  releaseTracks();
}
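By the way, one possible reason for the Lollipop failure: the Android documentation says ENCODING_PCM_8BIT is not guaranteed to be supported by all devices, whereas ENCODING_PCM_16BIT is. If someone wants to test that theory, here is a sketch of how one note could be rendered with 16-bit samples instead. makeNoteTrack16 is just a hypothetical helper name; it reuses sample_rate from the code above, and note that the AudioTrack buffer size is given in bytes, so it is twice the sample count:

// Hypothetical 16-bit variant of the note rendering above.
// ENCODING_PCM_16BIT is guaranteed to be supported on all devices,
// unlike ENCODING_PCM_8BIT, so it may behave better on Lollipop.
AudioTrack makeNoteTrack16(int frequency) {
  int note_duration = sample_rate/5;
  short[] samples = new short[note_duration];
  for (int i = 0; i < samples.length; i++) {
    // 16-bit PCM is signed, so the wave is centered on zero
    samples[i] = (short)(32767*sin(TWO_PI*frequency*i/sample_rate));
  }
  AudioTrack track = new AudioTrack(AudioManager.STREAM_MUSIC, sample_rate,
    AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT,
    samples.length*2, // buffer size is in bytes: 2 bytes per sample
    AudioTrack.MODE_STATIC);
  track.write(samples, 0, samples.length);
  return track;
}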