# experimentations/loopy_code_woteva.py
#!/usr/bin/env python3
# Copyright (C) 2021 harrysentonbury
# GNU General Public License v3.0
# Chaos. But who cares
# thyaudio - https://codeberg.org/harrysentonbury/experimentations/src/branch/master/thyaudio.py
# REMEMBER - RUN FROM TERMINAL
import argparse
from audio2numpy import open_audio
import numpy as np
import scipy.io.wavfile as wf
import sounddevice as sd
import scipy.signal as sig
import thyaudio
import time
# Command-line interface: optional input file and a bars-per-minute rate.
parser = argparse.ArgumentParser(prog="LOOPY_WOTVA")
parser.add_argument(
    "-i", "--infile",
    type=str, default="", nargs="?", metavar="INFILE",
    help="audio infile, wav or mp3",
)
parser.add_argument(
    "-b", "--bars_pm",
    type=int, default=43, nargs="?", metavar="BARS_PM",
    help="bars per minute",
)
args = parser.parse_args()
def beater0(in_file):
    """Apply the first rhythm pattern to ``in_file`` and return the beat array."""
    rythmer = thyaudio.ThyRythmer(
        data=in_file,
        bar_size=dlen,  # module-level bar length in samples
        decay_min=-1.3,
        flip=False,
        decay_log_base=4,
        attack=400,
        pattern=[1, 0, 0, 0, 1, 0, 1, 0],
    )
    return rythmer.beater()
def beater1(in_file):
    """Apply the second rhythm pattern to ``in_file`` and return the beat array."""
    rythmer = thyaudio.ThyRythmer(
        data=in_file,
        bar_size=dlen,  # module-level bar length in samples
        decay_min=-1.2,
        flip=False,
        decay_log_base=8,
        pattern=[1, 0, 1, 0, 1, 0, 0, 1],
    )
    return rythmer.beater()
#------------------------------------------------------------------
# RIGHT
# RIGHT channel source: load --infile if it exists, otherwise record 3 s
# from the default microphone. The default --infile is "", so with no
# argument wf.read("") raises FileNotFoundError and we fall through to
# recording.
try:
    woteva_file = args.infile
    # read mp3 or wav with appropriate package.
    if woteva_file.endswith(".mp3"):
        data, sample_rate = open_audio(woteva_file)
    else:
        sample_rate, data = wf.read(woteva_file)
        # wav files decode to ints; promote to float for the DSP below
        data = np.float64(data)
    # make sure mono
    try:
        # stereo: sum the channels (no /2 -- the mix is peak-normalized
        # at the very end, so the extra gain is harmless)
        data_1 = data[:, 0] + data[:, 1]
    except IndexError:
        # already a 1-D mono array
        data_1 = data[:]
except FileNotFoundError:
    # No usable input file: count the user in, then record.
    print("Say stuff or sing into your microphone for a couple of seconds")
    time.sleep(0.3)
    print("After three")
    time.sleep(0.75)
    sample_rate = 44100
    # Record.
    print("one")
    time.sleep(0.75)
    print("two")
    time.sleep(0.75)
    print("three")
    time.sleep(0.75)
    print("GO")
    time.sleep(0.2)
    # 3 seconds, mono, float64; reshape (n, 1) -> (n,)
    data_1 = sd.rec(int(3 * sample_rate),
                    samplerate=sample_rate, channels=1, dtype='float64').reshape(-1)
    sd.wait()
    print("Recording done")
bpm = args.bars_pm
samples_per_minute = sample_rate * 60
# dlen = number of samples in one bar at the requested bars-per-minute
dlen = samples_per_minute // bpm
#data_1 = np.roll(data_1, 5000)
# keep exactly one bar of source material
data_1 = data_1[:dlen]
#print(f"data shape: {data_1.shape}")
#-------------------------------------------------------------------
# LEFT
def swave2(f, det=0, x_arr=None):
    """Return a 0.3-amplitude sine wave at rate ``f + det`` on a time axis.

    Parameters
    ----------
    f : float
        Base frequency factor (multiplies the time axis).
    det : float, optional
        Detune added to ``f``.
    x_arr : ndarray, optional
        Sample axis to evaluate on; defaults to the module-level ``x``
        (one bar of time), preserving the original behavior.

    Returns
    -------
    ndarray of the same shape as the time axis.
    """
    if x_arr is None:
        x_arr = x  # module-level time axis defined in the LEFT section
    return np.sin(x_arr * (f + det)) * 0.3
# LEFT channel synthesis: an arcsin-shaped carrier plus stacked sine partials.
log_end = 1
factor_arr_len = 33  # length of the detune-factor table; must be >= iter2
iter2 = 3            # number of sine partials added onto the carrier
freq2 = 55 * (2**(7 / 12))  # 7 semitones above A1 -> E2

# Time axis covering one bar (dlen samples at sample_rate).
x = np.linspace(0, 2 * np.pi * dlen / sample_rate, dlen)

# Exponential envelopes, falling and rising, used as modulation depth ramps.
ramp_0 = np.logspace(1, -1, np.size(x), base=8) * 6
ramp_1 = np.logspace(-1, 1, np.size(x), base=8) * 6

# Triangle-ish carrier at 220 rad/s: arcsin(sin(...)) with a decaying FM term.
left_wave = (2 / np.pi) * np.arcsin(np.sin(220 * x + ramp_0 * (np.sin(220 * x) * 0.5))) * 0.3

# Slightly-sharp multiplier table; layer the first iter2 partials on top.
factor = np.logspace(1, log_end, factor_arr_len, base=1.02)
for i in range(iter2):
    left_wave = left_wave + swave2(factor[i] * freq2)
#---------------------------------------------------------------
# LEFT and RIGHT
# squeek (code from simmer_bass)
def triangler(freq, form=0.9, x_arr=None):
    """Return a sawtooth/triangle wave at ``freq`` on a time axis.

    Parameters
    ----------
    freq : float
        Frequency multiplier applied to the time axis.
    form : float, optional
        Sawtooth ``width`` in [0, 1]; 0.9 gives an asymmetric triangle.
    x_arr : ndarray, optional
        Sample axis to evaluate on; defaults to the module-level ``x``,
        preserving the original behavior.

    Returns
    -------
    ndarray of the same shape as the time axis, values in [-1, 1].
    """
    if x_arr is None:
        x_arr = x  # module-level time axis defined in the LEFT section
    return sig.sawtooth(x_arr * freq, form)
def filtering_butter(data, cru, fs=None):
    """High-pass filter ``data`` with a 10th-order Butterworth.

    Parameters
    ----------
    data : ndarray
        1-D signal to filter.
    cru : float
        Cutoff frequency in Hz.
    fs : float, optional
        Sampling rate in Hz; defaults to the module-level ``sample_rate``,
        preserving the original behavior.

    Returns
    -------
    ndarray: the filtered signal, same length as ``data``.
    """
    if fs is None:
        fs = sample_rate  # module-level sampling rate
    # second-order sections are numerically safer than b/a at order 10
    sos1 = sig.butter(10, cru, btype='hp', fs=fs, output='sos')
    return sig.sosfilt(sos1, data)
# High "simmer" squeak shared by both channels (code from simmer_bass).
freq_simmer = 880.0 * (2**(20 / 12))  # 20 semitones above A5 -> F7
# Three slightly-detuned sawtooth voices plus an FM-ish sine an octave below.
simmer = (
    triangler(freq_simmer)
    + triangler(freq_simmer + 2)
    + triangler(freq_simmer - 2)
    + np.sin(x * freq_simmer * 0.5 + (np.sin(x * (freq_simmer + 1)) * 0.5))
) * 0.3
# Gate it with a short 4-step pattern, one bar per second of material.
simmer_beat_obj = thyaudio.ThyRythmer(
    data=simmer,
    bar_size=sample_rate,
    decay_min=-1,
    flip=True,
    decay_log_base=8,
    attack=200,
    pattern=[0, 1, 1, 0],
)
simmer_beat = simmer_beat_obj.beater()
#---------------------------------------------------------
# LOOPS
# Build the long left (sound_0) and right (sound_1) tracks by concatenating
# phlazed, simmer-garnished one-bar segments.
repeats = 8
left_wave_beat = beater0(left_wave)
sound_0 = np.array([])
# Rising left section: more loops, deeper phasing, higher cutoff each pass.
for i in range(repeats):
    num_loops = (i + 2)
    phase_l = thyaudio.ThyPhlazer(left_wave_beat, sample_rate=sample_rate, loops=num_loops, loop_delay=400)
    phase_l.depth=(i + 1) * 100
    phase_l.sweep=True
    phase_l.speed = 0.5
    left_channel = phase_l.phlaze() * 0.9
    # cr: high-pass cutoff (Hz) for the simmer overlay
    cr = (i + 2) * 450
    filt_simmer_beat = filtering_butter(simmer_beat, cr)
    sound_0 = np.concatenate((sound_0, left_channel + (filt_simmer_beat * 0.05)))
data_1_beat = beater1(data_1)
sound_1 = np.array([])
# Matching right section: same number of bars, un-phlazed source beat.
for _ in range(repeats):
    # NOTE(review): `i` here is the stale value (7) left by the loop above, so
    # cr is identical on every pass -- presumably intentional chaos; confirm.
    cr = (i + 2) * 550
    filt_simmer_beat = filtering_butter(simmer_beat, cr)
    sound_1 = np.concatenate((sound_1, data_1_beat + (filt_simmer_beat * 0.05)))
# Falling left section: loop count winds back down from repeats + 3.
for i in range(repeats + 3, 0, -1):
    num_loops = (i + 2)
    phase_l = thyaudio.ThyPhlazer(left_wave_beat, sample_rate=sample_rate, loops=num_loops, loop_delay=400)
    phase_l.depth=(i + 1) * 140
    phase_l.sweep=True
    phase_l.speed = 0.5
    left_channel = phase_l.phlaze()
    cr = (i + 2) * 750
    filt_simmer_beat = filtering_butter(simmer_beat, cr)
    sound_0 = np.concatenate((sound_0, left_channel + (filt_simmer_beat * 0.1)))
# Matching right section (same bar count as the countdown loop above).
for _ in range(repeats + 3):
    # NOTE(review): again reads the stale `i` (1, left by the countdown loop),
    # so num_loops, loop_delay, depth and cr are constant here -- confirm intent.
    num_loops = (i + 1)
    phase_r = thyaudio.ThyPhlazer(data_1_beat, sample_rate=sample_rate, loops=num_loops, loop_delay=(i + 1) * 800)
    phase_r.depth=(i + 1) * 150#1000
    phase_r.sweep=True
    phase_r.speed = 2.75
    phase_r.phase = np.pi
    right_channel = phase_r.phlaze()
    cr = (i + 2) * 650
    filt_simmer_beat = filtering_butter(simmer_beat, cr)
    sound_1 = np.concatenate((sound_1, right_channel + (filt_simmer_beat * 0.1)))
#------------------------------------------------------------
# LEFT and RIGHT
# kids piano from brown_8.mp3
# But you put the path to whatever sound you like here (mp3 or wav)
# Overlay clip: kids piano by default, or point this at any mp3/wav.
path_to_file = "audio/brown_8.mp3"
# Decode with the package that matches the extension.
is_mp3 = path_to_file.endswith(".mp3")
if is_mp3:
    data, sample_rate_2 = open_audio(path_to_file)
else:
    sample_rate_2, data = wf.read(path_to_file)
    # wav files decode to ints; promote to float
    data = np.float64(data)
# process two tracks separately, even if mono
try:
    if data.shape[1] == 2:
        data_l = data[:, 0]
        data_r = data[:, 1]
    else:
        # 2-D but not stereo: use the first channel on both sides.
        # (Previously this path left data_l/data_r undefined, causing a
        # NameError further down.)
        data_l = data[:, 0]
        data_r = data[:, 0]
except IndexError:
    # 1-D mono array: shape[1] does not exist
    data_l = data[:]
    data_r = data[:]
# Repeat the short clip a few times (each pass doubles the length).
for _ in range(3):
    data_l = np.concatenate((data_l, data_l))
    data_r = np.concatenate((data_r, data_r))
# Chop into ~32k-sample windows and shuffle them randomly.
window_size = 2**15
shuffle_object_l = thyaudio.ThyRandomAudioSlices(data_l, window_size, fade_length=500)
data_l = shuffle_object_l.chop_and_chuck()
shuffle_object_r = thyaudio.ThyRandomAudioSlices(data_r, window_size, fade_length=500)
data_r = shuffle_object_r.chop_and_chuck()
# Offset the right side so the two channels decorrelate.
data_r = np.roll(data_r, 8000)
# Phlazing chopped kids piano
num_loops = 4
# NOTE(review): these phlazers use `sample_rate` (from the RIGHT source
# section), not this clip's `sample_rate_2` -- confirm that is deliberate.
phase_l = thyaudio.ThyPhlazer(data_l, sample_rate=sample_rate, loops=num_loops, loop_delay=4000)
phase_l.depth=200
phase_l.sweep=True
phase_l.speed = 0.5
left_channel = phase_l.phlaze() * 0.1
# Hann window so the overlay fades in and out instead of clicking.
left_channel *= np.hanning(np.size(left_channel))
# Add processed left kids piano to left loops array
# (full level at the tail of sound_0, a quiet 20% copy at the head)
sound_0[-np.size(left_channel):] += left_channel
sound_0[:np.size(left_channel)] += (left_channel * 0.2)
phase_r = thyaudio.ThyPhlazer(data_r, sample_rate=sample_rate, loops=num_loops, loop_delay=6000)
phase_r.depth=1000
phase_r.sweep=True
phase_r.speed = 0.5
phase_r.phase = np.pi / 12
right_channel = phase_r.phlaze() * 0.1
right_channel *= np.hanning(np.size(right_channel))
# Add processed right kids piano to right loops array
sound_1[-np.size(right_channel):] += right_channel
sound_1[:np.size(right_channel)] += (right_channel * 0.2)
#-----------------------------------------------------------------
# Mix down: pair the two mono tracks into an (n_samples, 2) stereo array.
sound_stereo = np.stack((sound_0, sound_1), axis=1)
# Peak-normalize to +/-1.0 before playback.
sound_stereo = sound_stereo / np.max(np.abs(sound_stereo))
sd.play(sound_stereo, sample_rate)
sd.wait()
# Uncomment to save a 16-bit wav instead:
# write_data = np.int16(sound_stereo * 32767)
# wf.write('audio/wotever.wav', sample_rate, write_data)