/**
 * Fetches all instrument configurations from the backend.
 * @returns {Promise<Object>} parsed JSON response from `GET configs/`.
 */
export async function getInstrumentConfigurations() {
  const endpoint = 'configs/';
  const json = await makeRequest(endpoint);
  return json;
}

/**
 * Partially updates an existing instrument configuration.
 *
 * When `audioFile` is provided, the payload is sent as multipart/form-data so
 * Django's FileField can process the upload; otherwise it is sent as JSON,
 * as before.
 *
 * @param {string|number} config_id - ID of the configuration to update.
 * @param {Object} instrument_config_update - Fields to change (`name`,
 *   `description`, `settings`); omitted fields are left untouched.
 * @param {Blob|File|null} [audioFile=null] - Optional audio file to attach.
 * @returns {Promise<Object>} parsed JSON response.
 */
export async function mutateInstrumentConfiguration(config_id, instrument_config_update, audioFile = null) {
  const endpoint = `configs/${config_id}/`;
  if (audioFile) {
    const formData = buildConfigFormData(instrument_config_update, audioFile);
    return makeFormDataRequest(endpoint, 'PATCH', formData);
  }
  const json = await makeRequest(endpoint, 'PATCH', instrument_config_update);
  return json;
}

/**
 * Creates a new instrument configuration.
 *
 * Same transport rules as {@link mutateInstrumentConfiguration}: multipart
 * when an audio file is attached, JSON otherwise.
 *
 * @param {Object} instrument_config - New configuration (`name`,
 *   `description`, `settings`).
 * @param {Blob|File|null} [audioFile=null] - Optional audio file to attach.
 * @returns {Promise<Object>} parsed JSON response.
 */
export async function createInstrumentConfiguration(instrument_config, audioFile = null) {
  const endpoint = `configs/`;
  if (audioFile) {
    const formData = buildConfigFormData(instrument_config, audioFile);
    return makeFormDataRequest(endpoint, 'POST', formData);
  }
  const json = await makeRequest(endpoint, 'POST', instrument_config);
  return json;
}

/**
 * Builds the multipart body for a configuration create/update.
 *
 * Only fields actually present on `config` are appended: FormData.append
 * stringifies `undefined`/`null` into the literal strings "undefined"/"null",
 * which would otherwise be stored server-side on a partial PATCH.
 *
 * @param {Object} config - Configuration fields (`name`, `description`, `settings`).
 * @param {Blob|File} audioFile - Audio file to attach under the `file` key.
 * @returns {FormData} multipart payload.
 */
function buildConfigFormData(config, audioFile) {
  const formData = new FormData();
  if (config.name != null) {
    formData.append('name', config.name);
  }
  if (config.description != null) {
    formData.append('description', config.description);
  }
  if (config.settings !== undefined) {
    // Settings is a structured object; the backend expects it JSON-encoded.
    formData.append('settings', JSON.stringify(config.settings));
  }
  formData.append('file', audioFile);
  return formData;
}

/**
 * Sends an authenticated multipart/form-data request to the backend API.
 *
 * Note: Content-Type is deliberately NOT set — the browser must supply it
 * so the multipart boundary parameter is included.
 *
 * @param {string} endpoint - API path relative to `${host}/api/`.
 * @param {string} method - HTTP method (e.g. 'POST', 'PATCH').
 * @param {FormData} formData - Request body.
 * @returns {Promise<Object>} parsed JSON response; `{}` when no auth token
 *   is available (matches the silent no-op behavior of the JSON path).
 */
async function makeFormDataRequest(endpoint, method, formData) {
  const token = await getDjangoToken();
  if (!token) return {};

  const API = `${process.env.NEXT_PUBLIC_BACKEND_HOST}/api`;
  const url = `${API}/${endpoint}`;

  const response = await fetch(url, {
    method,
    headers: {
      Authorization: `Token ${token}`,
    },
    body: formData,
  });

  assertResponse(response);
  return response.json();
}
of file diff --git a/components/adaptive.js b/components/adaptive.js new file mode 100644 index 00000000..56bc76bb --- /dev/null +++ b/components/adaptive.js @@ -0,0 +1,213 @@ +import Button from 'react-bootstrap/Button' +// tone doesn't have named exports +import { Sampler, Recorder, getDestination, loaded, start } from "tone"; +import { WebMidi } from "webmidi"; +import { useState, useEffect, useRef } from 'react'; + + +function Adpative() { + const recorder = useRef(null); + const sampler = useRef(null); + const [isSamplerLoaded, setSamplerLoaded] = useState(false); + // const [isToneLoaded, setToneLoaded] = useState(false); + const [hasPermission, setHasPermission] = useState(false); + // https://github.com/Tonejs/Tone.js/wiki/Using-Tone.js-with-React-React-Typescript-or-Vue + // https://dev.to/ericsonwillians/ive-built-my-own-synthesizer-using-tonejs-and-react-293f + + const webmidiInput = useRef(null) + + useEffect(() => { + const handleKeyDown = (event) => { + console.log("Key pressed:", event.key); + if (event.key in keyboardMap) { + onNote(keyboardMap[event.key]); + } + } + window.addEventListener("keydown", handleKeyDown); + recorder.current = new Recorder(); + getDestination().connect(recorder.current); + + sampler.current = new Sampler({ + + C5: "/audio/viola_c5.wav", + A4: "/audio/viola_a4.wav", + B4: "/audio/viola_b4.wav", + D4: "/audio/viola_d4.wav", + E4: "/audio/viola_e4.wav", + F4: "/audio/viola_f4.wav", + G4: "/audio/viola_g4.wav", + }, { + onload: () => { + setSamplerLoaded(true); + } + }).toDestination(); + return () => window.removeEventListener("keydown", handleKeyDown); + },[]); + + const keyboardMap = { + 'a': { + qwerty: 'a', + note: { + name: 'C', + octave: 4, + accidental: undefined, + } + }, + 'w': { + qwerty: 'w', + note: { + name: 'C', + octave: 4, + accidental: '#', + } + }, + 's': { + qwerty: 's', + note: { + name: 'D', + octave: 4, + accidental: undefined, + } + }, + 'e': { + qwerty: 'e', + note: { + name: 'D', + octave: 4, + 
accidental: '#', + } + }, + 'd': { + qwerty: 'd', + note: { + name: 'E', + octave: 4, + accidental: undefined, + } + }, + 'f': { + qwerty: 'f', + note: { + name: 'F', + octave: 4, + accidental: undefined, + } + }, + 't': { + qwerty: 't', + note: { + name: 'F', + octave: 4, + accidental: '#', + } + }, + 'g': { + qwerty: 'g', + note: { + name: 'G', + octave: 4, + accidental: undefined, + } + }, + + 'y': { + qwerty: 'y', + note: { + name: 'G', + octave: 4, + accidental: '#', + } + }, + 'h': { + qwerty: 'h', + note: { + name: 'A', + octave: 4, + accidental: undefined, + } + }, + 'u': { + qwerty: 'u', + note: { + name: 'A', + octave: 4, + accidental: '#', + } + }, + 'j': { + qwerty: 'j', + note: { + name: 'B', + octave: 4, + accidental: undefined, + } + }, + 'k': { + qwerty: 'k', + note: { + name: 'C', + octave: 5, + accidental: undefined, + } + }, + +} + function onEnabled() { + + if (WebMidi.inputs.length < 1) { + console.error('tried to give permission, but no inputs') + } else { + setHasPermission(true); + WebMidi.inputs.forEach((device, index) => { + console.log(device.name); + console.log(index); + }); + webmidiInput.current = WebMidi.inputs[0]; // FIXME: we need to list the inputs from the loop above in the config ui so the user can select their thing + webmidiInput.current.channels[1].addListener("noteon", onNote); + + } + +} + //TODO: Have the + function onNote(e) { + const accidental = e.note.accidental + let note = e.note.name; + if (accidental != undefined) { + note += e.note.accidental; + } + note += e.note.octave; + console.log(e); + // sampler.current.triggerAttackRelease(note, 4); + + loaded().then(() => { + sampler.current.triggerAttackRelease(note, 4); + }); +} + + + async function enableEverything() { + // TODO: We will tell the user to plug in the + await WebMidi.enable().catch((err) => { + console.log("error"); + alert(err); + return; + }); + await start(); + + onEnabled(); + + } + + return ( + <> + + + + + ); +} + + + + +export default Adpative; \ 
No newline at end of file diff --git a/components/recorder.js b/components/recorder.js index f8c9922c..040e1308 100644 --- a/components/recorder.js +++ b/components/recorder.js @@ -1,7 +1,6 @@ 'use client'; import { useCallback, useEffect, useRef, useState } from 'react'; -import { FaEdit, FaStop, FaMicrophone, FaRegTrashAlt } from 'react-icons/fa'; import { BiRename } from 'react-icons/bi'; import { Card, @@ -26,6 +25,30 @@ import DAW from './audio/DAW'; import { AudioDropModal } from './audio/silenceDetect'; import { catchSilence, setupAudioContext } from '../lib/dawUtils'; import StatusIndicator from './statusIndicator'; +import styles from '../styles/recorder.module.css'; +import { getInstrumentConfigurations, mutateInstrumentConfiguration, createInstrumentConfiguration } from "../api"; +import MicRecorder from 'mic-recorder-to-mp3'; +import { IoSettingsSharp } from "react-icons/io5"; +import Modal from 'react-bootstrap/Modal' +import { Sampler, Recorder as toneRecorder, getDestination, loaded, start, Midi } from "tone"; +import { WebMidi } from "webmidi"; +import { + FaEdit, + FaMicrophone, + FaStop, + FaCloudUploadAlt, + FaSpinner, + FaTimesCircle, + FaCheck, + FaPlay, + FaPause, + FaVolumeOff, + FaVolumeMute, + FaVolumeDown, + FaVolumeUp, + FaRegTrashAlt, +} from 'react-icons/fa'; +import WaveSurfer from 'wavesurfer.js'; // Create a silent audio buffer as scratch audio to initialize wavesurfer const createSilentAudio = () => { @@ -252,7 +275,7 @@ export default function RecorderRefactored({ submit, accompaniment, logOperation return false; } }; - + window.addEventListener('error', handleError); window.addEventListener('unhandledrejection', (event) => { if (event.reason && event.reason.name === 'AbortError') { @@ -260,12 +283,12 @@ export default function RecorderRefactored({ submit, accompaniment, logOperation console.log('Suppressed expected AbortError promise rejection'); } }); - + return () => { window.removeEventListener('error', handleError); }; }, []); 
- + // Initialize audio URL with scratch audio useEffect(() => { if (!audioURL) { @@ -355,7 +378,7 @@ export default function RecorderRefactored({ submit, accompaniment, logOperation recorder.onerror = (event) => { console.log('MediaRecorder error suppressed:', event.error?.name || 'unknown'); }; - + recorder.onstop = () => { console.log('MediaRecorder.onstop called'); const blob = new Blob(chunksRef.current, { type: supportedType }); @@ -469,12 +492,12 @@ export default function RecorderRefactored({ submit, accompaniment, logOperation mediaRecorder.start(10); setIsRecording(true); }, [isBlocked, mediaRecorder, accompanimentRef, chunksRef, setIsRecording]); - + const stopRecording = useCallback(async () => { try { if (accompanimentRef.current) { accompanimentRef.current.pause(); - + // Use a safer approach to reset audio try { if (accompanimentRef.current.readyState >= 1) { @@ -622,8 +645,8 @@ export default function RecorderRefactored({ submit, accompaniment, logOperation {/* eslint-disable-next-line jsx-a11y/media-has-caption */} -