Skip to content

Instantly share code, notes, and snippets.

@rust-play
Created August 22, 2023 03:35
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save rust-play/44f0864a5855d164e919aaa0ff367b8f to your computer and use it in GitHub Desktop.
Save rust-play/44f0864a5855d164e919aaa0ff367b8f to your computer and use it in GitHub Desktop.
Code shared from the Rust Playground
use std::time::Duration;
use crossbeam::channel::{Receiver, Sender};
use ffmpeg::{format, media, ChannelLayout};
use ffmpeg_next as ffmpeg;
use sdl2::{
audio::{AudioCVT, AudioCallback, AudioFormat, AudioSpecDesired},
event::Event,
keyboard::Keycode,
pixels::Color,
rect::Rect,
};
use spectrum_analyzer::{
samples_fft_to_spectrum, scaling::divide_by_N_sqrt, windows::hann_window, FrequencyLimit,
};
/// Linearly maps `value` from `from_range` into `to_range`.
///
/// `value` is clamped into `from_range` first, so the result always lies
/// within `to_range`. A degenerate source range (`from_min == from_max`)
/// returns `to_range.0` instead of producing `0.0 / 0.0 == NaN` (which the
/// previous version did, and which would propagate into bar heights).
fn map_range(value: f32, from_range: (f32, f32), to_range: (f32, f32)) -> f32 {
    let (from_min, from_max) = from_range;
    let (to_min, to_max) = to_range;
    // Guard the degenerate case that would otherwise divide by zero.
    if from_max == from_min {
        return to_min;
    }
    // Keep `value` inside the source range before interpolating.
    let clamped = value.max(from_min).min(from_max);
    (clamped - from_min) / (from_max - from_min) * (to_max - to_min) + to_min
}
/// Entry point: opens an SDL2 window, spawns an ffmpeg decoding thread, and
/// renders a live frequency-spectrum bar visualisation while streaming the
/// decoded audio to an SDL playback device.
fn main() -> Result<(), String> {
    let sdl_context = sdl2::init()?;
    let video_subsystem = sdl_context.video()?;
    let width = 1920;
    let height = 600;
    let window = video_subsystem
        .window("rust-sdl2 demo: Audio", width, height)
        .position_centered()
        .opengl()
        .build()
        .map_err(|e| e.to_string())?;
    let mut canvas = window.into_canvas().build().map_err(|e| e.to_string())?;
    canvas.set_draw_color(Color::RGB(0, 0, 0));
    canvas.clear();
    canvas.present();
    let mut event_pump = sdl_context.event_pump()?;
    let audio_subsystem = sdl_context.audio()?;
    // Channels between the decoder thread and this thread:
    //   new_audio_tx/rx — the stream's AudioSpecDesired, sent once so we can
    //                     open a matching playback device;
    //   tx/rx           — raw U8 PCM chunks for the audio callback
    //                     (bounded(1), so decode is paced by playback);
    //   mag_tx/rx       — per-chunk (left, right) frequency magnitudes.
    let (new_audio_tx, new_audio_rx) = crossbeam::channel::bounded(1);
    let (tx, rx) = crossbeam::channel::bounded(1);
    let (mag_tx, mag_rx) = crossbeam::channel::unbounded();
    std::thread::spawn(move || {
        init_ffmpeg(tx, mag_tx, new_audio_tx).expect("ffmpeg error");
    });
    // Keeps the audio device alive for the rest of main; dropping it would
    // stop playback.
    let mut _device = None;
    'running: loop {
        // Lazily open the playback device once the decoder reports the spec.
        if let Ok(asd) = new_audio_rx.try_recv() {
            let rx = rx.clone();
            let device = audio_subsystem
                .open_playback(None, &asd, |spec| {
                    println!("spec.format = {:?}", spec);
                    Sound { rx, volume: 0.7 }
                })
                .unwrap(); // NOTE(review): panics if the audio device can't be opened
            device.resume();
            _device = Some(device);
        }
        for event in event_pump.poll_iter() {
            match event {
                Event::Quit { .. }
                | Event::KeyDown {
                    keycode: Some(Keycode::Escape),
                    ..
                } => break 'running,
                _ => {}
            }
        }
        // Draw the newest spectrum frame, if one is ready (non-blocking).
        if let Ok((left_fr, right_fr)) = mag_rx.try_recv() {
            let line_num = 640;
            let channel_num = line_num / 2;
            if left_fr.len() > channel_num && right_fr.len() > channel_num {
                canvas.set_draw_color(Color::RGB(16, 29, 43));
                canvas.clear();
                canvas.set_draw_color(Color::RGB(123, 0, 0));
                let rw = (width as usize) / line_num;
                for i in 0..line_num {
                    // Left channel mirrored on the left half of the window,
                    // right channel on the right half.
                    let fr = if i >= channel_num {
                        right_fr[i - channel_num]
                    } else {
                        left_fr[channel_num - 1 - i]
                    };
                    // Map magnitude (assumed roughly [0, 1]) to bar height in px.
                    let height = map_range(fr, (0.0, 1.0), (0.0, 500.0)) as u32;
                    let y = 600 - height;
                    let rect = Rect::new(i as i32 * rw as i32, y as i32, rw as u32, height);
                    canvas.fill_rect(rect)?;
                    canvas.draw_rect(rect)?;
                }
            }
        }
        canvas.present();
        // ~60 FPS frame pacing.
        ::std::thread::sleep(Duration::new(0, 1_000_000_000u32 / 60));
    }
    Ok(())
}
/// State for the SDL audio callback: pulls decoded U8 PCM chunks from the
/// ffmpeg thread and applies a volume scale while filling the device buffer.
struct Sound {
    // Receives chunks of interleaved U8 samples from `init_ffmpeg`.
    rx: Receiver<Vec<u8>>,
    // Linear volume multiplier applied around the U8 midpoint (128).
    volume: f32,
}
impl AudioCallback for Sound {
    type Channel = u8;

    /// Fills the device buffer with the next decoded chunk, volume-scaled.
    ///
    /// When no chunk is ready (`try_recv` fails) the buffer is filled with
    /// silence (128, the unsigned-8-bit midpoint) instead of stale data.
    fn callback(&mut self, out: &mut [u8]) {
        match self.rx.try_recv() {
            Ok(data) => {
                for (idx, slot) in out.iter_mut().enumerate() {
                    // A chunk shorter than the device buffer is padded
                    // with silence for the missing tail.
                    let raw = *data.get(idx).unwrap_or(&128);
                    // Scale the signed deviation around the midpoint,
                    // then re-bias back into U8 range.
                    *slot = ((raw as f32 - 128.0) * self.volume + 128.0) as u8;
                }
            }
            Err(_) => out.fill(128),
        }
    }
}
/// Decoder-thread body: opens the audio file named by the first CLI argument,
/// decodes it with ffmpeg, resamples each frame to packed stereo I16, converts
/// that to U8 via SDL's `AudioCVT`, and ships fixed-size chunks to the
/// playback callback (`tx`) along with their left/right frequency spectra
/// (`mag_tx`). The stream's playback parameters are sent once over
/// `new_audio_tx` so the main thread can open a matching SDL audio device.
fn init_ffmpeg(
    tx: Sender<Vec<u8>>,
    mag_tx: Sender<(Vec<f32>, Vec<f32>)>,
    new_audio_tx: Sender<AudioSpecDesired>,
) -> Result<(), ffmpeg::Error> {
    // First CLI argument is the path of the audio file to play.
    let mut args = std::env::args();
    args.next();
    let Some(music) = args.next() else {
        // Message (verbatim): "please pass an audio file argument".
        eprintln!("请传入音频文件参数");
        std::process::exit(1);
    };
    let mut input_context = ffmpeg::format::input(&music)?;
    // Pick the best audio stream in the container.
    let audio_input = input_context
        .streams()
        .best(media::Type::Audio)
        .ok_or(ffmpeg::Error::StreamNotFound)?;
    let stream_index = audio_input.index();
    let context_decoder =
        ffmpeg::codec::context::Context::from_parameters(audio_input.parameters())?;
    let mut decoder = context_decoder.decoder().audio()?;
    println!(
        "audio rate = {}, channels = {}, sample = {:?}",
        decoder.rate(),
        decoder.channels(),
        decoder.format()
    );
    // Chunk size in sample frames, shared by the SDL buffer and the FFT.
    let samples = 2048usize;
    let desired_spec = AudioSpecDesired {
        freq: Some(decoder.rate() as i32),
        channels: Some(decoder.channels() as u8), // mono
        samples: Some(samples as u16), // default
    };
    // Tell the main thread what spec to open the playback device with.
    new_audio_tx.send(desired_spec).unwrap();
    let mut audio_frame = ffmpeg::frame::Audio::empty();
    // SDL-side format conversion S16LSB -> U8 at unchanged rate/channels.
    let cvt = AudioCVT::new(
        AudioFormat::S16LSB,
        decoder.channels() as u8,
        decoder.rate() as i32,
        AudioFormat::U8,
        decoder.channels() as u8,
        decoder.rate() as i32,
    )
    .expect("Could not convert WAV file");
    // Accumulates converted bytes until a full chunk is available.
    let mut frame_buf = Vec::with_capacity(10240);
    for (stream, packet) in input_context.packets() {
        if stream.index() == stream_index {
            decoder.send_packet(&packet)?;
            while decoder.receive_frame(&mut audio_frame).is_ok() {
                // NOTE(review): the resampler is rebuilt for every decoded
                // frame; hoisting it out of the loop would avoid the repeated
                // setup cost, but only if the frame format/layout/rate never
                // change mid-stream — confirm before changing.
                let mut sample_convert = ffmpeg::software::resampler(
                    (
                        audio_frame.format(),
                        audio_frame.channel_layout(),
                        audio_frame.rate(),
                    ),
                    (
                        format::Sample::I16(format::sample::Type::Packed),
                        ChannelLayout::STEREO,
                        audio_frame.rate(),
                    ),
                )?;
                let mut audio_convert_frame = ffmpeg::frame::Audio::empty();
                sample_convert.run(&audio_frame, &mut audio_convert_frame)?;
                // Packed I16 stereo -> interleaved U8 for the SDL device.
                let convert_sample = cvt.convert(audio_convert_frame.data(0).to_vec());
                frame_buf.extend(convert_sample);
                // Drain complete chunks of `samples * 2` bytes (stereo U8).
                loop {
                    if frame_buf.len() >= samples * 2 {
                        let convert_sample: Vec<u8> = frame_buf.drain(0..samples * 2).collect();
                        // De-interleave: even bytes = left, odd bytes = right,
                        // each remapped from U8 [0, 255] to f32 [-1.0, 1.0].
                        let left_samples: Vec<f32> = convert_sample
                            .iter()
                            .map(|&sample| (sample as f32 - 127.5) / 127.5)
                            .enumerate()
                            .filter(|(i, _)| i % 2 == 0)
                            .map(|(_, v)| v)
                            .collect();
                        let right_samples: Vec<f32> = convert_sample
                            .iter()
                            .map(|&sample| (sample as f32 - 127.5) / 127.5)
                            .enumerate()
                            .filter(|(i, _)| i % 2 == 1)
                            .map(|(_, v)| v)
                            .collect();
                        let left_fr_vals =
                            samples_to_fr(left_samples, samples / 2, audio_convert_frame.rate());
                        let right_fr_vals =
                            samples_to_fr(right_samples, samples / 2, audio_convert_frame.rate());
                        // Blocks when the playback side falls behind
                        // (bounded(1) channel), pacing decode to playback.
                        tx.send(convert_sample).unwrap();
                        mag_tx.send((left_fr_vals, right_fr_vals)).unwrap();
                    } else {
                        break;
                    }
                }
            }
        }
    }
    // NOTE(review): the decoder is never flushed (no send_eof/drain loop), so
    // any frames still buffered inside it at EOF are dropped — confirm whether
    // losing the final fraction of a second of audio is acceptable here.
    Ok(())
}
/// Computes the frequency spectrum of `f32_samples` via FFT and returns one
/// magnitude value per frequency bin.
///
/// The buffer is resized (zero-padded or truncated) to the next power of two
/// at or above `num_samples` — the FFT requires a power-of-two length — then
/// tapered with a Hann window to reduce spectral leakage before transforming.
fn samples_to_fr(mut f32_samples: Vec<f32>, num_samples: usize, rate: u32) -> Vec<f32> {
    // `usize::next_power_of_two` replaces the previous float
    // log2/ceil/pow round-trip: it is exact for every input and agrees with
    // the old code on all values (including 0, which both map to 1).
    let padded_len = num_samples.next_power_of_two();
    f32_samples.resize(padded_len, 0.0);
    // Hann window tapers the edges so the FFT sees a quasi-periodic signal.
    let windowed = hann_window(&f32_samples);
    let spectrum = samples_fft_to_spectrum(
        // (windowed) samples
        &windowed,
        // sampling rate in Hz, needed to label the frequency bins
        rate,
        // keep the full frequency range
        FrequencyLimit::All,
        // normalize magnitudes by sqrt(N) so they are comparable across sizes
        Some(&divide_by_N_sqrt),
    )
    .expect("samples_fft_to_spectrum rejected a power-of-two input"); // invariant: len is 2^k
    spectrum
        .data()
        .iter()
        .map(|(_, fr_val)| fr_val.val())
        .collect()
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment