Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

请教下webrtc实时音频的一些问题,代码如下 #336

Open
lin-jj-engine opened this issue Dec 24, 2024 · 3 comments
Open

请教下webrtc实时音频的一些问题,代码如下 #336

lin-jj-engine opened this issue Dec 24, 2024 · 3 comments

Comments

@lin-jj-engine
Copy link

lin-jj-engine commented Dec 24, 2024

import asyncio
import fractions
import wave

import numpy as np
from aiohttp import web
import aiohttp_cors
from aiortc import RTCPeerConnection, RTCSessionDescription, AudioStreamTrack
from av import AudioFrame


class AudioFileTrack(AudioStreamTrack):
    """
    Custom audio track that streams PCM audio read from a WAV file.

    Assumes the WAV file is 16-bit signed mono (s16) — TODO confirm; a
    stereo or non-16-bit file would need format/layout adjustments.
    """
    kind = "audio"

    # Samples delivered per frame: 320 samples = 20 ms at 16 kHz.
    SAMPLES_PER_FRAME = 320

    def __init__(self, filename: str):
        super().__init__()
        self._filename = filename
        self._wavefile = wave.open(self._filename, "rb")  # open the WAV file
        self._frame_rate = self._wavefile.getframerate()  # sample rate in Hz
        self._frame_size = self._wavefile.getnframes()    # total sample count
        print(self._frame_rate, self._frame_size)
        self._timestamp = 0  # running pts, in samples

    async def recv(self):
        """
        Read the next chunk of samples from the WAV file and return it
        as a timestamped ``av.AudioFrame``.
        """
        samples = self.SAMPLES_PER_FRAME
        pcm = self._wavefile.readframes(samples)  # 2 bytes per s16 mono sample
        if len(pcm) < samples * 2:
            # Past end of file: pad with silence so we keep emitting
            # well-formed, correctly-stamped frames instead of the
            # original's malformed zero-sample frame without pts.
            pcm += b"\x00" * (samples * 2 - len(pcm))

        frame = AudioFrame(format="s16", layout="mono", samples=samples)
        # Bug fix: the original read the PCM bytes but never copied them
        # into the frame, so the track transmitted uninitialized audio.
        frame.planes[0].update(pcm)
        frame.pts = self._timestamp
        frame.sample_rate = self._frame_rate
        frame.time_base = fractions.Fraction(1, self._frame_rate)
        self._timestamp += samples

        # Pace delivery in real time; without this, recv() returns as fast
        # as the consumer polls and playback becomes erratic.
        await asyncio.sleep(samples / self._frame_rate)
        return frame


async def offer(request):
    """Handle a WebRTC offer POSTed by the browser and reply with an answer.

    Expects a JSON body containing ``offer`` (the SDP string) and ``type``;
    responds with the local description's SDP and type as JSON.
    """
    payload = await request.json()

    pc = RTCPeerConnection()

    # Attach the WAV-backed audio track to the peer connection.
    audio_track = AudioFileTrack("test.wav")
    pc.addTrack(audio_track)

    # Apply the browser's offer as the remote description.
    remote_desc = RTCSessionDescription(sdp=payload['offer'], type=payload['type'])
    await pc.setRemoteDescription(remote_desc)

    # Generate our answer and install it as the local description.
    answer = await pc.createAnswer()
    await pc.setLocalDescription(answer)

    return web.json_response(
        {"sdp": pc.localDescription.sdp, "type": pc.localDescription.type}
    )


async def init():
    """Build the aiohttp application, enable CORS on /offer, and start
    listening on 0.0.0.0:8900."""
    app = web.Application()

    # Allow cross-origin requests from any host so the browser page
    # (served from elsewhere) can POST its offer to this server.
    cors = aiohttp_cors.setup(app, defaults={
        "*": aiohttp_cors.ResourceOptions(
            allow_credentials=True,
            expose_headers="*",
            allow_headers="*",
        )
    })
    resource = cors.add(app.router.add_resource("/offer"))
    cors.add(resource.add_route("POST", offer))

    runner = web.AppRunner(app)
    await runner.setup()
    site = web.TCPSite(runner, '0.0.0.0', 8900)
    await site.start()


if __name__ == '__main__':
    # Run the async setup to completion, then keep the loop alive so the
    # HTTP server and aiortc peer connections continue to run.
    loop = asyncio.new_event_loop()
    loop.run_until_complete(init())
    # Bug fix: announce readiness only after the site has actually started
    # (the original printed this at import time, before startup).
    print('------- Http server started at localhost:8900 --------')
    loop.run_forever()
@lin-jj-engine
Copy link
Author

前端偶尔能播放出音频,也写过视频的,视频也是偶尔会播放,是为什么呢,实在是不太懂,想问大佬请教下,感谢

@lin-jj-engine
Copy link
Author

<html lang="en">
	<head>
		<meta charset="UTF-8">
		<meta name="viewport" content="width=device-width, initial-scale=1.0">
		<title>WebRTC Streaming</title>
	</head>
	<body>
		<h2>WebRTC Video Streaming</h2>
		<video id="video" autoplay playsinline></video> <!-- Displays the incoming video stream -->
		<h2>WebRTC Video Streaming</h2>
		<audio id="remoteAudio" autoplay></audio> <!-- Plays the incoming audio stream -->
		<button onclick="startAudio()">Play Audio</button>
		<h2>WebRTC Video Streaming</h2>
		<script>
			// Element handles for the remote media.
			const videoElement = document.getElementById("video");
			const remoteAudio = document.getElementById('remoteAudio');
			remoteAudio.muted = false;
			let pc = null;

			// STUN server configuration (Google's public STUN server).
			const config = {
				iceServers: [{
						urls: "stun:stun1.l.google.com:19302"
					} // Public STUN server
				]
			};

			// Create the RTCPeerConnection and run the offer/answer exchange
			// against the backend signalling endpoint.
			async function start() {
				pc = new RTCPeerConnection(config);

				// Receive and play the remote audio track.
				pc.addEventListener('track', (event) => {

					if (event.track.kind === 'audio') {
						console.log('Audio track received.');
						remoteAudio.srcObject = event.streams[0]; // Play the audio stream
						remoteAudio.play().catch((err) => {
							console.error('Error playing audio:', err);
						});
						console.log('Stream', event.streams[0]);
						console.log('Stream Active:', event.streams[0].active);
						console.log('Stream Tracks:', event.streams[0].getTracks());

					}
				});

				// Create the local offer (receive-only) and install it.
				const offer = await pc.createOffer({
					offerToReceiveVideo: true,
					offerToReceiveAudio: true
				});
				await pc.setLocalDescription(offer);

				// POST the offer to the backend and receive the answer.
				const response = await fetch('http://localhost:8900/offer', {
					method: 'POST',
					headers: {
						'Content-Type': 'application/json'
					},
					body: JSON.stringify({
						offer: pc.localDescription.sdp,
						type: pc.localDescription.type
					})
				});

				const data = await response.json();

				// Apply the remote description (the server's answer).
				const answer = new RTCSessionDescription({
					type: 'answer',
					sdp: data.sdp
				});
				await pc.setRemoteDescription(answer);
			}

			// Manual play fallback for browsers that block audio autoplay.
			function startAudio() {
				const remoteAudio = document.getElementById('remoteAudio');
				remoteAudio.play().catch(error => {
					console.log('Audio play failed:', error);
				});
			}
			// Start the WebRTC session on page load.
			start();
		</script>
	</body>
</html>

@lipku
Copy link
Owner

lipku commented Dec 24, 2024

参考一下aiortc里的例子

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Labels
None yet
Projects
None yet
Development

No branches or pull requests

2 participants