我有一个项目,编码的一个组成部分,但它的工作奇怪。
功能:4 pages, random id for signaling websocket server identification
验证码:
const Call = ({ navigation }) => {
const [localStream, setlocalStream] = useState(null)
const [remoteStream, setRemoteStream] = useState(new MediaStream())
const [localMicOn, setlocalMicOn] = useState(true)
const [localWebcamOn, setlocalWebcamOn] = useState(true)
const [type, setType] = useState('JOIN')
const [callerId] = useState(
Math.floor(100 + Math.random() * 900).toString(),
)
const otherUserId = useRef(null)
const socket = useRef(null)
const pc = useRef(new RTCPeerConnection({
iceServers: [{ urls: 'stun:stun.l.google.com:19302' }]
}))
const candidates = useRef([])
const remoteRTCMessage = useRef(null)
function answerCall(data) {
socket.current.emit('answerCall', data);
}
function sendCall(data) {
socket.current.emit('call', data);
}
function sendICEcandidate(data) {
socket.current.emit('iICEcandidate', data);
}
useEffect(() => {
socket.current = SocketIOClient('http://myserver.com:3500', {
transports: ['websocket'], query: { callerId }
})
socket.current.on('newCall', data => {
console.log('\n newCall data: \n', data)
remoteRTCMessage.current = data.rtcMessage
otherUserId.current = data.callerId
setType('INCOMING_CALL')
})
socket.current.on('callAnswered', data => {
console.log('\n callAnswered data: \n', candidates)
remoteRTCMessage.current = data.rtcMessage
pc.current.setRemoteDescription(data.rtcMessage)
setType('WEBRTC_ROOM')
})
socket.current.on('oICEcandidate', data => {
if (pc.current.remoteDescription !== null) {
pc.current.addIceCandidate(
new RTCIceCandidate({
candidate: data.rtcMessage.candidate,
sdpMid: data.rtcMessage.id,
sdpMLineIndex: data.rtcMessage.label,
}))
.then(data => { console.log('\n oICEcandidate ice SUCCESS \n') })
.catch(err => { console.log('\n ICEERROR \n', err) })
} else {
candidates.current.push(data.rtcMessage)
}
console.log('\n oICEcandidate data: \n', data)
})
}, [])
useEffect(() => {
pc.current.addEventListener('connectionstatechange', event => {
console.log('\n listener connectionstatechange event: \n', event)
})
pc.current.addEventListener('icecandidate', event => {
console.log('\n listener icecandidate event: \n', event)
if (!event.candidate) {
return
} else {
sendICEcandidate({
calleeId: otherUserId.current,
rtcMessage: {
label: event.candidate.sdpMLineIndex,
id: event.candidate.sdpMid,
candidate: event.candidate.candidate,
},
})
}
})
pc.current.addEventListener('icecandidateerror', event => {
console.log('\n listener icecandidateerror event: \n', event)
})
pc.current.addEventListener('iceconnectionstatechange', event => {
console.log('\n listener iceconnectionstatechange event: \n', event)
})
pc.current.addEventListener('negotiationneeded', event => {
console.log('\n listener negotiationneeded event: \n', event)
pc.current.createOffer()
.then(offer => {
pc.current.setLocalDescription(offer)
return Promise.resolve(offer)
})
.then(offer => {
sendCall({
calleeId: otherUserId.current,
rtcMessage: offer,
})
})
.catch(err => console.error(err))
})
pc.current.addEventListener('signalingstatechange', event => {
console.log('\n listener signalingstatechange event: \n', event)
})
pc.current.addEventListener('track', event => {
console.log('\n listener track event: \n', event)
const newStream = remoteStream
newStream.addTrack(event.track)
setRemoteStream(newStream)
})
}, [])
const processCall = async () => {
let mediaConstraints = {
audio: true,
video: {
frameRate: 30,
facingMode: 'user'
}
}
try {
const mediaStream = await mediaDevices.getUserMedia(mediaConstraints)
setlocalStream(mediaStream)
mediaStream.getTracks().forEach((track) => {
pc.current.addTrack(track, mediaStream)
})
//console.log("\nDEBUG\n", pc.current)
} catch (err) {
console.log(console.log('\n processCall error: \n', err))
}
}
const processAccept = async () => {
let mediaConstraints = {
audio: true,
video: {
frameRate: 30,
facingMode: 'user'
}
}
try {
pc.current.setRemoteDescription(remoteRTCMessage.current)
const answer = await pc.current.createAnswer()
await pc.current.setLocalDescription(answer)
const mediaStream = await mediaDevices.getUserMedia(mediaConstraints)
setlocalStream(mediaStream)
answerCall({
callerId: otherUserId.current,
rtcMessage: answer,
})
candidates.current.map(candidate => pc.current.addIceCandidate(
new RTCIceCandidate({
candidate: candidate.candidate,
sdpMid: candidate.id,
sdpMLineIndex: candidate.label,
}))
.then(data => { console.log('\n processAccept ice SUCCESS \n') })
.catch(err => { console.log('Errorseew', err) })
)
candidates.current = []
//console.log("\nDEBUG\n", pc.current)
} catch (err) {
console.log(console.log('\n processCall error: \n', err))
}
}
function switchCamera() {
localStream.getVideoTracks().forEach(track => {
track._switchCamera()
})
}
function toggleCamera() {
localWebcamOn ? setlocalWebcamOn(false) : setlocalWebcamOn(true)
localStream.getVideoTracks().forEach(track => {
localWebcamOn ? (track.enabled = false) : (track.enabled = true)
})
}
function toggleMic() {
localMicOn ? setlocalMicOn(false) : setlocalMicOn(true)
localStream.getAudioTracks().forEach(track => {
localMicOn ? (track.enabled = false) : (track.enabled = true)
})
}
function leave() {
pc.current.close()
setlocalStream(null)
setType('JOIN')
}
UI代码:
const JoinScreen = () => {
return (
<KeyboardAvoidingView
behavior={Platform.OS === 'ios' ? 'padding' : 'height'}
style={{
flex: 1,
backgroundColor: '#050A0E',
justifyContent: 'center',
paddingHorizontal: 42,
}}>
<TouchableWithoutFeedback onPress={Keyboard.dismiss}>
<>
<View
style={{
padding: 35,
backgroundColor: '#1A1C22',
justifyContent: 'center',
alignItems: 'center',
borderRadius: 14,
}}>
<Text
style={{
fontSize: 18,
color: '#D0D4DD',
}}>
Your Caller ID
</Text>
<View
style={{
flexDirection: 'row',
marginTop: 12,
alignItems: 'center',
}}>
<Text
style={{
fontSize: 32,
color: '#ffff',
letterSpacing: 6,
}}>
{callerId}
</Text>
</View>
</View>
<View
style={{
backgroundColor: '#1A1C22',
padding: 40,
marginTop: 25,
justifyContent: 'center',
borderRadius: 14,
}}>
<Text
style={{
fontSize: 18,
color: '#D0D4DD',
}}>
Enter call id of another user
</Text>
<TextInputContainer
placeholder={'Enter Caller ID'}
value={otherUserId.current}
setValue={text => {
otherUserId.current = text;
console.log('TEST', otherUserId.current);
}}
keyboardType={'number-pad'}
/>
<TouchableOpacity
onPress={() => {
setType('OUTGOING_CALL')
processCall()
}}
style={{
height: 50,
backgroundColor: '#5568FE',
justifyContent: 'center',
alignItems: 'center',
borderRadius: 12,
marginTop: 16,
}}>
<Text
style={{
fontSize: 16,
color: '#FFFFFF',
}}>
Call Now
</Text>
</TouchableOpacity>
</View>
</>
</TouchableWithoutFeedback>
</KeyboardAvoidingView>
);
};
const OutgoingCallScreen = () => {
return (
<View
style={{
flex: 1,
justifyContent: 'space-around',
backgroundColor: '#050A0E',
}}>
<View
style={{
padding: 35,
justifyContent: 'center',
alignItems: 'center',
borderRadius: 14,
}}>
<Text
style={{
fontSize: 16,
color: '#D0D4DD',
}}>
Calling to...
</Text>
<Text
style={{
fontSize: 36,
marginTop: 12,
color: '#ffff',
letterSpacing: 6,
}}>
{otherUserId.current}
</Text>
</View>
<View
style={{
justifyContent: 'center',
alignItems: 'center',
}}>
<TouchableOpacity
onPress={() => {
setType('JOIN');
otherUserId.current = null;
}}
style={{
backgroundColor: '#FF5D5D',
borderRadius: 30,
height: 60,
aspectRatio: 1,
justifyContent: 'center',
alignItems: 'center',
}}>
<CallEnd width={50} height={12} />
</TouchableOpacity>
</View>
</View>
);
};
const IncomingCallScreen = () => {
return (
<View
style={{
flex: 1,
justifyContent: 'space-around',
backgroundColor: '#050A0E',
}}>
<View
style={{
padding: 35,
justifyContent: 'center',
alignItems: 'center',
borderRadius: 14,
}}>
<Text
style={{
fontSize: 36,
marginTop: 12,
color: '#ffff',
}}>
{otherUserId.current} is calling..
</Text>
</View>
<View
style={{
justifyContent: 'center',
alignItems: 'center',
}}>
<TouchableOpacity
onPress={() => {
processAccept()
setType('WEBRTC_ROOM')
}}
style={{
backgroundColor: 'green',
borderRadius: 30,
height: 60,
aspectRatio: 1,
justifyContent: 'center',
alignItems: 'center',
}}>
<CallAnswer height={28} fill={'#fff'} />
</TouchableOpacity>
</View>
</View>
);
};
const WebrtcRoomScreen = () => {
return (
<View
style={{
flex: 1,
backgroundColor: '#050A0E',
paddingHorizontal: 12,
paddingVertical: 12,
}}>
{localStream ? (
<RTCView
objectFit={'cover'}
style={{ flex: 1, backgroundColor: '#050A0E' }}
streamURL={localStream.toURL()}
/>
) : null}
{remoteStream ? (
<RTCView
objectFit={'cover'}
style={{
flex: 1,
backgroundColor: '#050A0E',
marginTop: 8,
}}
streamURL={remoteStream.toURL()}
/>
) : null}
<View
style={{
marginVertical: 12,
flexDirection: 'row',
justifyContent: 'space-evenly',
}}>
<IconContainer
backgroundColor={'red'}
onPress={() => {
leave()
}}
Icon={() => {
return <CallEnd height={26} width={26} fill="#FFF" />;
}}
/>
<IconContainer
style={{
borderWidth: 1.5,
borderColor: '#2B3034',
}}
backgroundColor={!localMicOn ? '#fff' : 'transparent'}
onPress={() => {
toggleMic()
}}
Icon={() => {
return localMicOn ? (
<MicOn height={24} width={24} fill="#FFF" />
) : (
<MicOff height={28} width={28} fill="#1D2939" />
);
}}
/>
<IconContainer
style={{
borderWidth: 1.5,
borderColor: '#2B3034',
}}
backgroundColor={!localWebcamOn ? '#fff' : 'transparent'}
onPress={() => {
toggleCamera()
}}
Icon={() => {
return localWebcamOn ? (
<VideoOn height={24} width={24} fill="#FFF" />
) : (
<VideoOff height={36} width={36} fill="#1D2939" />
);
}}
/>
<IconContainer
style={{
borderWidth: 1.5,
borderColor: '#2B3034',
}}
backgroundColor={'transparent'}
onPress={() => {
switchCamera()
}}
Icon={() => {
return <CameraSwitch height={24} width={24} fill="#FFF" />;
}}
/>
</View>
</View>
);
};
switch (type) {
case 'JOIN':
return JoinScreen();
case 'INCOMING_CALL':
return IncomingCallScreen();
case 'OUTGOING_CALL':
return OutgoingCallScreen();
case 'WEBRTC_ROOM':
return WebrtcRoomScreen();
default:
return null;
}
}
export default Call
当人A开始呼叫并且人B接受呼叫时,人B可以听到人A,人A听不到人B。我换了。都是一样的,但有镜像我已经浪费了30个小时去修了。感觉糟透了。
1条答案
按热度按时间aelbi1ox1#
如果您在信令过程中只有一次报价-应答交换,则会发生这种情况。
这就是为什么它在这种情况下不起作用:
1.用户A创建RTCPeerConnection;添加其音轨并向用户B发送报价。该提议包含具有用户A想要发送音频的信息的SDP,并且其呈现其支持的编解码器和其他音频参数。
1.用户B接收该提议,添加其自己的媒体轨道并发送应答。应答将包含用户B的媒体详细信息以及 * 为用户A的音频 * 选择的媒体参数。
1.用户A收到答案。现在它知道要使用哪些参数来发送音频,并可以开始发送。它还接收用户B想要发送其自己的音频的信息并获得支持的参数。
这里的问题是,用户B已经呈现了其支持的编解码器,但从未从A接收到它应该使用什么参数来编码音频的信息。这导致B根本不发送音频。
一个简单的解决方法是在建立RTCPeerConnection时生成并发送另一个报价。