opencv 如何在python中使用屏幕录制来录制语音

wvyml7n5  于 2023-10-24  发布在  Python
关注(0)|答案(2)|浏览(146)

我正在用Python制作一个屏幕记录器,在这个过程中,我成功地用Python PIL库捕获屏幕。
并将其显示在标签中(当前记录)。
现在我想要的是从用户录制音频时,用户点击我的GUI窗口中的音频录制按钮,并停止录制语音时,用户点击停止录制语音按钮。
最后,我想合并我的声音和视频录制到一个最终的输出,作为一个视频与语音
谁能告诉我如何把这两段代码合并到一个脚本中,并成功运行它。

# Standard library
import datetime
import logging
import threading
import tkinter as tk
from tkinter import *
from tkinter import ttk, FLAT
from tkinter.filedialog import asksaveasfilename

# Third-party
import cv2
import numpy as np
import win32api
from PIL import Image, ImageTk, ImageGrab

# Preview/recording frame size (width, height); the original was (960, 540).
VIDEO_SIZE = (800,420)     #(960, 540)

# Webcam device 0; used by the preview window (read_frame / start_webcam).
cap = cv2.VideoCapture(0) 

# Timestamped output file name, e.g. rec_2023-10-24-153000.mp4
date = datetime.datetime.now()
filename='rec_%s-%s-%s-%s%s%s.mp4' % (date.year, date.month, date.day,
                                                     date.hour, date.minute, date.second)

# NOTE(review): the 'H264' fourcc requires an OpenCV build with an H.264
# encoder; 'mp4v' is usually the safer choice for .mp4 output — confirm on
# the target machine.
fourcc = cv2.VideoWriter_fourcc(*'H264')
frame_rate = 15

# Opened lazily in start_recording(); released in stop_recording().
out = cv2.VideoWriter()
def change_i():
    """Toggle the sound/record button: swap its icon and start/stop recording."""
    idle = sound_btn.image == icon
    if idle:
        start_recording()
        new_icon = icon2
    else:
        stop_recording()
        new_icon = icon
    sound_btn.config(image=new_icon)
    sound_btn.image = new_icon  # keep a reference so Tk doesn't GC the image

def change_r():
    """Toggle screen recording and update the record button's label."""
    if rec_btn['text'] == 'Start Recording':
        start_recording()
        # BUG FIX: the label read "Stop Recoding" (typo) in the original.
        rec_btn.config(text="Stop Recording")
    else:
        stop_recording()
        rec_btn.config(text="Start Recording")
def change_w():
    """Toggle the webcam preview window and swap the button icon."""
    starting = cap_btn.image == web
    if starting:
        start_webcam()
    else:
        stop_webcam(None)
    new_img = web2 if starting else web
    cap_btn.config(image=new_img)
    cap_btn.image = new_img  # keep a reference so Tk doesn't GC the image
# --- screen capture
def Cursor_pos(img, center, radius, color, thickness):
    """Draw a semi-transparent circle marking the cursor position on *img*.

    Parameters:
        img: image (numpy array), modified in place.
        center: (x, y) cursor coordinates; converted to int.
        radius: circle radius in pixels.
        color: (r, g, b, alpha) tuple with channels in the 0-1 range.
        thickness: outline thickness; a negative value fills the circle.
    """
    center = tuple(map(int, center))
    rgb = [255 * c for c in color[:3]]  # convert 0-1 channels to 0-255 for OpenCV
    alpha = color[-1]
    radius = int(radius)
    # Pad the region of interest so anti-aliased edges are fully covered.
    if thickness > 0:
        pad = radius + 2 + thickness
    else:
        pad = radius + 3
    roi = (slice(center[1] - pad, center[1] + pad),
           slice(center[0] - pad, center[0] + pad))

    try:
        overlay = img[roi].copy()
        cv2.circle(img, center, radius, rgb, thickness=thickness, lineType=cv2.LINE_AA)
        # Blend the drawn circle with the saved background for transparency.
        cv2.addWeighted(src1=img[roi], alpha=alpha, src2=overlay,
                        beta=1.0 - alpha, gamma=0, dst=img[roi])
    except (ValueError, cv2.error):
        # BUG FIX: the original used a bare `except:` and called an undefined
        # `logger`, which raised NameError inside the handler. Drawing near
        # the image edge is best-effort: log and skip.
        logging.getLogger(__name__).debug(
            "transparent_circle would have been partially outside of img. Did not draw it.")

def recording_screen():
    """Capture the screen in a loop until the `recording` flag is cleared.

    Each iteration: grab the full screen, overlay the mouse cursor, update
    the Tk preview label, then write the BGR frame to the global writer.
    Runs on a daemon thread started by start_recording().
    """
    global recording
    recording = True
    while recording:
        # NOTE(review): this capture sequence was corrupted in the original
        # source; it is reconstructed from the module imports and the
        # commented-out cv2.circle hint — confirm cursor color/radius.
        img = ImageGrab.grab()            # full-screen PIL capture (RGB)
        frame = np.array(img)
        curpos = win32api.GetCursorPos()  # current mouse position (x, y)
        Cursor_pos(frame, curpos, 10, (1, 1, 0, 0.5), -1)
        #cv2.circle(frame, curpos, 10, (0,255,255), 2)
        frame = cv2.resize(frame, VIDEO_SIZE)
        tkimage.paste(Image.fromarray(frame))           # update the preview label
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)  # VideoWriter expects BGR
        out.write(frame)

def start_recording():
    """Open the video writer (if not already open) and launch the capture thread."""
    # BUG FIX: `out.open(...)` was not indented under the `if` in the
    # original, which was a syntax (indentation) error.
    if not out.isOpened():
        out.open(filename, fourcc, frame_rate, VIDEO_SIZE)
    # Daemon thread so a still-running capture loop can't block interpreter exit.
    threading.Thread(target=recording_screen, daemon=True).start()

def stop_recording():
    """Stop the screen-capture loop and finalize the video file."""
    global recording
    recording = False  # recording_screen's while-loop exits on its next check
    out.release()

# --- webcam

webcam = None
WEBCAM_SIZE = (280, 200)

def read_frame(imgbox):
    """Poll one webcam frame into *imgbox* and reschedule itself.

    Runs on the Tk event loop via `after`; stops once the preview window
    has been closed or the capture device is no longer open.
    """
    # BUG FIX: a pending `after` callback could fire after stop_webcam()
    # set `webcam = None`, crashing on `webcam.after(...)`. Bail out first.
    if webcam is None:
        return
    if cap.isOpened():
        ret, frame = cap.read()
        if ret:
            frame = cv2.flip(frame, 1)                      # mirror for natural selfie view
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # OpenCV BGR -> PIL RGB
            frame = cv2.resize(frame, WEBCAM_SIZE)
            image = Image.fromarray(frame)
            imgbox.image.paste(image)
        webcam.after(20, read_frame, imgbox)  # poll again in ~20 ms

def stop_webcam(event):
    """Close the webcam preview window and release the capture device.

    *event* is unused; it exists so this can double as a Tk event handler.
    """
    global webcam
    if not webcam:
        return
    window, webcam = webcam, None  # clear the global before tearing down
    window.destroy()
    cap.release()
def start_webcam():
    """Open a borderless, always-on-top webcam preview window."""
    global webcam

    if webcam is not None:
        return  # preview already open
    # BUG FIX: the original called `cap.isOpened()` and discarded the result,
    # so the camera was never reopened after stop_webcam() released it.
    if not cap.isOpened():
        cap.open(0)
    webcam = tk.Toplevel()
    webcam.attributes("-topmost", True)  # keep the preview above other windows
    webcam.geometry('{}x{}+5+520'.format(WEBCAM_SIZE[0], WEBCAM_SIZE[1]))
    webcam.overrideredirect(1)           # no title bar or borders
    imgbox = tk.Label(webcam)
    imgbox.pack()
    # Black placeholder image; read_frame() pastes live frames into it.
    imgbox.image = ImageTk.PhotoImage(image=Image.new('RGB', WEBCAM_SIZE, (0, 0, 0)))
    imgbox.config(image=imgbox.image)
    read_frame(imgbox)

# --- main ---

root = tk.Tk()
root.title('Screen Recorder')
#root.iconbitmap(r'rec_i.ico')  # optional window icon
root.resizable(width=False, height=False)

# Button artwork (the image files must sit next to the script).
icon = PhotoImage(file='dh.png')
icon2 = PhotoImage(file='stop.png')
web = PhotoImage(file='webcam.png')
web2 = PhotoImage(file='webcamee.png')
root.geometry('+260+70')

# Black placeholder; recording_screen() pastes live preview frames into it.
tkimage = ImageTk.PhotoImage(Image.new('RGB', VIDEO_SIZE, (0, 0, 0)))

w, h = VIDEO_SIZE
vbox = tk.Label(root, image=tkimage, width=w, height=h, bg='black')
vbox.pack(pady=10, padx=25)

frame = tk.Frame(root)
frame.pack()

# BUG FIX: the original read `ound_btn = tk.Button(...)` (missing 's'),
# so the following `sound_btn.grid(...)` raised NameError at startup.
sound_btn = tk.Button(frame, image=icon, width=70, relief=FLAT, command=change_i)
sound_btn.grid(row=0, column=1)
sound_btn.image = icon
cap_btn = tk.Button(frame, image=web, width=70, relief=FLAT, command=change_w)
cap_btn.grid(row=0, column=2)
cap_btn.image = web
# NOTE(review): change_r() references a `rec_btn` that is never created —
# a Start/Stop Recording button still needs to be added to this frame.
message = Label(frame, text='**** press the start button to start recording ****')
message.grid(row=1, column=1)

root.mainloop()

我该怎么做才能在录制屏幕的同时(simultaneously),通过麦克风录制用户的声音?
有什么建议吗?

xxslljrj

xxslljrj1#

你可以在同一个脚本中同时录制屏幕和声音。该代码会生成两个文件:一个 .mp4 视频文件和一个 .mp3 音频文件。接下来只需把音频文件合并到视频文件中,并保存到指定路径即可。

tf7tbtn2

tf7tbtn22#

下面的代码将帮助您同时启动屏幕录制和音频录制。然后,它将使用MoviePy库将音频和视频合并组合在一起。请注意,代码中用于视频录制的帧速率和屏幕录制的实际帧速率可能不同。此外,请注意MoviePy可能需要一些时间来处理音频和视频。

import pyautogui
import numpy as np
import cv2
import threading 
import sounddevice as sd
import queue
import soundfile as sf
from moviepy.editor import AudioFileClip, VideoFileClip
import tkinter as tk
from tkinter import messagebox
import time
class ScreenRecorderApp:
    """Tk GUI that records the screen (OpenCV) and the microphone
    (sounddevice) simultaneously, then muxes them into one mp4 with MoviePy.
    """

    def __init__(self, root):
        self.root = root
        self.root.title("Screen Recorder")
        self.start_button = tk.Button(root, text="Start Recording", command=self.start_recording)
        self.start_button.pack()
        self.stop_button = tk.Button(root, text="Stop Recording", command=self.stop_recording, state=tk.DISABLED)
        self.stop_button.pack()
        # Screen dimensions sized to the full display.
        self.screen_width, self.screen_height = pyautogui.size()
        # Codec; the writer itself is created per recording in start_recording().
        self.fourcc = cv2.VideoWriter_fourcc(*"XVID")
        self.out = None
        # Audio parameters.
        self.sample_rate = 44100  # CD quality
        self.audio_buffer = queue.Queue()
        # Recording state.
        self.recording = False
        self.recording_thread = None

    def start_recording(self):
        """Open the writer, start the screen-capture thread, then stream audio."""
        self.recording = True
        self.out = cv2.VideoWriter("output_screen_recording.avi", self.fourcc, 20.0,
                                   (self.screen_width, self.screen_height))
        self.start_button.config(state=tk.DISABLED)
        self.stop_button.config(state=tk.NORMAL)
        self.start_time = time.time()  # for the actual-frame-rate calculation
        self.end_time = None           # set by stop_recording()
        self.num_frames = 0
        self.recording_thread = threading.Thread(target=self.record_screen)
        self.recording_thread.start()
        # Stream microphone audio while Tk events keep being processed;
        # wait_window() pumps the event loop until the root window closes.
        with sd.InputStream(callback=self.record_audio, channels=2, samplerate=self.sample_rate):
            self.root.wait_window()

    def stop_recording(self):
        """Stop capture, save the audio, and mux audio + video."""
        self.recording = False
        # BUG FIX: capture the end time *now*, before joining/saving/muxing,
        # so the computed frame rate reflects only the recording interval
        # (the original measured elapsed time inside combine_audio_video).
        self.end_time = time.time()
        self.recording_thread.join()
        self.out.release()
        self.save_audio()
        self.combine_audio_video()
        self.start_button.config(state=tk.NORMAL)
        self.stop_button.config(state=tk.DISABLED)
        messagebox.showinfo("Recording Finished", "Recording has been saved.")

    def record_screen(self):
        """Grab full-screen frames until `recording` clears (worker thread)."""
        while self.recording:
            screenshot = pyautogui.screenshot()
            frame = np.array(screenshot)
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)  # writer expects BGR
            self.out.write(frame)
            self.num_frames += 1

    def record_audio(self, indata, frames, time, status):
        """sounddevice callback: queue a copy of each incoming audio chunk.

        (Parameter names follow the sounddevice callback signature; `time`
        intentionally shadows the module only within this method.)
        """
        self.audio_buffer.put(indata.copy())

    def save_audio(self):
        """Drain the audio queue and write it out as a WAV file."""
        audio_data = []
        while not self.audio_buffer.empty():
            audio_data.append(self.audio_buffer.get())
        # BUG FIX: np.concatenate raises ValueError on an empty list; skip
        # writing when no audio was captured at all.
        if not audio_data:
            return
        audio_data = np.concatenate(audio_data, axis=0)
        sf.write("output_recorded_audio.wav", audio_data, self.sample_rate)

    def combine_audio_video(self):
        """Mux the recorded audio onto the video at the measured frame rate."""
        video_clip = VideoFileClip("output_screen_recording.avi")
        audio_clip = AudioFileClip("output_recorded_audio.wav")
        # Frame rate actually achieved during recording (the writer's nominal
        # 20 fps rarely matches real capture speed).
        elapsed = (self.end_time or time.time()) - self.start_time
        actual_frame_rate = self.num_frames / elapsed if elapsed > 0 else 20.0
        final_clip = video_clip.set_audio(audio_clip)
        final_clip = final_clip.set_duration(audio_clip.duration)  # match audio length
        final_clip = final_clip.set_fps(actual_frame_rate)
        # NOTE: MoviePy can take a while to encode long recordings.
        final_clip.write_videofile("output_video.mp4", codec="libx264")
# Run the GUI only when executed as a script, not on import.
if __name__ == "__main__":
    root = tk.Tk()
    app = ScreenRecorderApp(root)
    root.mainloop()

相关问题