- import openshot
def cKey(r, g, b, fuzz):
    """Build a ChromaKey effect keyed on the RGB colour (r, g, b) with the given fuzz tolerance."""
    key_color = openshot.Color()
    key_color.red = openshot.Keyframe(r)
    key_color.green = openshot.Keyframe(g)
    key_color.blue = openshot.Keyframe(b)
    return openshot.ChromaKey(key_color, openshot.Keyframe(fuzz))
def video_photo_clip(video=None, layer=None, position=None, end=None,
                     scale_x=1, scale_y=1, location_x=0, location_y=0,
                     ck=None, audio=True):
    """Create and configure an openshot.Clip from an opened reader.

    Parameters:
        video: an opened openshot reader (e.g. FFmpegReader) — TODO confirm callers always open it first.
        layer: timeline layer (higher layers render on top).
        position: start time of the clip on the timeline, in seconds.
        end: end time of the clip, in seconds.
        scale_x, scale_y: scale keyframe values applied to the source.
        location_x, location_y: position keyframe offsets for the clip.
        ck: optional chroma-key (or other) effect to attach, or None.
        audio: whether the clip's audio track is enabled.

    Returns:
        The configured openshot.Clip.
    """
    clip = openshot.Clip(video)
    clip.Layer(layer)
    clip.Position(position)
    clip.End(end)
    clip.scale_x = openshot.Keyframe(scale_x)
    clip.scale_y = openshot.Keyframe(scale_y)
    clip.location_x = openshot.Keyframe(location_x)
    clip.location_y = openshot.Keyframe(location_y)

    # `is not None` is the correct None check (was `!= None`).
    if ck is not None:
        clip.AddEffect(ck)
    # Keyframe(1) enables the audio track, Keyframe(0) mutes it.
    clip.has_audio = openshot.Keyframe(1 if audio else 0)
    return clip
def video_writer_init(path, width=1280, height=720, fps=(30000, 1000), bitrate=3000000):
    """Create an FFmpegWriter with AAC audio and H.264 video options.

    Parameters:
        path: output file path for the encoded video.
        width, height: output frame size in pixels (defaults preserve the
            original hard-coded 1280x720).
        fps: (numerator, denominator) pair for the frame rate
            (default 30000/1000 = 30 fps).
        bitrate: target bitrate in bits/s, used for both audio and video
            as in the original code.

    Returns:
        The configured (but not yet opened) openshot.FFmpegWriter.
    """
    w = openshot.FFmpegWriter(path)
    w.SetAudioOptions(True, "aac", 44100, 2, openshot.LAYOUT_STEREO, bitrate)
    w.SetVideoOptions(True, "libx264", openshot.Fraction(fps[0], fps[1]),
                      width, height, openshot.Fraction(1, 1), False, False, bitrate)
    return w
def create_video(bg, title_bg, people, time, output_path="/app/output/Q-3.mp4"):
    """Compose background, title template and a chroma-keyed presenter into one video.

    Parameters:
        bg: path to the background footage (or still image).
        title_bg: path to the title/template overlay clip.
        people: path to the presenter clip shot on a green screen.
        time: output duration in seconds (converted with int()).
        output_path: where the encoded video is written
            (default preserves the original hard-coded path).

    Side effects:
        Writes the encoded video to `output_path` and prints the frame count.
    """
    duration = int(time)

    t = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000), 44100, 2,
                          openshot.LAYOUT_STEREO)
    t.Open()

    # Chroma key that cuts the presenter out of the green screen.
    # (The original also built an unused cKey(0, 254, 0, 270) — removed.)
    ck_anchor = cKey(0, 255, 0, 320)

    # Background footage, lowest of the three visible layers.
    # Clip ends now follow `duration` instead of a hard-coded 20 s,
    # so the timeline length matches the requested output length.
    anchor_video = openshot.FFmpegReader(bg)
    anchor_video.Open()
    anchor_clip_video = video_photo_clip(video=anchor_video, layer=3,
                                         scale_x=0.59, scale_y=0.59,
                                         location_x=-0.04, location_y=-0.04,
                                         position=0, end=duration,
                                         ck=None, audio=False)
    t.AddClip(anchor_clip_video)
    anchor_video.Close()

    # Title/template overlay.
    anchor_template = openshot.FFmpegReader(title_bg)
    anchor_template.Open()
    anchor_clip_template = video_photo_clip(video=anchor_template, layer=2,
                                            scale_x=1, scale_y=1,
                                            location_x=0, location_y=0,
                                            position=0, end=duration,
                                            ck=None, audio=True)
    t.AddClip(anchor_clip_template)
    anchor_template.Close()

    # Presenter, chroma-keyed onto the top layer.
    anchor_people = openshot.FFmpegReader(people)
    anchor_people.Open()
    anchor_clip_people = video_photo_clip(video=anchor_people, layer=4,
                                          scale_x=0.8, scale_y=0.8,
                                          location_x=0.38, location_y=0.35,
                                          position=0, end=duration,
                                          ck=ck_anchor, audio=True)
    t.AddClip(anchor_clip_people)
    anchor_people.Close()

    w = video_writer_init(output_path)
    w.Open()
    frames = t.info.fps.ToInt() * duration
    print('結果一', frames)
    for n in range(frames):
        w.WriteFrame(t.GetFrame(n))

    t.Close()
    w.Close()
if __name__ == '__main__':
    # Demo invocation: background still, title template, green-screen
    # presenter clip, 20-second output.
    create_video(
        "/app/input/movie_main/2022_04_15_165219.jpg",
        "/app/input/bg/樣板.mp4",
        "/app/input/people/nwfggznfiy.mp4",
        20,
    )