# op_and_ed.py
  1. import openshot
  2. def cKey(r, g, b, fuzz):
  3. col = openshot.Color()
  4. col.red = openshot.Keyframe(r)
  5. col.green = openshot.Keyframe(g)
  6. col.blue = openshot.Keyframe(b)
  7. return openshot.ChromaKey(col, openshot.Keyframe(fuzz))
  8. def video_photo_clip(video=None, layer=None, position=None, end=None
  9. , scale_x=1, scale_y=1, location_x=0, location_y=0, ck=None, audio=True):
  10. clip = openshot.Clip(video)
  11. clip.Layer(layer)
  12. clip.Position(position)
  13. clip.End(end)
  14. clip.scale_x = openshot.Keyframe(scale_x)
  15. clip.scale_y = openshot.Keyframe(scale_y)
  16. clip.location_x = openshot.Keyframe(location_x)
  17. clip.location_y = openshot.Keyframe(location_y)
  18. if ck != None:
  19. clip.AddEffect(ck)
  20. if audio == True:
  21. clip.has_audio = openshot.Keyframe(1)
  22. else:
  23. clip.has_audio = openshot.Keyframe(0)
  24. return clip
  25. def video_writer_init(path):
  26. w = openshot.FFmpegWriter(path)
  27. w.SetAudioOptions(True, "aac", 44100, 2, openshot.LAYOUT_STEREO, 3000000)
  28. w.SetVideoOptions(True, "libx264", openshot.Fraction(30000, 1000), 1280, 720,
  29. openshot.Fraction(1, 1), False, False, 3000000)
  30. return w
  31. def create_video(bg, title_bg, people, time):
  32. t = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000), 44100, 2, openshot.LAYOUT_STEREO)
  33. t.Open()
  34. ck = cKey(0, 254, 0, 270)
  35. ck_anchor = cKey(0, 255, 0, 320)
  36. # anchor_audio = openshot.FFmpegReader(audio) # 音檔
  37. # anchor_audio.Open()
  38. # anchor_clip_audio = video_photo_clip(video=anchor_audio, layer=1, scale_x=0.59, scale_y=0.59, location_x=-0.04, location_y=-0.04, position=0, end=anchor_audio.info.duration, ck=None, audio=True)
  39. # print(anchor_audio.info.duration)
  40. # t.AddClip(anchor_clip_audio)
  41. # anchor_audio.Close()
  42. anchor_video = openshot.FFmpegReader(bg) # 影片
  43. anchor_video.Open()
  44. anchor_clip_video = video_photo_clip(video=anchor_video, layer=3, scale_x=0.59, scale_y=0.59, location_x=-0.04,
  45. location_y=-0.04, position=0, end=20, ck=None, audio=False)
  46. t.AddClip(anchor_clip_video)
  47. anchor_video.Close()
  48. anchor_template = openshot.FFmpegReader(title_bg) # 樣板
  49. anchor_template.Open()
  50. anchor_clip_template = video_photo_clip(video=anchor_template, layer=2, scale_x=1, scale_y=1, location_x=0,
  51. location_y=0, position=0, end=20, ck=None, audio=True)
  52. t.AddClip(anchor_clip_template)
  53. anchor_template.Close()
  54. anchor_people = openshot.FFmpegReader(people) # 主播
  55. anchor_people.Open()
  56. anchor_clip_people = video_photo_clip(video=anchor_people, layer=4, scale_x=0.8, scale_y=0.8, location_x=0.38,
  57. location_y=0.35, position=0, end=20, ck=ck_anchor, audio=True)
  58. t.AddClip(anchor_clip_people)
  59. anchor_people.Close()
  60. w = video_writer_init("/app/output/Q-3.mp4")
  61. w.Open()
  62. # frames = int(t.info.fps)*int(time)
  63. frames = t.info.fps.ToInt() * int(time)
  64. print('結果一', frames)
  65. for n in range(frames):
  66. # tmp = n%(int(t.info.fps)*3) +int(t.info.fps)*int(2)
  67. f = t.GetFrame(n)
  68. w.WriteFrame(f)
  69. t.Close()
  70. w.Close()
  71. if __name__ == '__main__':
  72. # data = pd.read_csv('/Users/zooeytsai/Documents/導盲犬協會.csv')
  73. # for i in data:
  74. # if i['是否要場景'] == '是':
  75. # create_video(f"/app/input/movie_main/{i['素材']}", "/app/input/bg/串場樣板1.mp4", "/app/input/people/peggy1_1.mp4", f"/app/input/movie_main/{i['音檔']}")
  76. # else:
  77. # create_video(f"/app/input/movie_main/{i['素材']}", f"/app/input/movie_main/{i['音檔']}")
  78. create_video("/app/input/movie_main/2022_04_15_165219.jpg", "/app/input/bg/樣板.mp4",
  79. "/app/input/people/nwfggznfiy.mp4",
  80. 20)