# openshotExample3.py

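"""Assemble short promotional videos with the libopenshot Python bindings.

The script stitches an opening clip, one segment per row of a CSV asset list,
and a closing clip into a timeline, optionally composites an AI spokesperson
clip rendered by a remote host (reached over rpyc), and finally burns in
subtitles generated from the audio track with autosub. Host addresses, shell
scripts and asset paths below are specific to the original environment.
"""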
import openshot
import re
from PIL import Image, ImageDraw, ImageFont
import pandas as pd
import os
import cv2
import numpy as np
import moviepy.editor as mp
import time
import pysrt
import shutil
import rpyc
import random
import string
import requests
from bs4 import BeautifulSoup
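
# Build a ChromaKey effect for the given RGB colour and fuzz tolerance; it is
# used below to drop the green background from overlay clips.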
def cKey(r, g, b, fuzz):
    col = openshot.Color()
    col.red = openshot.Keyframe(r)
    col.green = openshot.Keyframe(g)
    col.blue = openshot.Keyframe(b)
    return openshot.ChromaKey(col, openshot.Keyframe(fuzz))
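
# Create an FFmpegWriter preconfigured for 1280x720, 30 fps (30000/1000)
# H.264 video with stereo AAC audio, matching the timelines used below.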
def video_writer_init(path):
    w = openshot.FFmpegWriter(path)
    w.SetAudioOptions(True, "aac", 44100, 2, openshot.LAYOUT_STEREO, 3000000)
    w.SetVideoOptions(True, "libx264", openshot.Fraction(30000, 1000), 1280, 720,
                      openshot.Fraction(1, 1), False, False, 3000000)
    return w
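
# Wrap a reader (video or image) in an openshot.Clip on the given layer and
# position, with optional scaling, translation, a chroma-key effect and audio
# muting.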
def video_photo_clip(video=None, layer=None, position=None, end=None,
                     scale_x=1, scale_y=1, location_x=0, location_y=0, ck=None, audio=True):
    clip = openshot.Clip(video)
    clip.Layer(layer)
    clip.Position(position)
    clip.End(end)
    clip.scale_x = openshot.Keyframe(scale_x)
    clip.scale_y = openshot.Keyframe(scale_y)
    clip.location_x = openshot.Keyframe(location_x)
    clip.location_y = openshot.Keyframe(location_y)
    if ck is not None:
        clip.AddEffect(ck)
    if audio:
        clip.has_audio = openshot.Keyframe(1)
    else:
        clip.has_audio = openshot.Keyframe(0)
    return clip
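
# Replace every run of characters that is not CJK, a digit or an ASCII letter
# with a space, except when it sits between two numbers (e.g. "3.5"), which
# is kept unchanged.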
def trim_punctuation(s):
    pat_block = u'[^\u4e00-\u9fff0-9a-zA-Z]+'
    pattern = u'([0-9]+{0}[0-9]+)|{0}'.format(pat_block)
    res = re.sub(pattern, lambda x: x.group(1) if x.group(1) else u" ", s)
    return res
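
# Random lowercase ASCII string, used for temporary file names on the remote
# host.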
def randomString(stringLength=10):
    letters = string.ascii_lowercase
    return ''.join(random.choice(letters) for i in range(stringLength))
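
# Upload a rendered audio file to the remote host over rpyc and trigger its
# conversion script (p9.sh), which presumably renders the AI spokesperson
# clip; return the URL where the result is expected to appear. The host
# address and script path are assumptions tied to the original setup.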
def mp3_to_anchor(fname):
    conn = rpyc.classic.connect("192.168.192.221", 18812)
    fr = open(fname, 'rb')
    ropen = conn.builtins.open
    randname = randomString(10)
    finalname = randomString(10)
    # copy the local file to the remote host
    fw = ropen('/tmp/' + randname + '.mp4', 'wb')
    fw.write(fr.read())
    fw.close()
    fr.close()
    # run the remote conversion script and return the URL of the result
    ros = conn.modules.os
    ros.system('/root/to_video/p9.sh ' + randname + ".mp4 " + finalname + ".mp4")
    return 'http://192.168.192.221/video/' + finalname + '.mp4'
    # conn.execute('import os')
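
# Stream the generated spokesperson clip from the remote host into input/bg/.
# Returns False while the remote render is not ready yet (HTTP 404), which
# the caller uses to poll until the file exists.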
def download_mp4(url, name):
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36'}
    r = requests.get(url, headers=headers, stream=True)
    if r.status_code == 404:
        # the remote render is not finished yet
        return False
    with open('input/bg/ai_spokesgirl_%s.mp4' % (name), 'wb') as f:
        for chunk in r.iter_content(chunk_size=1024):
            if chunk:
                f.write(chunk)
    return True
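
# Assemble the main video: an opening clip, one segment per row of
# 導盲犬協會.csv (audio plus an mp4/jpg asset, optionally composited with a
# background template and the AI spokesperson overlay), and a closing clip,
# rendered to output/test7.mp4. Per-segment readers and clips are stored via
# locals() under generated names; this works in CPython but a plain dict
# would be the safer, more idiomatic choice.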
def text_to_short_vedio_create(title_ad="input/people/peggy1_1_long.mp4", op="input/bg/LOGO_OP_4.mp4",
                               ed="input/bg/LOGO_ED.mp4", bg="input/bg/串場樣板1_long.mp4"):
    # title_ad is currently unused; op/ed are the opening and closing logo
    # clips and bg is the background template for composited segments.
    t = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000), 44100, 2, openshot.LAYOUT_STEREO)
    t.Open()

    # chroma-key (green screen removal) parameters
    # ck = cKey(0, 254, 0, 270)
    ck = cKey(0, 255, 0, 320)
    ck_anchor = None

    # current position on the timeline, in seconds
    time_ = 0

    # CSV columns used below: 音檔 = audio file, 素材 = visual asset (mp4/jpg),
    # 是否要場景 = whether to composite the scene with the AI spokesperson
    csv_use = pd.read_csv("導盲犬協會.csv")
    csv_use = csv_use.dropna(how='all')
    csv_use.reset_index(inplace=True)

    # opening clip
    anchor_op = openshot.FFmpegReader(op)
    anchor_op.Open()
    anchor_clip_op = video_photo_clip(video=anchor_op, layer=2, scale_x=1, scale_y=1,
                                      location_x=0, location_y=0, position=time_,
                                      end=anchor_op.info.duration, ck=ck_anchor, audio=True)
    t.AddClip(anchor_clip_op)
    anchor_op.Close()
    time_ += anchor_op.info.duration

    # one segment per CSV row
    for i in range(len(csv_use)):
        # for i in range(3):
        # audio track for this segment (scaled to 0x0 so only the sound is used)
        pwd_ = str(csv_use.loc[i, ['音檔']].values[0])
        locals()['anchor_music' + str(i)] = openshot.FFmpegReader("導盲犬協會影片素材2/" + pwd_)
        locals()['anchor_music' + str(i)].Open()
        locals()['anchor_music' + str(i) + 'clip'] = video_photo_clip(
            video=locals()['anchor_music' + str(i)], layer=3, scale_x=0, scale_y=0,
            location_x=0, location_y=0, position=time_,
            end=locals()['anchor_music' + str(i)].info.duration, ck=ck_anchor, audio=True)
        t.AddClip(locals()['anchor_music' + str(i) + 'clip'])
        locals()['anchor_music' + str(i)].Close()

        if str(csv_use.loc[i, ['是否要場景']].values[0]) == "是":
            # render the audio to its own file, send it to the remote host and
            # poll until the AI spokesperson clip can be downloaded
            scale_x_use = 0.59
            scale_y_use = 0.59
            t1 = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000), 44100, 2, openshot.LAYOUT_STEREO)
            t1.Open()
            pwd_ = str(csv_use.loc[i, ['音檔']].values[0])
            locals()['anchor_music_t' + str(i)] = openshot.FFmpegReader("導盲犬協會影片素材2/" + pwd_)
            locals()['anchor_music_t' + str(i)].Open()
            locals()['anchor_music_t' + str(i) + 'clip'] = video_photo_clip(
                video=locals()['anchor_music_t' + str(i)], layer=3, scale_x=0, scale_y=0,
                location_x=0, location_y=0, position=0,
                end=locals()['anchor_music_t' + str(i)].info.duration, ck=ck_anchor, audio=True)
            t1.AddClip(locals()['anchor_music_t' + str(i) + 'clip'])
            locals()['anchor_music_t' + str(i)].Close()

            w = video_writer_init("input/people/%s.mp4" % ('anchor_music' + str(i)))
            w.Open()
            frames = int(t1.info.fps) * int(locals()['anchor_music_t' + str(i)].info.duration)
            for n in range(frames):
                f = t1.GetFrame(n)
                w.WriteFrame(f)
            w.Close()
            t1.Close()

            fname = mp3_to_anchor("input/people/%s.mp4" % ('anchor_music' + str(i)))
            # print(fname)
            time.sleep(180)
            while True:
                result = download_mp4(fname, str(i))
                if result:
                    break
                print('等待...')  # still waiting for the remote render
                time.sleep(60)
        else:
            scale_x_use = 1
            scale_y_use = 1

        # visual asset for this segment: video or still image
        choose = str(csv_use.loc[i, ['素材']].values[0]).split(".")[-1]
        pwd_p1 = str(csv_use.loc[i, ['素材']].values[0])
        if choose == 'mp4':
            locals()['anchor' + str(i)] = openshot.FFmpegReader("導盲犬協會影片素材2/" + pwd_p1)
            locals()['anchor' + str(i)].Open()
            locals()['anchor' + str(i) + 'clip'] = video_photo_clip(
                video=locals()['anchor' + str(i)], layer=4, scale_x=scale_x_use, scale_y=scale_y_use,
                location_x=-0.04, location_y=-0.04, position=time_,
                end=locals()['anchor_music' + str(i)].info.duration, ck=ck_anchor, audio=False)
            t.AddClip(locals()['anchor' + str(i) + 'clip'])
            locals()['anchor' + str(i)].Close()
            if str(csv_use.loc[i, ['是否要場景']].values[0]) == "是":
                # chroma-keyed spokesperson overlay plus background template
                locals()['anchor_ad' + str(i)] = openshot.FFmpegReader('input/bg/ai_spokesgirl_%s.mp4' % (str(i)))
                locals()['anchor_ad' + str(i)].Open()
                locals()['anchor_clip_ad' + str(i)] = video_photo_clip(
                    video=locals()['anchor_ad' + str(i)], layer=6, scale_x=0.8, scale_y=0.8,
                    location_x=0.38, location_y=0.35, position=time_,
                    end=locals()['anchor_music' + str(i)].info.duration, ck=ck, audio=False)
                t.AddClip(locals()['anchor_clip_ad' + str(i)])
                locals()['anchor_ad' + str(i)].Close()
                locals()['anchor_bg' + str(i)] = openshot.FFmpegReader(bg)
                locals()['anchor_bg' + str(i)].Open()
                locals()['anchor_clip_bg' + str(i)] = video_photo_clip(
                    video=locals()['anchor_bg' + str(i)], layer=2, scale_x=1, scale_y=1,
                    location_x=0, location_y=0, position=time_,
                    end=locals()['anchor_music' + str(i)].info.duration, ck=ck_anchor, audio=False)
                t.AddClip(locals()['anchor_clip_bg' + str(i)])
                locals()['anchor_bg' + str(i)].Close()
        elif choose == 'jpg':
            locals()['anchor' + str(i)] = openshot.QtImageReader("導盲犬協會影片素材2/" + pwd_p1)
            locals()['anchor' + str(i)].Open()
            locals()['anchor' + str(i) + 'clip'] = video_photo_clip(
                video=locals()['anchor' + str(i)], layer=4, scale_x=scale_x_use, scale_y=scale_y_use,
                location_x=-0.04, location_y=-0.04, position=time_,
                end=locals()['anchor_music' + str(i)].info.duration, ck=ck_anchor, audio=False)
            t.AddClip(locals()['anchor' + str(i) + 'clip'])
            locals()['anchor' + str(i)].Close()
            if str(csv_use.loc[i, ['是否要場景']].values[0]) == "是":
                locals()['anchor_ad' + str(i)] = openshot.FFmpegReader("input/bg/ai_spokesgirl_%s.mp4" % (str(i)))
                locals()['anchor_ad' + str(i)].Open()
                locals()['anchor_clip_ad' + str(i)] = video_photo_clip(
                    video=locals()['anchor_ad' + str(i)], layer=6, scale_x=0.8, scale_y=0.8,
                    location_x=0.38, location_y=0.35, position=time_,
                    end=locals()['anchor_music' + str(i)].info.duration, ck=ck, audio=False)
                t.AddClip(locals()['anchor_clip_ad' + str(i)])
                locals()['anchor_ad' + str(i)].Close()
                locals()['anchor_bg' + str(i)] = openshot.FFmpegReader(bg)
                locals()['anchor_bg' + str(i)].Open()
                locals()['anchor_clip_bg' + str(i)] = video_photo_clip(
                    video=locals()['anchor_bg' + str(i)], layer=2, scale_x=1, scale_y=1,
                    location_x=0, location_y=0, position=time_,
                    end=locals()['anchor_music' + str(i)].info.duration, ck=ck_anchor, audio=False)
                t.AddClip(locals()['anchor_clip_bg' + str(i)])
                locals()['anchor_bg' + str(i)].Close()

        time_ += locals()['anchor_music' + str(i)].info.duration

    # closing clip
    anchor_ed = openshot.FFmpegReader(ed)
    anchor_ed.Open()
    anchor_clip_ed = video_photo_clip(video=anchor_ed, layer=2, scale_x=1, scale_y=1,
                                      location_x=0, location_y=0, position=time_,
                                      end=anchor_ed.info.duration, ck=ck_anchor, audio=True)
    time_ += anchor_ed.info.duration
    t.AddClip(anchor_clip_ed)
    anchor_ed.Close()

    # render the assembled timeline
    w = video_writer_init("output/test7.mp4")
    w.Open()
    frames = int(t.info.fps) * int(time_)
    for n in range(frames):
        f = t.GetFrame(n)
        w.WriteFrame(f)
    t.Close()
    w.Close()

# Convert a line of subtitle text into a PNG on a pure green background,
# which is later chroma-keyed onto the video.
def txt2image(content, save_target, lang='zh', size=26, fon="font/DFT_B7.ttc"):
    unicode_text = trim_punctuation(content)
    font = ''
    if lang == 'zh':
        font = ImageFont.truetype(font=fon, size=size)
    else:
        font = ImageFont.truetype(font="font/arial.ttf", size=size)
    W, H = (1280, 500)
    canvas = Image.new('RGB', (W, H), "#00FF00")
    draw = ImageDraw.Draw(canvas)
    text = content
    if "\n" in text:
        # two-line subtitle: centre each line separately
        w, h = draw.textsize(text.split("\n")[0], font=font)
        # draw.text(((W-w)/2,0), text[0:18],'black', font)
        text_border(draw, (W - w) / 2, 0, text.split("\n")[0], font, 'black', 'white')
        w, h = draw.textsize(text.split("\n")[1], font=font)
        # draw.text(((W-w)/2,h+2), text[18:],'black', font)
        text_border(draw, (W - w) / 2, h + 2, text.split("\n")[1], font, 'black', 'white')
    else:
        w, h = draw.textsize(content, font=font)
        # draw.text(((W-w)/2,0), text,'black', font)
        text_border(draw, (W - w) / 2, 0, text, font, 'black', 'white')
    canvas.save(save_target, "PNG")
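
# Draw the text several times with small offsets in the shadow colour and
# once in the fill colour, producing a simple outlined-text effect for the
# subtitles.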
def text_border(draw, x, y, text, font, shadowcolor, fillcolor):
    draw.text((x - 1, y), text, font=font, fill=shadowcolor)
    draw.text((x + 1, y), text, font=font, fill=shadowcolor)
    draw.text((x, y - 1), text, font=font, fill=shadowcolor)
    draw.text((x, y + 1), text, font=font, fill=shadowcolor)
    draw.text((x - 1, y + 1), text, font=font, fill=shadowcolor)
    draw.text((x + 1, y - 1), text, font=font, fill=shadowcolor)
    draw.text((x - 1, y - 1), text, font=font, fill=shadowcolor)
    draw.text((x + 1, y + 1), text, font=font, fill=shadowcolor)
    # thicker border
    draw.text((x - 2, y - 2), text, font=font, fill=shadowcolor)
    draw.text((x + 2, y - 2), text, font=font, fill=shadowcolor)
    draw.text((x - 2, y + 2), text, font=font, fill=shadowcolor)
    draw.text((x + 2, y + 2), text, font=font, fill=shadowcolor)
    # now draw the text over it
    draw.text((x, y), text, font=font, fill=fillcolor)
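
# Overlay subtitles on an existing video: run autosub on the audio track
# (unless an .srt file is supplied), render each subtitle line to a
# green-background PNG with txt2image, chroma-key it onto the timeline at the
# matching timestamps, and write the result to output_filename.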
def text_to_short_vedio(mp4_file="input/example/test3.mp4", sound_file=None,
                        output_filename="output/demo.mp4", text_font="font/DFT_B7.ttc"):
    t = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000), 44100, 2, openshot.LAYOUT_STEREO)
    t.Open()

    # chroma-key (green screen removal) parameters
    ck = cKey(0, 254, 0, 270)
    ck_anchor = cKey(0, 255, 0, 320)

    anchor = openshot.FFmpegReader(mp4_file)
    anchor.Open()
    anchor_clip = video_photo_clip(video=anchor, layer=2, scale_x=1, scale_y=1,
                                   location_x=0, location_y=0, position=0,
                                   end=anchor.info.duration, audio=True)
    t.AddClip(anchor_clip)
    anchor.Close()

    number = 0
    sound_srt_file = ""
    # generate an srt transcript from the audio automatically (unless an
    # existing .srt file was passed in)
    if sound_file is not None and ".srt" in sound_file:
        sound_srt_file = sound_file
    elif sound_file is not None:
        cmd = "autosub -S zh-TW -D zh-TW " + sound_file
        os.system(cmd)
        sound_srt_file = sound_file.split('.')[0] + ".srt"

    # open the srt file and add one subtitle image clip per entry
    try:
        subs = pysrt.open(sound_srt_file)
        text_form = []
        for context in subs:
            # print(context.start.minutes*60+context.start.seconds+ 0.001*context.start.milliseconds)
            end = context.end - context.start
            end_timeStamp = (end.minutes * 60 + end.seconds + 0.001 * end.milliseconds)
            start_timeStamp = (context.start.minutes * 60 + context.start.seconds + 0.001 * context.start.milliseconds)
            text_form.append({'text': context.text, 'start': start_timeStamp, 'end': end_timeStamp,
                              'size': 36, 'font': text_font})
        for text_tmp in text_form:
            file_name = "tmp/save_target_" + str(number) + ".png"
            txt2image(text_tmp['text'], file_name, lang='zh', size=text_tmp['size'], fon=text_tmp['font'])
            exec('text_anchor_{} = openshot.QtImageReader("tmp/save_target_{}.png")'.format(number, number))
            exec('text_anchor_{}.Open()'.format(number))
            exec('text_anchor_clip_{} = video_photo_clip(video=text_anchor_{},layer=4,scale_x=1,scale_y=1,location_x=0,location_y=0.67,position=text_tmp["start"], end=text_tmp["end"],ck=ck_anchor,audio=True)'.format(number, number))
            exec('t.AddClip(text_anchor_clip_{})'.format(number))
            exec('text_anchor_{}.Close()'.format(number))
            number = number + 1
    except Exception:
        print("無法開啟srt檔案(字幕產生失敗)")  # could not open the srt file (subtitle generation failed)

    w = video_writer_init(output_filename)
    w.Open()
    frames = int(t.info.fps) * int(anchor.info.duration)
    for n in range(frames):
        f = t.GetFrame(n)
        w.WriteFrame(f)
    t.Close()
    w.Close()

    # remove temporary subtitle images
    shutil.rmtree('tmp')
    os.mkdir('tmp')
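
# Expected working layout (inferred from the paths above): the input/,
# output/, tmp/ and font/ directories and the CSV/asset folders must already
# exist.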
if __name__ == '__main__':
    # picture_change()
    text_to_short_vedio_create()
    text_to_short_vedio(mp4_file="output/test7.mp4", sound_file='output/test7.mp4', text_font="font/DFT_R7.ttc")