- import util,os, math, time,queue,threading, rpyc
- import openshot
- rootPath = './'
- def filePrepare(name_hash):
- cPath = rootPath+name_hash+'/'
- try:
- os.mkdir(cPath)
- except FileExistsError:
- pass
- sub_dict,img_dict = util.parse_script("script.txt")
- util.generate_subtitle_image_from_dict(cPath, sub_dict)
- for imgd in img_dict:
- print(imgd)
- util.downloadFromDrive(cPath,imgd['imgid'],imgd['index'])
- util.call_anchor(cPath+'speech.mp3',7)
- return sub_dict,img_dict
- def genVideo(name_hash,sub_dict,img_dict):
- basicPath = rootPath+'basic/'
- cPath = rootPath+name_hash+'/'
- ck=util.cKey(0,254,0,270)
- ck_anchor=util.cKey(0,255,1,320)
- t = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000), 44100, 2, openshot.LAYOUT_STEREO)
- t.Open()
- main_timer = 0
- LOGO_OP = openshot.FFmpegReader(basicPath+"LOGO_OP_4.mp4")
- LOGO_OP.Open() # Open the reader
- head_duration = LOGO_OP.info.duration
- LOGO_OP_clip = util.video_photo_clip(vid=LOGO_OP,layer=4,position=0,end=head_duration)
- t.AddClip(LOGO_OP_clip)
- main_timer+=head_duration
- anchor = openshot.FFmpegReader(cPath+"/speaker.mp4")
- anchor.Open()
- anchor_clip = util.video_photo_clip(vid=anchor,layer=4,scale_x=0.65,scale_y=0.65,
- location_x=0.35,location_y=0.25,position=main_timer, end=anchor.info.duration,ck=ck_anchor,audio=False)
- t.AddClip(anchor_clip)
- speech = openshot.FFmpegReader(cPath+"/speech.mp3")
- speech.Open()
- speech_clip = openshot.Clip(speech)
- speech_clip.Position(main_timer)
- speech_clip.End(anchor.info.duration)
- t.AddClip(speech_clip)
- main_timer += anchor.info.duration
- anchor.Close()
- speech.Close()
- sub_img_list = [None] * len(sub_dict)
- sub_clip_list = [None] * len(sub_dict)
- for sub_obj in sub_dict:
- idx = int(sub_obj['index'])
- sub_img_list[idx] = openshot.QtImageReader(cPath +str(idx)+'.png')
- sub_img_list[idx].Open()
- sub_clip_list[idx] = util.video_photo_clip(vid=sub_img_list[idx], layer=5,location_x=0.069, location_y=0.89
- ,position=head_duration+sub_obj['start'],end=sub_obj['duration'])
- t.AddClip(sub_clip_list[idx])
- sub_img_list[idx].Close()
-
- img_list = [None] * len(img_dict)
- img_clip_list = [None] * len(img_dict)
- for img_d in img_dict:
- idx = int(img_d['index'])
- print(cPath +str(idx) +'img.jpg')
- try:
- img_list[idx] = openshot.QtImageReader(cPath +str(idx) +'img.jpg')
- img_list[idx].Open()
- except:
- img_list[idx] = openshot.QtImageReader(cPath +str(idx) +'img.png')
- img_list[idx].Open()
- img_clip_list[idx] = util.video_photo_clip(vid=img_list[idx], layer=3
- ,position=head_duration+img_d['start'],end=img_d['duration'])
- t.AddClip(img_clip_list[idx])
- img_list[idx].Close()
- w = util.video_writer_init("output.mp4")
- w.Open()
- frames = int(t.info.fps)*int(main_timer)
- for n in range(frames):
- f=t.GetFrame(n)
- w.WriteFrame(f)
- t.Close()
- w.Close()
- class podcast_service(rpyc.Service):
- def exposed_gen_video(self,name_hash, sub_dict, img_dict):
- genVideo(name_hash, sub_dict, img_dict)
- from rpyc.utils.server import ThreadedServer
- t = ThreadedServer(podcast_service, port=8838)
- print('service started')
- t.start()