
Delete 'main.py'

oak 2 years ago
parent
commit
c15792ab43
1 changed file with 0 additions and 465 deletions

+ 0 - 465
main.py

@@ -1,465 +0,0 @@
-import openshot
-import re
-import csv
-from datetime import datetime
-from PIL import Image, ImageDraw, ImageFont
-import pandas as pd
-import os
-import cv2
-import numpy as np
-# import moviepy.editor as mp
-import time
-import pysrt
-import shutil
-import rpyc
-import random
-import string
-import requests
-from bs4 import BeautifulSoup
-import zipfile
-
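-# Build an openshot ChromaKey effect keyed to the given RGB colour with the given fuzz tolerance.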
-def cKey(r,g,b,fuzz):
-    col=openshot.Color()
-    col.red=openshot.Keyframe(r)
-    col.green=openshot.Keyframe(g)
-    col.blue=openshot.Keyframe(b)
-    return openshot.ChromaKey(col, openshot.Keyframe(fuzz))
-
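-# Create an FFmpegWriter configured for 1280x720 / 30 fps H.264 video with stereo AAC audio.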
-def video_writer_init(path):
-    w = openshot.FFmpegWriter(path)
-    w.SetAudioOptions(True, "aac", 44100, 2, openshot.LAYOUT_STEREO, 3000000)
-    w.SetVideoOptions(True, "libx264", openshot.Fraction(30000, 1000), 1280, 720,
-        openshot.Fraction(1, 1), False, False, 3000000)
-    return w
-
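-# Wrap a reader (video or image) in an openshot Clip with layer, timing, scale,
-# position, an optional chroma-key effect and audio enabled/disabled.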
-def video_photo_clip(video=None, layer=None, position=None, end=None,
-    scale_x=1, scale_y=1, location_x=0, location_y=0, ck=None, audio=True):
-    clip = openshot.Clip(video)
-    clip.Layer(layer)
-    clip.Position(position)
-    clip.End(end)
-    clip.scale_x=openshot.Keyframe(scale_x)
-    clip.scale_y=openshot.Keyframe(scale_y)
-    clip.location_x=openshot.Keyframe(location_x)
-    clip.location_y=openshot.Keyframe(location_y)
-
-    if ck is not None:
-        clip.AddEffect(ck)
-    if audio:
-        clip.has_audio=openshot.Keyframe(1)
-    else:
-        clip.has_audio=openshot.Keyframe(0)
-    return clip
-
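-# Replace runs of punctuation (anything that is not CJK, a digit or an ASCII letter) with a space,
-# while keeping digit-punctuation-digit sequences (e.g. dates) intact.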
-def trim_punctuation(s):
-    pat_block = u'[^\u4e00-\u9fff0-9a-zA-Z]+'
-    pattern = u'([0-9]+{0}[0-9]+)|{0}'.format(pat_block)
-    res = re.sub(pattern, lambda x: x.group(1) if x.group(1) else u" " ,s)
-    return res
-
-
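-# Return a random lowercase string of the given length, used for temporary file names.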
-def randomString(stringLength=10):
-    letters = string.ascii_lowercase
-    return ''.join(random.choice(letters) for i in range(stringLength))
-
-
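-# Upload the local file to the remote rpyc host, run /root/to_video/p9.sh there,
-# and return the URL where the resulting mp4 will be served.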
-def mp3_to_anchor(fname):
-
-    conn = rpyc.classic.connect("192.168.192.221",18812)
-    fr=open(fname,'rb')
-    ropen = conn.builtins.open
-    randname=randomString(10)
-    finalname=randomString(10)
-
-    fw=ropen('/tmp/'+randname+'.mp4','wb')
-    fw.write(fr.read())
-    fw.close()
-    ros = conn.modules.os
-    ros.system('/root/to_video/p9.sh '+randname+".mp4 "+finalname+".mp4")
-    return 'http://192.168.192.221/video/'+finalname+'.mp4'
-#    conn.execute('import os')
-
-
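-# Stream-download the mp4 at url into input_self/AI_girl/; return False on HTTP 404, True otherwise.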
-def download_mp4(url,name):
-    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36'}
-    with open('input_self/AI_girl/ai_spokesgirl_%s.mp4'%(name),'wb') as f:
-        r = requests.get(url, headers=headers, stream=True)
-        if r.status_code == 404:
-            return False
-        for chunk in r.iter_content(chunk_size=1024):
-            if chunk:
-                f.write(chunk)
-    return True
-
-
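-# Assemble the main video on an openshot Timeline: extract the input zip, then for each CSV row
-# add the narration audio, the material clip/image and, when requested, the AI presenter overlay;
-# wrap everything with the opening/closing logo clips and render to input_self/tmp1/test.mp4.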
-def text_to_short_vedio_create(read_csv_use="導盲犬協會.csv", pwd_use="導盲犬協會影片素材2/",
-        op="input_self/LOGO_OP_4.mp4", ed="input_self/LOGO_ED.mp4",
-        bg="input_self/串場樣板1_long.mp4", input_zip="input/input_data.zip"):
-    t = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000), 44100, 2, openshot.LAYOUT_STEREO)
-    t.Open()
-
-    files=os.listdir("input/")
-    print(files)
-    for i in files:
-        if i[-4:]==".zip":
-            input_zip="input/"+i
-            break
-
-    # try:
-    #     os.remove("./templates/index.html")
-    # except:
-    #     pass
-    with zipfile.ZipFile(input_zip, 'r') as zf:
-        for fn in zf.namelist():
-
-            right_fn = fn.encode('cp437').decode('big5')  # re-decode the zip entry name from CP437 to Big5 so the filename is correct
-            check_p=right_fn.split("/")
-            if right_fn[-1]=="/" :
-                os.mkdir("input/"+right_fn[:-1])
-                pwd_use=right_fn
-                break
-            if len(check_p)==2:
-                os.mkdir("input/"+check_p[0])
-                pwd_use=check_p[0]+"/"
-                break
-
-    with zipfile.ZipFile(input_zip, 'r') as zf:
-        for fn in zf.namelist():
-            right_fn = fn.encode('cp437').decode('big5')  # re-decode the zip entry name from CP437 to Big5 so the filename is correct
-            check_p=right_fn.split("/")
-            if right_fn[-1]=="/" :
-                continue
-            if right_fn[-4:]==".csv":
-                read_csv_use = right_fn
-            with open("input/"+right_fn, 'wb') as output_file:  # create and open the new file
-                with zf.open(fn, 'r') as origin_file:  # open the original file
-                    shutil.copyfileobj(origin_file, output_file)  # copy the original file's contents into the new file
-
-    # chroma-key (background removal) parameters
-    # ck=cKey(0,254,0,270)
-    ck=cKey(0,255,0,320)
-    ck_anchor=None
-    # timeline position cursor (seconds)
-    time_= 0
-
-    csv_use=pd.read_csv("input/"+read_csv_use)
-    csv_use=csv_use.dropna(how='all')
-    csv_use.reset_index(inplace=True)
-
-    anchor_op = openshot.FFmpegReader(op)
-    anchor_op.Open()
-    anchor_clip_op = video_photo_clip(video=anchor_op,layer=2,scale_x=1,scale_y=1,
-            location_x=0,location_y=0,position=time_, end=anchor_op.info.duration,ck=ck_anchor,audio=True)
-    
-    t.AddClip(anchor_clip_op)
-    anchor_op.Close()
-    time_+=anchor_op.info.duration
-
-
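-    # per-row reader/clip objects are created dynamically via locals()['anchor...'+str(i)],
-    # so every CSV row gets its own set of variables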
-    for i in range(len(csv_use)):
-    # for i in range(3):
-        pwd_=str(csv_use.loc[i,['音檔']].values[0])
-
-        
-        locals()['anchor_music'+str(i)] = openshot.FFmpegReader("input/"+pwd_use+pwd_)
-        locals()['anchor_music'+str(i)].Open()
-        locals()['anchor_music'+str(i)+'clip'] = video_photo_clip(video=locals()['anchor_music'+str(i)],layer=3,scale_x=0,scale_y=0,
-                   location_x=0,location_y=0,position=time_, end=locals()['anchor_music'+str(i)].info.duration,ck=ck_anchor,audio=True)
-        t.AddClip(locals()['anchor_music'+str(i)+'clip'])
-        locals()['anchor_music'+str(i)].Close()
-
-        if str(csv_use.loc[i,['是否要場景']].values[0])=="是":
-            scale_x_use = 0.59
-            scale_y_use = 0.59
-            t1 = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000), 44100, 2, openshot.LAYOUT_STEREO)
-            t1.Open()
-            pwd_=str(csv_use.loc[i,['音檔']].values[0])
-
-        
-            locals()['anchor_music_t'+str(i)] = openshot.FFmpegReader("input/"+pwd_use+pwd_)
-            locals()['anchor_music_t'+str(i)].Open()
-            locals()['anchor_music_t'+str(i)+'clip'] = video_photo_clip(video=locals()['anchor_music_t'+str(i)],layer=3,scale_x=0,scale_y=0,
-                   location_x=0,location_y=0,position=0, end=locals()['anchor_music_t'+str(i)].info.duration,ck=ck_anchor,audio=True)
-            t1.AddClip(locals()['anchor_music_t'+str(i)+'clip'])
-            locals()['anchor_music_t'+str(i)].Close()
-
-            w = video_writer_init("input_self/tmp1/%s.mp4"%('anchor_music'+str(i)))
-            w.Open()
-            frames = int(t1.info.fps)*int(locals()['anchor_music_t'+str(i)].info.duration)
-            for n in range(frames):
-                f=t1.GetFrame(n)
-                w.WriteFrame(f)
-            w.Close()
-            t1.Close()
-
-            fname=mp3_to_anchor("input_self/tmp1/%s.mp4"%('anchor_music'+str(i)))
-            # print(fname)
-            time.sleep(180)
-            while True:
-                result = download_mp4(fname,str(i))
-                if result:
-                    break
-                print('waiting...')
-                time.sleep(60)
-
-
-        else:
-            scale_x_use = 1
-            scale_y_use = 1
-
-        choose=str(csv_use.loc[i,['素材']].values[0]).split(".")[-1]
-        pwd_p1=str(csv_use.loc[i,['素材']].values[0])
-
-
-
-
-        if choose == 'mp4':
-            locals()['anchor'+str(i)] = openshot.FFmpegReader("input/"+pwd_use+pwd_p1)
-            locals()['anchor'+str(i)].Open()
-            locals()['anchor'+str(i)+'clip'] = video_photo_clip(video=locals()['anchor'+str(i)],layer=4,scale_x=scale_x_use,scale_y=scale_y_use,
-               location_x=-0.04,location_y=-0.04,position=time_, end=locals()['anchor_music'+str(i)].info.duration,ck=ck_anchor,audio=False)
-        
-            t.AddClip(locals()['anchor'+str(i)+'clip'])
-            locals()['anchor'+str(i)].Close()
-
-            if str(csv_use.loc[i,['是否要場景']].values[0])=="是":
-                locals()['anchor_ad'+str(i)] = openshot.FFmpegReader('input_self/AI_girl/ai_spokesgirl_%s.mp4'%(str(i)))
-                locals()['anchor_ad'+str(i)].Open()
-                locals()['anchor_clip_ad'+str(i)] = video_photo_clip(video=locals()['anchor_ad'+str(i)],layer=6,scale_x=0.8,scale_y=0.8,
-                   location_x=0.38,location_y=0.35,position=time_, end=locals()['anchor_music'+str(i)].info.duration,ck=ck,audio=False)
-                t.AddClip(locals()['anchor_clip_ad'+str(i)])
-                locals()['anchor_ad'+str(i)].Close()
-
-                locals()['anchor_bg'+str(i)] = openshot.FFmpegReader(bg)
-                locals()['anchor_bg'+str(i)].Open()
-                locals()['anchor_clip_bg'+str(i)] = video_photo_clip(video=locals()['anchor_bg'+str(i)],layer=2,scale_x=1,scale_y=1,
-                   location_x=0,location_y=0,position=time_, end=locals()['anchor_music'+str(i)].info.duration,ck=ck_anchor,audio=False)
-                t.AddClip(locals()['anchor_clip_bg'+str(i)])
-                locals()['anchor_bg'+str(i)].Close()
-         
-
-        elif choose == 'jpg' or choose == 'png':
-            locals()['anchor'+str(i)] = openshot.QtImageReader("input/"+pwd_use+pwd_p1)
-            locals()['anchor'+str(i)].Open()
-            locals()['anchor'+str(i)+'clip'] = video_photo_clip(video=locals()['anchor'+str(i)],layer=4,scale_x=scale_x_use,scale_y=scale_y_use,
-               location_x=-0.04,location_y=-0.04,position=time_, end=locals()['anchor_music'+str(i)].info.duration,ck=ck_anchor,audio=False)
-            t.AddClip(locals()['anchor'+str(i)+'clip'])
-            locals()['anchor'+str(i)].Close()
-            if str(csv_use.loc[i,['是否要場景']].values[0])=="是":
-                locals()['anchor_ad'+str(i)] = openshot.FFmpegReader("input_self/AI_girl/ai_spokesgirl_%s.mp4"%(str(i)))
-                locals()['anchor_ad'+str(i)].Open()
-                locals()['anchor_clip_ad'+str(i)] = video_photo_clip(video=locals()['anchor_ad'+str(i)],layer=6,scale_x=0.8,scale_y=0.8,
-                   location_x=0.38,location_y=0.35,position=time_, end=locals()['anchor_music'+str(i)].info.duration,ck=ck,audio=False)
-                t.AddClip(locals()['anchor_clip_ad'+str(i)])
-                locals()['anchor_ad'+str(i)].Close()
-
-                locals()['anchor_bg'+str(i)] = openshot.FFmpegReader(bg)
-                locals()['anchor_bg'+str(i)].Open()
-                locals()['anchor_clip_bg'+str(i)] = video_photo_clip(video=locals()['anchor_bg'+str(i)],layer=2,scale_x=1,scale_y=1,
-                   location_x=0,location_y=0,position=time_, end=locals()['anchor_music'+str(i)].info.duration,ck=ck_anchor,audio=False)
-                t.AddClip(locals()['anchor_clip_bg'+str(i)])
-                locals()['anchor_bg'+str(i)].Close()
-
-        time_+=locals()['anchor_music'+str(i)].info.duration
-
-    anchor_ed = openshot.FFmpegReader(ed)
-    anchor_ed.Open()
-    anchor_clip_ed = video_photo_clip(video=anchor_ed,layer=2,scale_x=1,scale_y=1,
-            location_x=0,location_y=0,position=time_, end=anchor_ed.info.duration,ck=ck_anchor,audio=True)
-    time_+=anchor_ed.info.duration
-    t.AddClip(anchor_clip_ed)
-    anchor_ed.Close()
-
-    w = video_writer_init("input_self/tmp1/test.mp4")
-    w.Open()
-    
-    frames = int(t.info.fps)*int(time_)
-    for n in range(frames):
-        f=t.GetFrame(n)
-        w.WriteFrame(f)
-
-    t.Close()
-    w.Close()
-    shutil.rmtree('input')
-    os.mkdir('input')
-    shutil.rmtree('input_self/AI_girl')
-    os.mkdir('input_self/AI_girl')
-# convert text to an image (drawn on a green canvas so it can be chroma-keyed later)
-def txt2image(content, save_target,lang='zh',size=26,fon="input_self/font/DFT_B7.ttc"):
-    unicode_text = trim_punctuation(content)
-    font = ''
-    if lang=='zh':
-        font = ImageFont.truetype(font=fon, size=size)
-    else :
-        font = ImageFont.truetype(font="input_self/font/arial.ttf", size=size)
-    
-    W, H = (1280,500)
-    canvas = Image.new('RGB', (W, H), "#00FF00")
-    draw = ImageDraw.Draw(canvas)
-    
-    text= content
-    if "\n" in text:
-        w, h = draw.textsize(text.split("\n")[0],font = font)
-        #draw.text(((W-w)/2,0), text[0:18],'black', font)
-        text_border(draw,(W-w)/2,0,text.split("\n")[0],font,'black','white')
-        w, h = draw.textsize(text.split("\n")[1],font = font)
-        #draw.text(((W-w)/2,h+2), text[18:],'black', font)
-        text_border(draw,(W-w)/2,h+2,text.split("\n")[1],font,'black','white')
-    else:
-        w, h = draw.textsize(content,font = font)
-        #draw.text(((W-w)/2,0), text,'black', font)
-        text_border(draw,(W-w)/2,0,text,font,'black','white')
-    canvas.save(save_target, "PNG")
-
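-# Draw text with a simple outline by stamping the shadow colour at offsets around (x, y)
-# and then drawing the fill colour on top.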
-def text_border(draw,x,y,text,font,shadowcolor,fillcolor):
-    draw.text((x-1, y), text, font=font, fill=shadowcolor)
-    draw.text((x+1, y), text, font=font, fill=shadowcolor)
-    draw.text((x, y-1), text, font=font, fill=shadowcolor)
-    draw.text((x, y+1), text, font=font, fill=shadowcolor)
-
-    draw.text((x-1, y+1), text, font=font, fill=shadowcolor)
-    draw.text((x+1, y-1), text, font=font, fill=shadowcolor)
-    draw.text((x-1, y-1), text, font=font, fill=shadowcolor)
-    draw.text((x+1, y+1), text, font=font, fill=shadowcolor)
-    # thicker border
-    draw.text((x-2, y-2), text, font=font, fill=shadowcolor)
-    draw.text((x+2, y-2), text, font=font, fill=shadowcolor)
-    draw.text((x-2, y+2), text, font=font, fill=shadowcolor)
-    draw.text((x+2, y+2), text, font=font, fill=shadowcolor)
-
-    # now draw the text over it
-    draw.text((x, y), text, font=font, fill=fillcolor)
-
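-# Convert an .srt subtitle file into a CSV of (index, start, end, text) rows and return the CSV path.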
-def srt_to_csv(srt_file):
-    subs = pysrt.open(srt_file)
-    csv_file = srt_file.split('.')[0] + ".csv"
-    with open(csv_file, 'w', newline='') as csvfile:
-        # create the CSV writer
-        writer = csv.writer(csvfile)
-        for context in subs:
-            writer.writerow([context.index, context.start,context.end, context.text])
-    return csv_file
-
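-# Parse the subtitle CSV into a list of dicts holding each caption's text, start time and
-# duration in seconds, plus the font and size used when rendering it.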
-def csv_to_text(csv_file,text_font):
-    text_form = []
-    with open(csv_file, newline='') as csvfile:
-
-        # read the CSV file contents
-        rows = csv.reader(csvfile)
-
-        # loop over each subtitle row
-        for row in rows:
-            start = datetime.strptime(row[1], "%H:%M:%S,%f")
-            end = datetime.strptime(row[2], "%H:%M:%S,%f") - datetime.strptime(row[1], "%H:%M:%S,%f")
-            end_timeStamp=end.seconds+0.000001*end.microseconds
-            start_timeStamp=start.minute*60+start.second+ 0.000001*start.microsecond
-            text_form.append({'text':row[3],'start':start_timeStamp,'end':end_timeStamp,'size':36,'font':text_font})
-    
-    return text_form
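-
-# Burn subtitles into mp4_file: generate or read the srt for sound_file, render each caption
-# to a PNG, overlay the PNGs on the timeline with chroma keying, and write to output_filename.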
-def text_to_short_vedio(mp4_file, sound_file, output_filename, text_font):
-    t = openshot.Timeline(1280, 720, openshot.Fraction(30000, 1000), 44100, 2, openshot.LAYOUT_STEREO)
-    t.Open()
-    
-    # chroma-key (background removal) parameters
-    ck = cKey(0, 254, 0, 270)
-    ck_anchor = cKey(0, 255, 0, 320)
-
-    anchor = openshot.FFmpegReader(mp4_file)
-    anchor.Open()
-    anchor_clip = video_photo_clip(video=anchor,layer=2,scale_x=1,scale_y=1,
-            location_x=0,location_y=0,position=0, end=anchor.info.duration,audio=True)
-    t.AddClip(anchor_clip)
-    anchor.Close()
-    number = 0
-
-    sound_srt_file = ""
-    # auto-generate an srt (transcript) from the audio file with autosub
-    if ".srt" in sound_file:
-        sound_srt_file = sound_file
-    elif sound_file is not None:
-        cmd = "autosub -S zh-TW -D zh-TW " + sound_file
-        os.system(cmd)
-        sound_srt_file = sound_file.split('.')[0] + ".srt"
-    csv_file = srt_to_csv(sound_srt_file)
-    text_form = csv_to_text(csv_file,text_font)
-    print(sound_srt_file)
-    # open the srt file and build the subtitle clips
-    try:
-        # subs = pysrt.open(sound_srt_file)
-        # text_form = []
-        # for context in subs:
-        #     #print(context.start.minutes*60+context.start.seconds+ 0.001*context.start.milliseconds)
-        #     end = context.end-context.start
-        #     end_timeStamp=(end.minutes*60+end.seconds+ 0.001*end.milliseconds)
-        #     start_timeStamp=(context.start.minutes*60+context.start.seconds+ 0.001*context.start.milliseconds)
-        #     text_form.append({'text':context.text,'start':start_timeStamp,'end':end_timeStamp,'size':36,'font':text_font})
-        number = 0
-        for text_tmp in text_form:
-            file_name = "input_self/tmp/save_target_" + str(number) + ".png"
-            txt2image(text_tmp['text'], file_name,lang='zh',size = text_tmp['size'],fon = text_tmp['font'])
-            exec('text_anchor_{} = openshot.QtImageReader("input_self/tmp/save_target_{}.png")'.format(number,number))
-            exec('text_anchor_{}.Open()'.format(number))
-            exec('text_anchor_clip_{} = video_photo_clip(video=text_anchor_{},layer=4,scale_x=1,scale_y=1,\
-                    location_x=0,location_y=0.67,position=text_tmp["start"], end=text_tmp["end"],ck=ck_anchor,audio=True)'.format(number,number))
-            exec('t.AddClip(text_anchor_clip_{})'.format(number))
-            exec('text_anchor_{}.Close()'.format(number))
-            number = number+1
-    except:
-        print("無法開啟srt檔案(字幕產生失敗)")
-
-    w = video_writer_init(output_filename)
-    w.Open()
-
-    frames = int(t.info.fps)*int(anchor.info.duration)
-    for n in range(frames):
-        f=t.GetFrame(n)
-        w.WriteFrame(f)
-
-    t.Close()
-    w.Close()
-
-    # delete temporary working files and recreate the empty directories
-    shutil.rmtree('input_self/tmp')
-    os.mkdir('input_self/tmp')
-    shutil.rmtree('input_self/tmp1')
-    os.mkdir('input_self/tmp1')
-
-
-if __name__ == '__main__':
-    text_to_short_vedio_create()
-    text_to_short_vedio(mp4_file="input_self/tmp1/test.mp4", sound_file='input_self/tmp1/test.mp4',
-        output_filename="output/demo.mp4", text_font="input_self/font/DFT_R7.ttc")