Add Description

- Videobook/Videobook.py +2 -38
- app.py +2 -3
Videobook/Videobook.py
CHANGED
@@ -14,50 +14,14 @@ class Videobook:
     def get_sentences(self, story):
         return self.preprocessor(story)
 
-    # def generate_voice(self, story, sentences , path = 'tts.mp3'):
-    #     for i,n in enumerate(sentences):
-    #         tts=gTTS(n,lang='en')
-    #         tts.save('tts'+str(i)+'.mp3')
-    #     lgth=[]
-    #     for i in range(len(sentences)):
-    #         lgth.append(MP3('tts'+str(i)+'.mp3').info.length)
-    #         os.remove(os.path.join(os.getcwd(),'tts'+str(i)+'.mp3'))
-    #     tts=gTTS(story,lang='en')
-    #     tts.save(path)
-    #     return lgth
-
     def generate_imgs(self, sentences, steps):
         imgs = []
         for sentence in sentences:
             sentence['pos'] = self.style + ' of ' + sentence['pos'] + ', ' + self.tags
             imgs.append(self.pipe.generate(prompt = sentence['pos'], negative_prompt = sentence['neg'], num_inference_steps = steps))
-            return imgs
-
-    # def addBuffer(self, imgs, lgth):
-    #     imgs_buff = []
-    #     for i,img in enumerate(imgs):
-    #         for j in range(ceil(lgth[i] * self.fps)):
-    #             imgs_buff.append(img)
-    #     return imgs_buff
-
-    # def imgs_to_video(self, imgs, video_name='video.mp4'):
-    #     video_dims = (imgs[0].width, imgs[0].height)
-    #     fourcc = cv2.VideoWriter_fourcc(*'DIVX')
-    #     video = cv2.VideoWriter(video_name, fourcc, self.fps, video_dims)
-    #     for img in imgs:
-    #         tmp_img = img.copy()
-    #         video.write(cv2.cvtColor(np.array(tmp_img), cv2.COLOR_RGB2BGR))
-    #     video.release()
-
-    # def make_video(self, imgs, lengths, video_name = "finished_video.mp4"):
-    #     self.imgs_to_video(self.addBuffer(imgs, lengths), 'test_video.mp4')
-    #     input_audio = ffmpeg.input(os.path.join(os.getcwd(),'tts.mp3'))
-    #     input_video = ffmpeg.input(os.path.join(os.getcwd(),'test_video.mp4'))
-    #     ffmpeg.concat(input_video, input_audio, v=1, a=1).output(video_name).run(overwrite_output=True)
-
+        return imgs
 
-    def generate(self, story, api_key, fps, style, tags, model, steps):
-        self.fps = fps
+    def generate(self, story, api_key, style, tags, model, steps):
         self.style = style
         self.tags = tags
         if model == "Stable Diffusion v2.1":
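For reference, a minimal runnable sketch of `generate_imgs` as it stands after this commit. `StubPipe` is a hypothetical stand-in for `self.pipe`, with its interface inferred from the call site in the diff; the real Space presumably wires in a Stable Diffusion or Kandinsky pipeline here.

# Sketch only: StubPipe is a hypothetical stand-in for self.pipe,
# inferred from the call site in the diff above.
class StubPipe:
    def generate(self, prompt, negative_prompt, num_inference_steps):
        # A real pipeline would return an image; here we echo the prompt.
        return f"<image: {prompt} / steps={num_inference_steps}>"

class VideobookSketch:
    def __init__(self, style, tags):
        self.pipe = StubPipe()
        self.style = style
        self.tags = tags

    def generate_imgs(self, sentences, steps):
        imgs = []
        for sentence in sentences:
            sentence['pos'] = self.style + ' of ' + sentence['pos'] + ', ' + self.tags
            imgs.append(self.pipe.generate(prompt=sentence['pos'],
                                           negative_prompt=sentence['neg'],
                                           num_inference_steps=steps))
        # The return now sits outside the loop, so every sentence yields an
        # image; with the pre-commit placement inside the loop, the method
        # returned after rendering only the first sentence.
        return imgs

vb = VideobookSketch(style="Cartoon", tags="high quality, 3d render")
sentences = [{'pos': 'a fox in a forest', 'neg': 'blurry'},
             {'pos': 'the fox crosses a river', 'neg': 'blurry'}]
print(len(vb.generate_imgs(sentences, steps=25)))  # -> 2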
app.py
CHANGED
@@ -3,6 +3,7 @@ import gradio as gr
 
 gen = Videobook()
 with gr.Blocks() as demo:
+    gr.Description("Videobook", "A tool to generate Narrated Videos from Story")
     with gr.Row():
         with gr.Column():
             story = gr.inputs.Textbox(lines = 5, label = "Story")
@@ -10,9 +11,7 @@ with gr.Blocks() as demo:
             tags = gr.inputs.Textbox(default = "high quality, 3d render", label = "Tags")
             style = gr.Dropdown(["Cartoon", "Anime Style", "Realistic Image"], value = "Cartoon", label = "Style")
             model = gr.Dropdown(["Stable Diffusion v2.1", "Kadinsky"], value = "Stable Diffusion v2.1", label = "Model")
-
-            steps = gr.Radio([25, 50], value = 50, label = "Steps")
-            fps = gr.Radio([10, 24, 60], value = 10, label = "FPS")
+            steps = gr.Radio([25, 50], value = 50, label = "Steps")
     output = gr.outputs.Video()
     run = gr.Button(label = "Generate Video")
     run.click(gen.generate, inputs = [story, api_key, fps, style, tags, model, steps], outputs = output)
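Two caveats stand out against the published Gradio API (hedged, since the Space's pinned Gradio version is not visible in this diff): `gr.Description` is not a documented Gradio component, so the added line would raise an AttributeError at startup, and the unchanged `run.click` call still lists the now-deleted `fps` component in its inputs, which would fail with a NameError even if the first issue were fixed and passes one more input than the new `generate(self, story, api_key, style, tags, model, steps)` signature accepts. The conventional way to show a title and description inside `gr.Blocks` is `gr.Markdown`; a minimal sketch of how that might look here:

import gradio as gr

# Sketch only: gr.Markdown is the documented way to render a heading and
# description inside gr.Blocks; gr.Description does not exist in Gradio.
with gr.Blocks() as demo:
    gr.Markdown("# Videobook\nA tool to generate narrated videos from a story.")
    with gr.Row():
        with gr.Column():
            story = gr.Textbox(lines=5, label="Story")

demo.launch()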