Samarth0710 commited on
Commit
93a0737
·
1 Parent(s): eebca1a

Add to HF Spaces

Browse files
.DS_Store ADDED
Binary file (6.15 kB). View file
 
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ /manimator/.env
Dockerfile ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Build stage
FROM python:3.11-slim AS builder

# Configure apt and install build dependencies (compilers, cairo/pango headers,
# TeX and ffmpeg are needed to build and run manim's rendering toolchain).
RUN rm -f /etc/apt/apt.conf.d/docker-clean \
    && echo "deb https://deb.debian.org/debian/ stable main" > /etc/apt/sources.list \
    && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \
    && apt-get update \
    && apt-get install -y --no-install-recommends \
    gcc \
    g++ \
    pkg-config \
    ffmpeg \
    libcairo2-dev \
    libpango1.0-dev \
    python3-dev \
    texlive \
    texlive-latex-extra \
    make \
    libpangocairo-1.0-0 \
    dvisvgm \
    && rm -rf /var/lib/apt/lists/*

# Install Poetry
RUN pip install --no-cache-dir poetry==1.7.1

WORKDIR /app
COPY pyproject.toml poetry.lock ./

# Install dependencies into the system interpreter (no venv) so the
# site-packages directory can be copied into the runtime stage below.
RUN poetry config virtualenvs.create false \
    && poetry install --no-root

# Runtime stage
FROM python:3.11-slim

# Install runtime dependencies.
# Fix: rewrite sources.list BEFORE `apt-get update`; the original ran the
# update first, so the install still used package lists fetched from the
# original sources and the rewrite had no effect (the builder stage already
# does this in the correct order).
RUN echo "deb https://deb.debian.org/debian/ stable main" > /etc/apt/sources.list \
    && apt-get update \
    && apt-get install -y --no-install-recommends \
    ffmpeg \
    libcairo2 \
    libpango1.0-0 \
    libpangocairo-1.0-0 \
    texlive \
    texlive-latex-extra \
    dvisvgm \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app

# Copy built packages and application
COPY --from=builder /usr/local/lib/python3.11/site-packages /usr/local/lib/python3.11/site-packages
COPY --from=builder /usr/local/bin/poetry /usr/local/bin/poetry
COPY manimator ./manimator

ENV PYTHONPATH=/app/manimator
# EXPOSE 8000
EXPOSE 7860
ENV GRADIO_SERVER_NAME="0.0.0.0"

# CMD ["python", "-m", "uvicorn", "manimator.main:app", "--host", "0.0.0.0", "--port", "8000"]
CMD ["python", "manimator/gradio_app.py"]
manimator/.env.example ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ SAMBANOVA_API_KEY=
2
+ DEEPSEEK_API_KEY=
3
+ GEMINI_API_KEY=
manimator/.gradio/cached_examples/15/Generated Animation/76b8f850c8e0823ff349/tmp_fgj4jjz.mp4 ADDED
Binary file (308 kB). View file
 
manimator/.gradio/cached_examples/15/Generated Animation/828914a7c2fcce16af8a/tmpy_f7knhq.mp4 ADDED
Binary file (382 kB). View file
 
manimator/.gradio/cached_examples/15/Generated Animation/94011f1ecb91ed97cf66/tmpri1hckr7.mp4 ADDED
Binary file (541 kB). View file
 
manimator/.gradio/cached_examples/15/log.csv ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ Generated Animation,Status,timestamp
2
+ "{""video"": {""path"": "".gradio/cached_examples/15/Generated Animation/94011f1ecb91ed97cf66/tmpri1hckr7.mp4"", ""url"": ""/gradio_api/file=/private/var/folders/6f/cjjq9y_d46q4s10zy4v3d3y00000gn/T/gradio/d3ce242f1241773003ed1ab4326e5690081b12121c4755d8e697dffb42396acd/tmpri1hckr7.mp4"", ""size"": null, ""orig_name"": ""tmpri1hckr7.mp4"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}, ""subtitles"": null}",Animation generated successfully!,2025-01-05 23:30:10.138786
3
+ "{""video"": {""path"": "".gradio/cached_examples/15/Generated Animation/76b8f850c8e0823ff349/tmp_fgj4jjz.mp4"", ""url"": ""/gradio_api/file=/private/var/folders/6f/cjjq9y_d46q4s10zy4v3d3y00000gn/T/gradio/5778050647cd0c3e4cd3d3e5bce21fd1fd81ee9b4e052d8a76eb188d437d9feb/tmp_fgj4jjz.mp4"", ""size"": null, ""orig_name"": ""tmp_fgj4jjz.mp4"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}, ""subtitles"": null}",Animation generated successfully!,2025-01-05 23:30:45.353879
4
+ "{""video"": {""path"": "".gradio/cached_examples/15/Generated Animation/828914a7c2fcce16af8a/tmpy_f7knhq.mp4"", ""url"": ""/gradio_api/file=/private/var/folders/6f/cjjq9y_d46q4s10zy4v3d3y00000gn/T/gradio/12a8840f0085cde86eecf8309c03ed2fda3aa656b34139cdd354b30542be4ace/tmpy_f7knhq.mp4"", ""size"": null, ""orig_name"": ""tmpy_f7knhq.mp4"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}, ""subtitles"": null}",Animation generated successfully!,2025-01-05 23:31:25.217428
manimator/.gradio/certificate.pem ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ -----BEGIN CERTIFICATE-----
2
+ MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
3
+ TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
4
+ cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
5
+ WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
6
+ ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
7
+ MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
8
+ h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
9
+ 0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
10
+ A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
11
+ T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
12
+ B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
13
+ B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
14
+ KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
15
+ OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
16
+ jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
17
+ qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
18
+ rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
19
+ HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
20
+ hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
21
+ ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
22
+ 3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
23
+ NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
24
+ ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
25
+ TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
26
+ jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
27
+ oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
28
+ 4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
29
+ mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
30
+ emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
31
+ -----END CERTIFICATE-----
manimator/__pycache__/main.cpython-312.pyc ADDED
Binary file (12.3 kB). View file
 
manimator/few_shot/BitNet.mp4 ADDED
Binary file (386 kB). View file
 
manimator/few_shot/CNNExplanation.mp4 ADDED
Binary file (673 kB). View file
 
manimator/few_shot/FourierTransformExplanation.mp4 ADDED
Binary file (322 kB). View file
 
manimator/few_shot/NeuralNetworksBackPropagationExample.mp4 ADDED
Binary file (389 kB). View file
 
manimator/few_shot/SVMExplanation.mp4 ADDED
Binary file (223 kB). View file
 
manimator/few_shot/few_shot_1.pdf ADDED
Binary file (597 kB). View file
 
manimator/gradio_app.py ADDED
@@ -0,0 +1,186 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from main import (
3
+ generate_animation_response,
4
+ process_pdf_with_gemini,
5
+ process_prompt_scene,
6
+ ManimProcessor,
7
+ )
8
+ import re
9
+
10
+
11
def process_prompt(prompt: str):
    """Generate a Manim animation for a free-text prompt.

    Runs the scene-description + code-generation + render pipeline, retrying
    the whole pipeline once if no usable code is produced or an exception
    is raised.

    Args:
        prompt: Natural-language description of the desired animation.

    Returns:
        (video_path, status_message); video_path is None on failure.
    """
    max_attempts = 2
    for attempt in range(1, max_attempts + 1):
        try:
            processor = ManimProcessor()
            with processor.create_temp_dir() as temp_dir:
                scene_description = process_prompt_scene(prompt)
                response = generate_animation_response(scene_description)
                code = processor.extract_code(response)

                # Retry once when the model produced no code block.
                if not code:
                    if attempt < max_attempts:
                        continue
                    return None, "No valid Manim code generated after multiple attempts"

                # The Scene subclass name is required to invoke the renderer.
                class_match = re.search(r"class (\w+)\(Scene\)", code)
                if not class_match:
                    if attempt < max_attempts:
                        continue
                    return None, "No Scene class found after multiple attempts"

                scene_name = class_match.group(1)
                scene_file = processor.save_code(code, temp_dir)
                video_path = processor.render_scene(scene_file, scene_name, temp_dir)

                if not video_path:
                    return None, "Failed to render animation"

                return video_path, "Animation generated successfully!"

        except Exception as e:
            if attempt < max_attempts:
                continue
            return None, f"Error after multiple attempts: {str(e)}"
50
+
51
+
52
def process_pdf(file_path: str):
    """Extract a scene description from an uploaded PDF.

    Args:
        file_path: Filesystem path to the PDF (as provided by Gradio).

    Returns:
        The scene-description string, or an "Error ..." message on failure.
    """
    print("file_path", file_path)
    try:
        if not file_path:
            return "Error: No file uploaded"
        # Fix: don't shadow the `file_path` parameter with the open file
        # object — use a distinct name for the handle.
        with open(file_path, "rb") as pdf_file:
            file_bytes = pdf_file.read()
        scene_description = process_pdf_with_gemini(file_bytes)
        print("scene_description", scene_description)
        return scene_description
    except Exception as e:
        return f"Error processing PDF: {str(e)}"
64
+
65
+
66
def interface_fn(prompt=None, pdf_file=None):
    """Dispatch a generation request to the text or PDF pipeline.

    Args:
        prompt: Optional free-text animation description.
        pdf_file: Optional path of an uploaded PDF.

    Returns:
        (video_path, status_message); video_path is None on failure.
    """
    if prompt:
        video_path, message = process_prompt(prompt)
        if video_path:
            return video_path, message
        return None, message
    elif pdf_file:
        scene_description = process_pdf(pdf_file)
        # Fix: process_pdf reports failures as "Error ..." strings; don't feed
        # those into the animation pipeline as if they were scene descriptions.
        if scene_description and not scene_description.startswith("Error"):
            video_path, message = process_prompt(scene_description)
            if video_path:
                return video_path, message
            return None, message
        if scene_description:
            # Surface the PDF-processing error as the status message.
            return None, scene_description
    return None, "Please provide either a prompt or upload a PDF file"
80
+
81
+
82
# Markdown header rendered at the top of the Gradio app.
description_md = """
## 🎬 manimator

This tool helps you create visualizations of complex concepts using natural language or PDF papers:

- **Text Prompt**: Describe the concept you want to visualize
- **PDF Upload**: Upload a research paper to extract key visualizations

### Links
- [Manim Documentation](https://docs.manim.community/)
- [Project Repository](https://github.com/yourusername/manimator)
"""

# Gradio UI: three tabs — free-text prompt, PDF upload, and pre-rendered samples.
with gr.Blocks(title="manimator") as demo:
    gr.Markdown(description_md)

    with gr.Tabs():
        with gr.TabItem("✍️ Text Prompt"):
            with gr.Column():
                text_input = gr.Textbox(
                    label="Describe the animation you want to create",
                    placeholder="Explain the working of neural networks",
                    lines=3,
                )
                text_button = gr.Button("Generate Animation from Text")

            # Only show output UI elements here (not in the sample tab)
            with gr.Row():
                video_output = gr.Video(label="Generated Animation")
                status_output = gr.Textbox(
                    label="Status", interactive=False, show_copy_button=True
                )

            # interface_fn receives only the prompt here; pdf_file stays None.
            text_button.click(
                fn=interface_fn,
                inputs=[text_input],
                outputs=[video_output, status_output],
            )

        with gr.TabItem("📄 PDF Upload"):
            with gr.Column():
                file_input = gr.File(label="Upload a PDF paper", file_types=[".pdf"])
                pdf_button = gr.Button("Generate Animation from PDF")

            # Show output UI elements here as well
            with gr.Row():
                pdf_video_output = gr.Video(label="Generated Animation")
                pdf_status_output = gr.Textbox(
                    label="Status", interactive=False, show_copy_button=True
                )

            # Lambda routes the upload through interface_fn's pdf_file branch.
            pdf_button.click(
                fn=lambda pdf: interface_fn(prompt=None, pdf_file=pdf),
                inputs=[file_input],
                outputs=[pdf_video_output, pdf_status_output],
            )

        with gr.TabItem("Sample Examples"):
            # Pre-rendered example videos; no generation happens in this tab.
            sample_select = gr.Dropdown(
                choices=[
                    "What is a CNN?",
                    "BitNet Paper",
                    "Explain Fourier Transform",
                    "How does backpropagation work in Neural Networks?",
                    "What is SVM?",
                ],
                label="Choose an example to display",
                value=None,
            )
            sample_video = gr.Video()
            sample_markdown = gr.Markdown()
153
+
154
def show_sample(example):
    """Map a sample-dropdown choice to its (video_path, caption) pair.

    Returns (None, "") for an unrecognized (or cleared) selection.
    """
    samples = {
        "What is a CNN?": (
            "./manimator/few_shot/CNNExplanation.mp4",
            "Output: Example Output 1",
        ),
        "BitNet Paper": (
            "./manimator/few_shot/BitNet.mp4",
            "Output: Example Output 2",
        ),
        "Explain Fourier Transform": (
            "./manimator/few_shot/FourierTransformExplanation.mp4",
            "Output: Example Output 3",
        ),
        "How does backpropagation work in Neural Networks?": (
            "./manimator/few_shot/NeuralNetworksBackPropagationExample.mp4",
            "Output: Example Output 4",
        ),
        "What is SVM?": (
            "./manimator/few_shot/SVMExplanation.mp4",
            "Output: Example Output 5",
        ),
    }
    return samples.get(example, (None, ""))
178
+
179
            # Swap the displayed video/caption whenever the dropdown changes.
            sample_select.change(
                fn=show_sample,
                inputs=sample_select,
                outputs=[sample_video, sample_markdown],
            )

if __name__ == "__main__":
    # share=True additionally exposes a temporary public Gradio link
    # alongside the local server.
    demo.launch(share=True)
manimator/main.py ADDED
@@ -0,0 +1,285 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import FastAPI, HTTPException, File, UploadFile
2
+ from fastapi.responses import FileResponse
3
+ from fastapi.middleware.cors import CORSMiddleware
4
+ import re
5
+ import litellm
6
+ import base64
7
+ import requests
8
+ from dotenv import load_dotenv
9
+ from utils.models import PromptRequest
10
+ from utils.schema import ManimProcessor
11
+ from utils.prompts import MANIM_SYSTEM_PROMPT, SCENE_SYSTEM_PROMPT
12
+ from PyPDF2 import PdfReader, PdfWriter
13
+ import io
14
+
15
load_dotenv()

# Few-shot example PDF, read and base64-encoded once at import time so it can
# be reused in every Gemini request.
# Fix: use a context manager so the file handle is closed deterministically
# (the original `open(...).read()` leaked the handle).
with open("./manimator/few_shot/few_shot_1.pdf", "rb") as _few_shot_file:
    few_shot_pdf_bytes = _few_shot_file.read()
few_shot_pdf = base64.b64encode(few_shot_pdf_bytes).decode("utf-8")

app = FastAPI()

# Wide-open CORS: the API is meant to be callable from arbitrary frontends.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
29
+
30
+
31
def compress_pdf(content: bytes, compression_level: int = 5) -> str:
    """Re-write a PDF with compression and return it base64-encoded.

    Args:
        content: Raw PDF bytes.
        compression_level: Level handed to the PDF writer.
            NOTE(review): `PdfWriter.set_compression` is not part of every
            PyPDF2 release — confirm against the pinned version.

    Returns:
        Base64 string of the (possibly compressed) PDF. On any failure the
        original bytes are encoded unchanged (deliberate best-effort).
    """
    try:
        reader = PdfReader(io.BytesIO(content))
        output = io.BytesIO()
        writer = PdfWriter(output)

        for page in reader.pages:
            writer.add_page(page)

        writer.set_compression(compression_level)
        writer.write(output)

        compressed_bytes = output.getvalue()
        return base64.b64encode(compressed_bytes).decode("utf-8")
    except Exception:
        # Fix: dropped the unused `as e` binding. Fall back to shipping the
        # PDF uncompressed rather than failing the request.
        return base64.b64encode(content).decode("utf-8")
47
+
48
+
49
def generate_animation_response(prompt: str) -> str:
    """Ask the code-generation model for Manim code matching *prompt*.

    Raises:
        HTTPException: 500 if the LLM call (or reading its reply) fails.
    """
    chat = [
        {
            "role": "system",
            "content": MANIM_SYSTEM_PROMPT,
        },
        {
            "role": "user",
            "content": f"{prompt}\n\n NOTE!!!: Make sure the objects or text in the generated code are not overlapping at any point in the video. Make sure that each scene is properly cleaned up before transitioning to the next scene.",
        },
    ]
    try:
        completion = litellm.completion(
            model="deepseek/deepseek-chat", messages=chat, num_retries=2
        )
        return completion.choices[0].message.content
    except Exception as e:
        raise HTTPException(
            status_code=500, detail=f"Failed to generate animation response: {str(e)}"
        )
69
+
70
+
71
def process_prompt_scene(prompt: str) -> str:
    """Turn a user topic into a structured scene description.

    Uses a few-shot conversation (Fourier Transform, Gradient Descent,
    Backpropagation) to steer the model toward the SCENE_SYSTEM_PROMPT
    output format, then appends the caller's prompt as the final user turn.

    Args:
        prompt: The topic or question to describe.

    Returns:
        The model's scene-description text.
    """
    # Few-shot transcript: each user/assistant pair demonstrates the expected
    # "Topic / Key Points / Visual Elements / Style" structure.
    messages = [
        {
            "role": "system",
            "content": SCENE_SYSTEM_PROMPT,
        },
        {
            "role": "user",
            "content": "Fourier Transform",
        },
        {
            "role": "assistant",
            "content": r"""### *Topic*: Fourier Transform
*Key Points*:
- Time domain vs frequency domain
- Decomposing signals into sine waves: \( f(t) = \sum_{n} A_n \sin(2\pi n t + \phi_n) \)
- Fourier Transform formula: \( \hat{f}(\xi) = \int_{-\infty}^{\infty} f(t) e^{-2\pi i \xi t} dt \)
- Applications in signal processing
*Visual Elements*:
- Animate a complex signal being decomposed into sine waves.
- Show the Fourier Transform as a graph in the frequency domain.
*Style*: Smooth animations with mathematical formulas and graphs.""",
        },
        {
            "role": "user",
            "content": "Explain Gradient Descent",
        },
        {
            "role": "assistant",
            "content": r"""### *Topic*: Gradient Descent
*Key Points*:
- Loss function: \( L(\theta) = \frac{1}{N} \sum_{i=1}^{N} (y_i - f(x_i; \theta))^2 \)
- Gradient calculation: \( \nabla L(\theta) = \frac{\partial L}{\partial \theta} \)
- Update rule: \( \theta_{new} = \theta_{old} - \alpha \nabla L(\theta) \)
- Convergence to the minimum
*Visual Elements*:
- 3D surface plot of the loss function.
- Animate the gradient descent steps moving toward the minimum.
*Style*: 3D visualizations with step-by-step explanations.""",
        },
        {
            "role": "user",
            "content": "How does backpropogation work in neural networks?",
        },
        {
            "role": "assistant",
            "content": r"""### *Topic*: Neural Networks (Backpropagation)
*Key Points*:
- Loss function: \( L(\theta) = \frac{1}{N} \sum_{i=1}^{N} (y_i - f(x_i; \theta))^2 \)
- Chain rule: \( \frac{\partial L}{\partial f} \frac{\partial f}{\partial \theta} \)
- Weight updates: \( \theta_{new} = \theta_{old} - \alpha \nabla L(\theta) \)
*Visual Elements*:
- Animate the flow of gradients through the network.
- Show the loss surface and gradient descent steps.
*Style*: Step-by-step, with clear visualizations of gradients and updates.""",
        },
        {
            "role": "user",
            "content": prompt,
        },
    ]
    response = litellm.completion(
        model="sambanova/Meta-Llama-3.3-70B-Instruct",
        messages=messages,
        num_retries=2,
    )
    return response.choices[0].message.content
138
+
139
+
140
def process_pdf_with_gemini(
    file_content: bytes, model="gemini/gemini-1.5-flash", retry=False
) -> str:
    """Extract a structured scene description from a PDF via Gemini.

    A one-shot example (the bundled ResNet paper plus its expected
    description) is sent ahead of the caller's PDF so the model mirrors the
    SCENE_SYSTEM_PROMPT output format.

    Args:
        file_content: Raw PDF bytes to describe.
        model: LiteLLM model identifier to call.
        retry: Internal flag — True on the single fallback attempt with the
            alternate Gemini model; prevents infinite recursion.

    Returns:
        The scene-description text from the model.

    Raises:
        HTTPException: 500 when both the primary and fallback models fail.
    """
    encoded_pdf = compress_pdf(file_content)
    try:
        response = litellm.completion(
            model=model,
            messages=[
                {"role": "system", "content": SCENE_SYSTEM_PROMPT},
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "image_url",
                            "image_url": "data:application/pdf;base64,{}".format(
                                few_shot_pdf
                            ),
                        },
                    ],
                },
                {
                    "role": "assistant",
                    "content": r"""*Topic*: Deep Residual Learning for Image Recognition
*Key Points*:
1. *Degradation Problem*: Explain how deeper networks suffer from higher training error despite having more capacity.
2. *Residual Learning*: Show how residual learning reformulates the problem by learning residual functions \( \mathcal{F}(\mathbf{x}) = \mathcal{H}(\mathbf{x}) - \mathbf{x} \) instead of direct mappings \( \mathcal{H}(\mathbf{x}) \).
3. *Shortcut Connections*: Visualize how identity shortcuts (skip connections) are added to the network to enable residual learning.
4. *Deep Residual Networks*: Demonstrate the architecture of deep residual networks (e.g., ResNet-34, ResNet-152) and how they outperform plain networks.
5. *Bottleneck Design*: Explain the bottleneck design in deeper ResNets (e.g., ResNet-50/101/152) using \(1 \times 1\), \(3 \times 3\), and \(1 \times 1\) convolutions.

*Style*: 3Blue1Brown style (clean, minimalistic, with smooth animations and clear labels)
*Additional Requirements*:
- Include mathematical formulas (e.g., \( \mathcal{F}(\mathbf{x}) = \mathcal{H}(\mathbf{x}) - \mathbf{x} \)) and graphs (e.g., training error vs. depth).
- Use color coding to differentiate between plain networks and residual networks.
- Animate the flow of data through shortcut connections and residual blocks.
- Provide step-by-step explanations for each concept.""",
                },
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "image_url",
                            "image_url": "data:application/pdf;base64,{}".format(
                                encoded_pdf
                            ),
                        },
                    ],
                },
            ],
        )
        return response.choices[0].message.content
    except Exception as e:
        # Fix: idiomatic truthiness test instead of `retry == False`.
        if not retry:
            # First failure: retry exactly once with the experimental model.
            return process_pdf_with_gemini(
                file_content,
                model="gemini/gemini-2.0-flash-exp",
                retry=True,
            )
        raise HTTPException(
            status_code=500, detail=f"Failed to process PDF with Gemini: {str(e)}"
        )
202
+
203
+
204
def download_arxiv_pdf(url: str) -> bytes:
    """Download PDF from arxiv URL.

    Args:
        url: Full arXiv PDF URL.

    Returns:
        The raw PDF bytes.

    Raises:
        HTTPException: 500 on a network error or non-2xx status.
    """
    try:
        # Fix: a timeout keeps the request handler from hanging indefinitely
        # when arXiv is slow or unreachable.
        response = requests.get(url, timeout=30)
        response.raise_for_status()
        return response.content
    except Exception as e:
        raise HTTPException(
            status_code=500, detail=f"Failed to download arxiv PDF: {str(e)}"
        )
214
+
215
+
216
@app.get("/health-check")
async def health_check():
    """Liveness probe: always returns {"status": "ok"}."""
    return {"status": "ok"}
219
+
220
+
221
@app.post("/generate-pdf-scene")
async def generate_pdf_scene(file: UploadFile = File(...)):
    """Extract a scene description from an uploaded PDF.

    Returns:
        {"scene_description": <model output>}

    Raises:
        HTTPException: 500 on processing failure.
    """
    try:
        content = await file.read()
        scene_description = process_pdf_with_gemini(content)
        return {"scene_description": scene_description}
    except HTTPException:
        # Fix: don't re-wrap HTTPExceptions raised downstream — stringifying
        # them mangled the detail message into "500: ...".
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
229
+
230
+
231
@app.post("/generate-prompt-scene")
async def generate_prompt_scene(request: PromptRequest):
    """Turn a free-text prompt into a structured scene description.

    Returns:
        {"scene_description": <model output>}

    Raises:
        HTTPException: 500 when the scene-description call fails.
    """
    try:
        return {"scene_description": process_prompt_scene(request.prompt)}
    except Exception as e:
        raise HTTPException(
            status_code=500, detail=f"Error generating scene descriptions: {str(e)}"
        )
239
+
240
+
241
@app.get("/pdf/{arxiv_id}")
async def process_arxiv_by_id(arxiv_id: str):
    """Process arxiv paper by ID: download its PDF and describe it.

    Returns:
        {"scene_description": <model output>}

    Raises:
        HTTPException: 500 on download or processing failure.
    """
    try:
        arxiv_url = f"https://arxiv.org/pdf/{arxiv_id}"
        pdf_content = download_arxiv_pdf(arxiv_url)
        scene_description = process_pdf_with_gemini(pdf_content)
        return {"scene_description": scene_description}
    except HTTPException:
        # Fix: helpers raise HTTPExceptions with meaningful details; re-wrapping
        # them via str(e) mangled those details — propagate them unchanged.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
251
+
252
+
253
@app.post("/generate-animation")
async def generate_animation(request: PromptRequest):
    """Generate Manim code from the prompt, render it, and return the video.

    Raises:
        HTTPException: 400 when no usable code or Scene class is produced,
            500 when rendering (or any other step) fails.
    """
    processor = ManimProcessor()

    try:
        with processor.create_temp_dir() as temp_dir:
            response = generate_animation_response(request.prompt)
            code = processor.extract_code(response)
            if not code:
                raise HTTPException(
                    status_code=400, detail="No valid Manim code generated"
                )
            # The Scene subclass name is needed for the render call.
            class_match = re.search(r"class (\w+)\(Scene\)", code)
            if not class_match:
                raise HTTPException(
                    status_code=400, detail="No Scene class found in code"
                )
            scene_name = class_match.group(1)
            scene_file = processor.save_code(code, temp_dir)
            video_path = processor.render_scene(scene_file, scene_name, temp_dir)
            if not video_path:
                raise HTTPException(
                    status_code=500, detail="Failed to render animation"
                )
            return FileResponse(video_path, media_type="video/mp4")
    except HTTPException:
        # Fix: the broad handler below was converting the deliberate 400
        # responses raised above into generic 500s; let them propagate.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
280
+
281
+
282
if __name__ == "__main__":
    # Standalone dev server for the FastAPI app; the Docker image launches
    # the Gradio frontend instead.
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)
manimator/utils/__pycache__/models.cpython-312.pyc ADDED
Binary file (436 Bytes). View file
 
manimator/utils/__pycache__/prompts.cpython-312.pyc ADDED
Binary file (10.9 kB). View file
 
manimator/utils/__pycache__/schema.cpython-312.pyc ADDED
Binary file (4.46 kB). View file
 
manimator/utils/models.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ from pydantic import BaseModel
2
+
3
+
4
class PromptRequest(BaseModel):
    """Request body for the prompt-based endpoints."""

    # Free-text topic or question to animate.
    prompt: str
manimator/utils/prompts.py ADDED
@@ -0,0 +1,279 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MANIM_SYSTEM_PROMPT = """```You are an expert in creating educational animations using Manim. Your task is to generate Python code for a Manim animation that visually explains a given topic or concept. Follow these steps:
2
+
3
+ 1. **Understand the Topic**:
4
+ - Analyze the user's topic to identify the key concepts that need to be visualized.
5
+ - Break down the topic into smaller, digestible components (e.g., steps, mechanisms, equations).
6
+
7
+ 2. **Plan the Animation**:
8
+ - Create a storyboard for the animation, ensuring it flows logically from one concept to the next.
9
+ - Decide on the visual elements (e.g., shapes, graphs, text) that will represent each concept.
10
+ - Ensure all elements stay within the screen's aspect ratio (-7.5 to 7.5 on x-axis, -4 to 4 on y-axis).
11
+ - Plan proper spacing between elements to avoid overlap.
12
+ - Make sure the objects or text in the generated code are not overlapping at any point in the video.
13
+ - Make sure that each scene is properly cleaned up before transitioning to the next scene.
14
+
15
+ 3. **Write the Manim Code**:
16
+ - Use Manim's library to create the animation. Include comments in the code to explain each step.
17
+ - Ensure the code is modular, with separate functions for each key concept.
18
+ - Use a consistent style (e.g., 3Blue1Brown style) with appropriate colors, labels, and animations.
19
+ - Implement clean transitions between scenes by removing all elements from previous scene
20
+ - Use self.play(FadeOut(*self.mobjects)) at the end of each scene.
21
+ - Add wait() calls after important animations for better pacing.
22
+ - Make sure the objects or text in the generated code are not overlapping at any point in the video.
23
+ - Make sure that each scene is properly cleaned up before transitioning to the next scene.
24
+
25
+ 4. **Output the Code**:
26
+ - Provide the complete Python script that can be run using Manim.
27
+ - Include instructions on how to run the script (e.g., command to render the animation).
28
+ - Verify all scenes have proper cleanup and transitions.
29
+
30
+ **Example Input**:
31
+ - Topic: "Neural Networks"
32
+ - Key Points: "neurons and layers, weights and biases, activation functions"
33
+ - Style: "3Blue1Brown style"
34
+
35
+ **Example Output** (only for your reference, do not use this exact code in your outputs):
36
+ ```python
37
+ from manim import *
38
+
39
+ class NeuralNetworkExplanation(Scene):
40
+ def construct(self):
41
+ # Title
42
+ title = Text("Neural Networks Explained", font_size=40, color=BLUE)
43
+ self.play(Write(title))
44
+ self.wait(2)
45
+ self.play(FadeOut(title))
46
+
47
+ # Introduction to Neural Networks
48
+ intro = Text("Key Components of a Neural Network", font_size=35)
49
+ self.play(Write(intro))
50
+ self.wait(2)
51
+ self.play(FadeOut(intro))
52
+
53
+ # Show the overall structure of a neural network
54
+ self.show_neural_network_structure()
55
+ self.wait(2)
56
+
57
+ # Explain neurons and layers
58
+ self.explain_neurons_and_layers()
59
+ self.wait(2)
60
+
61
+ # Explain weights and biases
62
+ self.explain_weights_and_biases()
63
+ self.wait(2)
64
+
65
+ # Explain activation functions
66
+ self.explain_activation_functions()
67
+ self.wait(2)
68
+
69
+ def show_neural_network_structure(self):
70
+ # Create layers
71
+ input_layer = self.create_layer(3, "Input Layer", BLUE)
72
+ hidden_layer = self.create_layer(4, "Hidden Layer", GREEN)
73
+ output_layer = self.create_layer(2, "Output Layer", RED)
74
+
75
+ # Arrange layers horizontally
76
+ layers = VGroup(input_layer, hidden_layer, output_layer).arrange(RIGHT, buff=2)
77
+ self.play(Create(layers))
78
+ self.wait(1)
79
+
80
+ # Add connections between layers
81
+ connections = self.create_connections(input_layer, hidden_layer) + self.create_connections(hidden_layer, output_layer)
82
+ self.play(Create(connections))
83
+ self.wait(2)
84
+
85
+ # Cleanup
86
+ self.play(FadeOut(layers), FadeOut(connections))
87
+
88
+ def create_layer(self, num_neurons, label, color):
89
+ # Create a layer of neurons.
90
+ neurons = VGroup(*[Circle(radius=0.3, color=color) for _ in range(num_neurons)])
91
+ neurons.arrange(DOWN, buff=0.5)
92
+ layer_label = Text(label, font_size=20).next_to(neurons, UP)
93
+ return VGroup(neurons, layer_label)
94
+
95
+ def create_connections(self, layer1, layer2):
96
+ # Create connections between two layers.
97
+ connections = VGroup()
98
+ for neuron1 in layer1[0]:
99
+ for neuron2 in layer2[0]:
100
+ connection = Line(neuron1.get_right(), neuron2.get_left(), color=WHITE, stroke_width=1)
101
+ connections.add(connection)
102
+ return connections
103
+
104
+ def explain_neurons_and_layers(self):
105
+ # Title
106
+ title = Text("Neurons and Layers", font_size=35, color=BLUE)
107
+ self.play(Write(title))
108
+ self.wait(1)
109
+ self.play(FadeOut(title))
110
+
111
+ # Create a single neuron
112
+ neuron = Circle(radius=0.5, color=GREEN)
113
+ neuron_label = Text("Neuron", font_size=20).next_to(neuron, DOWN)
114
+
115
+ # Create a layer of neurons
116
+ layer = self.create_layer(3, "Layer", BLUE)
117
+
118
+ # Arrange
119
+ group = VGroup(neuron, layer).arrange(RIGHT, buff=2)
120
+ self.play(Create(neuron), Write(neuron_label))
121
+ self.play(Create(layer))
122
+ self.wait(2)
123
+
124
+ # Cleanup
125
+ self.play(FadeOut(neuron), FadeOut(neuron_label), FadeOut(layer))
126
+
127
+ def explain_weights_and_biases(self):
128
+ # Title
129
+ title = Text("Weights and Biases", font_size=35, color=BLUE)
130
+ self.play(Write(title))
131
+ self.wait(1)
132
+ self.play(FadeOut(title))
133
+
134
+ # Create two neurons
135
+ neuron1 = Circle(radius=0.3, color=GREEN)
136
+ neuron2 = Circle(radius=0.3, color=GREEN)
137
+ neurons = VGroup(neuron1, neuron2).arrange(RIGHT, buff=2)
138
+
139
+ # Add a connection with weight and bias
140
+ connection = Line(neuron1.get_right(), neuron2.get_left(), color=WHITE)
141
+ weight_label = Text("Weight (w)", font_size=16).next_to(connection, UP)
142
+ bias_label = Text("Bias (b)", font_size=16).next_to(neuron2, DOWN)
143
+
144
+ self.play(Create(neurons))
145
+ self.play(Create(connection), Write(weight_label), Write(bias_label))
146
+ self.wait(2)
147
+
148
+ # Cleanup
149
+ self.play(FadeOut(neurons), FadeOut(connection), FadeOut(weight_label), FadeOut(bias_label))
150
+
151
+ def explain_activation_functions(self):
152
+ # Title
153
+ title = Text("Activation Functions", font_size=35, color=BLUE)
154
+ self.play(Write(title))
155
+ self.wait(1)
156
+ self.play(FadeOut(title))
157
+
158
+ # Create axes
159
+ axes = Axes(x_range=[-3, 3], y_range=[-1, 3], axis_config={"color": BLUE})
160
+
161
+ # Plot ReLU
162
+ relu_graph = axes.plot(lambda x: max(0, x), color=GREEN)
163
+ relu_label = Text("ReLU(x) = max(0, x)", font_size=20).next_to(axes, UP)
164
+
165
+ # Plot Sigmoid
166
+ sigmoid_graph = axes.plot(lambda x: 1 / (1 + np.exp(-x)), color=RED)
167
+ sigmoid_label = Text("Sigmoid(x) = 1 / (1 + e^-x)", font_size=20).next_to(axes, UP)
168
+
169
+ # Animate
170
+ self.play(Create(axes))
171
+ self.play(Create(relu_graph), Write(relu_label))
172
+ self.wait(1)
173
+ self.play(Transform(relu_graph, sigmoid_graph), Transform(relu_label, sigmoid_label))
174
+ self.wait(2)
175
+
176
+ # Cleanup
177
+ self.play(FadeOut(axes), FadeOut(sigmoid_graph), FadeOut(sigmoid_label))
178
+
179
+ # Run the animation
180
+ if __name__ == "__main__":
181
+ scene = NeuralNetworkExplanation()
182
+ scene.render()```
183
+
184
+ NOTE!!!: Make sure the objects or text in the generated code are not overlapping at any point in the video. Make sure that each scene is properly cleaned up before transitioning to the next scene."""
185
+
186
+
187
# Prompt for the scene-description step: instructs the LLM to transform any
# paper/topic/question into a fixed Topic / Key Points / Visual Elements /
# Style outline that the downstream Manim code generator consumes.
# NOTE(review): this is a runtime string — its wording is part of program
# behavior and is reproduced unchanged below.
SCENE_SYSTEM_PROMPT = """# Content Structure System

When presented with any research paper, topic, question, or material, transform it into the following structured format:

## Basic Structure
For each topic or concept, organize the information as follows:

1. **Topic**: [Main subject or concept name]

**Key Points**:
* 3-4 core concepts or fundamental principles
* Include relevant mathematical formulas where applicable
* Each point should be substantive and detailed
* Focus on foundational understanding

**Visual Elements**:
* 2-3 suggested visualizations or animations
* Emphasis on dynamic representations where appropriate
* Clear connection to key points

**Style**:
* Brief description of visual presentation approach
* Tone and aesthetic guidelines
* Specific effects or animation suggestions

## Formatting Rules

1. Mathematical Formulas:
   - Use proper mathematical notation
   - Include both symbolic and descriptive forms
   - Ensure formulas are relevant to key concepts

2. Visual Elements:
   - Start each bullet with an action verb (Show, Animate, Demonstrate)
   - Focus on dynamic rather than static representations
   - Include specific details about what should be visualized

3. Style Guidelines:
   - Keep to 1-2 sentences
   - Include both visual and presentational elements
   - Match style to content type (e.g., "geometric" for math, "organic" for biology)

## Content Guidelines

1. Key Points Selection:
   - Choose foundational concepts over advanced applications
   - Include quantitative elements where relevant
   - Balance theory with practical understanding
   - Prioritize interconnected concepts

2. Visual Elements Selection:
   - Focus on elements that clarify complex concepts
   - Emphasize dynamic processes over static states
   - Include both macro and micro level visualizations
   - Suggest interactive elements where appropriate

3. Style Development:
   - Match aesthetic to subject matter
   - Consider audience engagement
   - Incorporate field-specific conventions
   - Balance technical accuracy with visual appeal

## Example Format:


*Topic*: [Subject Name]
*Key Points*:
* [Core concept with mathematical formula if applicable]
* [Fundamental principle]
* [Essential relationship or process]
* [Key application or implication]

*Visual Elements*:
* [Primary visualization with specific details]
* [Secondary visualization with animation suggestions]
* [Supporting visual element]

*Style*: [Visual approach and specific effects]

## Implementation Notes:

1. Maintain consistency in depth and detail across all topics
2. Ensure mathematical notation is precise and relevant
3. Make visual suggestions specific and actionable
4. Keep style descriptions concise but informative
5. Adapt format based on subject matter while maintaining structure

When processing input:
1. First identify core concepts
2. Organize into key points with relevant formulas
3. Develop appropriate visual representations
4. Define suitable style approach
5. Review for completeness and consistency"""
manimator/utils/schema.py ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import logging
import os
import re
import shutil
import subprocess
import tempfile
from contextlib import contextmanager
from typing import Optional

from fastapi import HTTPException

# logging.basicConfig(level=logging.DEBUG)
# logger = logging.getLogger(__name__)
13
+
14
+
15
class ManimProcessor:
    """Turn an LLM response containing fenced Manim code into a rendered video.

    Workflow: ``create_temp_dir`` -> ``extract_code`` -> ``save_code`` ->
    ``render_scene``. The temporary workspace is deleted on context exit, so
    ``render_scene`` copies the finished video out of it before returning.
    """

    @contextmanager
    def create_temp_dir(self):
        """Yield a fresh temporary directory; recursively delete it on exit."""
        temp_dir = tempfile.mkdtemp()
        try:
            yield temp_dir
        finally:
            # shutil.rmtree replaces the previous hand-rolled os.walk cleanup;
            # ignore_errors keeps cleanup from masking an exception raised in
            # the with-body (e.g. a file still held open by a failed render).
            shutil.rmtree(temp_dir, ignore_errors=True)

    def extract_code(self, rag_response: str) -> Optional[str]:
        """Return the first ```python fenced block in *rag_response*, or None.

        Args:
            rag_response: Raw LLM output that may embed a Python code fence.
        """
        match = re.search(r"```python\n(.*?)```", rag_response, re.DOTALL)
        return match.group(1).strip() if match else None

    def save_code(self, code: str, temp_dir: str) -> str:
        """Write *code* to ``<temp_dir>/scene.py`` and return the file path.

        The file is prefixed with ``from manim import *`` because generated
        scenes reference Manim names without importing them.
        """
        scene_file = os.path.join(temp_dir, "scene.py")
        # Explicit encoding: generated code may contain non-ASCII (math text).
        with open(scene_file, "w", encoding="utf-8") as f:
            f.write("from manim import *\n\n")
            f.write(code)
        return scene_file

    def render_scene(
        self, scene_file: str, scene_name: str, temp_dir: str
    ) -> Optional[str]:
        """Render *scene_name* from *scene_file* with the Manim CLI.

        Returns:
            Path to a copy of the rendered mp4 placed outside *temp_dir*
            (so it survives ``create_temp_dir`` cleanup), or None if Manim
            produced no output file.

        Raises:
            HTTPException: 500 with Manim's stderr if the render fails.
        """
        cmd = [
            "python",
            "-m",
            "manim",
            # -ql renders low quality, which determines the 480p15 output
            # directory below. NOTE(review): -p asks Manim to open a preview
            # player; presumably harmless headless, but confirm for servers.
            "-pql",
            "--media_dir",
            temp_dir,
            scene_file,
            scene_name,
        ]

        try:
            subprocess.run(cmd, check=True, capture_output=True, text=True)
        except subprocess.CalledProcessError as e:
            raise HTTPException(status_code=500, detail=f"Render error: {e.stderr}")

        # Output layout is fixed by Manim:
        # <media_dir>/videos/<script stem ("scene")>/<quality ("480p15")>/
        video_path = os.path.join(
            temp_dir, "videos", "scene", "480p15", f"{scene_name}.mp4"
        )
        if not os.path.exists(video_path):
            return None

        # delete=False so the caller can stream the file after we return.
        # Close the handle first (the previous version leaked it), then copy
        # by name instead of reading the whole video into memory.
        temp_video = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4")
        temp_video.close()
        shutil.copyfile(video_path, temp_video.name)
        return temp_video.name
requirements.txt ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ fastapi==0.115.6
2
+ uvicorn==0.34.0
3
+ python-dotenv==1.0.1
4
+ litellm==1.56.10
5
+ python-multipart==0.0.20
6
+ tenacity==9.0.0
7
+ manim==0.18.1
8
+ pypdf2==3.0.1
9
+ gradio==5.9.1