# paper-reviewer/app.py
import os

import anthropic
import gradio as gr
from openai import OpenAI

from prompts import codebook
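
# Environment variables read by this app:
#   ANTHROPIC_API_KEY                 - used by call_claude_api
#   OPENAI_API_KEY                    - used by call_openai_api
#   OPENAI_ORG_ID, OPENAI_PROJECT_ID  - OpenAI organization / project identifiers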
_PROMPT_STYLES = [prompt['prompt_style'] for prompt in codebook]
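
# Each codebook entry is assumed to look roughly like the sketch below (hypothetical
# values; the real entries live in prompts.py). The fields used in this file are
# 'prompt_style', 'system_prompt', 'prompts', and 'sections':
#
# {
#     "prompt_style": "AI alignment",
#     "system_prompt": "You are reviewing a research paper. The paper text follows:\n",
#     "prompts": ["Summarize the paper.", "List the main methodological weaknesses."],
#     "sections": ["Summary", "Weaknesses"],
# }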

# Function to call the Anthropic Claude API.
def call_claude_api(input_text, prompt):
    """Run the prompt sequence against Claude and return one response per prompt."""
    api_key = os.environ.get("ANTHROPIC_API_KEY")
    client = anthropic.Anthropic(api_key=api_key)

    responses = []
    messages = []
    print("Using prompt style: ", prompt['prompt_style'])
    # Send each prompt in turn, carrying the conversation history forward so later
    # prompts can refer back to earlier answers.
    for prompt_message in prompt['prompts']:
        messages.append({"role": "user", "content": prompt_message})
        response = client.messages.create(
            model="claude-3-5-sonnet-20240620",
            max_tokens=1024,
            temperature=0,
            # The paper text is appended to the system prompt so every turn sees it.
            system=prompt['system_prompt'] + input_text,
            messages=messages,
        )
        messages.append({
            "role": "assistant",
            "content": response.content[0].text,
        })
        responses.append(response.content[0].text)
    return responses


# Function to call the OpenAI Chat Completions API.
def call_openai_api(input_text, prompt):
    """Run the prompt sequence against GPT-4o mini and return one response per prompt."""
    api_key = os.environ.get("OPENAI_API_KEY")
    org_id = os.environ.get("OPENAI_ORG_ID")
    project_id = os.environ.get("OPENAI_PROJECT_ID")
    client = OpenAI(
        api_key=api_key,
        organization=org_id,
        project=project_id,
    )

    responses = []
    # Unlike the Anthropic call above, the paper text is passed as a system message here.
    messages = [{
        "role": "system", "content": prompt['system_prompt'] + input_text
    }]
    print("Using prompt style: ", prompt['prompt_style'])
    for prompt_message in prompt['prompts']:
        messages.append({"role": "user", "content": prompt_message})
        response = client.chat.completions.create(
            model="gpt-4o-mini",
            max_tokens=1024,
            temperature=0,
            messages=messages,
        )
        messages.append({
            "role": "assistant",
            "content": response.choices[0].message.content,
        })
        responses.append(response.choices[0].message.content)
    return responses


def process_file(text_file, prompt_style_key):
    """Read the uploaded paper, run the selected prompt set, and assemble a Markdown critique."""
    with open(text_file.name, "r") as fd:
        content = fd.read()

    # The dropdown only offers values taken from the codebook, so a match is always found.
    for prompt in codebook:
        if prompt['prompt_style'] == prompt_style_key:
            selected_prompt = prompt
            break

    # Swap the two lines below to critique with Claude instead of OpenAI.
    # responses = call_claude_api(content, prompt=selected_prompt)
    responses = call_openai_api(content, prompt=selected_prompt)

    # Pair each response with its section heading.
    sections = selected_prompt["sections"]
    final_response = ''
    for section, response in zip(sections, responses):
        final_response += f"# {section}\n{response}\n\n"
    return final_response
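
# For a hypothetical prompt style with sections ["Summary", "Weaknesses"], the
# returned string looks roughly like:
#
#   # Summary
#   <model response to the first prompt>
#
#   # Weaknesses
#   <model response to the second prompt>
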
# Gradio Components
file_upload = gr.File(label="Upload your paper (only .txt files supported)")
dropdown = gr.Dropdown(choices=list(_PROMPT_STYLES), label="Research domain", value=_PROMPT_STYLES[0])
output = gr.Textbox(label="Critique")
# Gradio Interface
iface = gr.Interface(
    fn=process_file,
    inputs=[file_upload, dropdown],
    outputs=output,
    title="Alignment Research Critiquer",
    description="An LLM-based app to critique AI alignment research papers."
)

if __name__ == "__main__":
    iface.launch()