Spaces:
Running
Running
mihalykiss
committed on
Commit
·
c57c848
1
Parent(s):
0010ee8
app.py and reqs.txt
Browse files- app.py +110 -0
- requirements.txt +3 -0
app.py
ADDED
@@ -0,0 +1,110 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
from transformers import AutoTokenizer, AutoModelForSequenceClassification
|
3 |
+
import torch
|
4 |
+
|
5 |
+
model_path = "modernbert.bin"
|
6 |
+
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
7 |
+
|
8 |
+
tokenizer = AutoTokenizer.from_pretrained("answerdotai/ModernBERT-base")
|
9 |
+
model = AutoModelForSequenceClassification.from_pretrained("answerdotai/ModernBERT-base", num_labels=41)
|
10 |
+
model.load_state_dict(torch.load(model_path, map_location=device))
|
11 |
+
model.to(device)
|
12 |
+
model.eval()
|
13 |
+
|
14 |
+
# Classifier output index -> generator name, in head-index order.
# Index 24 ('human') marks human-written text; all others are LLM families.
_LABEL_NAMES = [
    '13B', '30B', '65B', '7B', 'GLM130B', 'bloom_7b',
    'bloomz', 'cohere', 'davinci', 'dolly', 'dolly-v2-12b',
    'flan_t5_base', 'flan_t5_large', 'flan_t5_small',
    'flan_t5_xl', 'flan_t5_xxl', 'gemma-7b-it', 'gemma2-9b-it',
    'gpt-3.5-turbo', 'gpt-35', 'gpt4', 'gpt4o',
    'gpt_j', 'gpt_neox', 'human', 'llama3-70b', 'llama3-8b',
    'mixtral-8x7b', 'opt_1.3b', 'opt_125m', 'opt_13b',
    'opt_2.7b', 'opt_30b', 'opt_350m', 'opt_6.7b',
    'opt_iml_30b', 'opt_iml_max_1.3b', 't0_11b', 't0_3b',
    'text-davinci-002', 'text-davinci-003',
]
label_mapping = dict(enumerate(_LABEL_NAMES))
|
26 |
+
|
27 |
+
def classify_text(text):
    """Classify *text* as human-written or AI-generated.

    Runs the fine-tuned ModernBERT classifier and returns a Markdown
    string containing the predicted source (a model name from
    ``label_mapping``, or "Human Written") and the softmax confidence.

    Args:
        text: Raw input text to analyse.

    Returns:
        A Markdown-formatted result string for the Gradio output box.
    """
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True)
    inputs = {key: value.to(device) for key, value in inputs.items()}

    with torch.no_grad():
        outputs = model(**inputs)
    probabilities = torch.softmax(outputs.logits, dim=1)[0]
    predicted_class = torch.argmax(probabilities).item()
    confidence = probabilities[predicted_class].item()

    # Emoji characters restored from a mojibake-garbled source — the original
    # intent (check mark / robot / chart) is clear from the surrounding text.
    HUMAN_CLASS = 24  # index of the 'human' entry in label_mapping
    if predicted_class == HUMAN_CLASS:
        prediction_label = "✅ **Human Written**"
        high_confidence_suffix = " (Highly Likely Human)"
    else:
        prediction_label = f"🤖 **AI Generated by {label_mapping[predicted_class]}**"
        high_confidence_suffix = " (Highly Likely AI)"

    # Single construction point for the confidence line (was duplicated
    # verbatim in both branches).
    confidence_message = f"📊 **Confidence:** {confidence:.2f}"
    if confidence > 0.8:
        confidence_message += high_confidence_suffix

    return f"**Result:**\n\n{prediction_label}\n\n{confidence_message}"
|
49 |
+
|
50 |
+
# UI copy. Emoji characters restored from a mojibake-garbled source
# (🧠 title icon, 🤖 robot, ✅ check mark) — intent is clear from context.
title = "🧠 SzegedAI ModernBERT Text Detector"
description = (
    """
**AI Detection Tool by SzegedAI**

**Detect AI-generated texts with precision.** This tool uses the new **ModernBERT** model, fine-tuned for machine-generated text detection, and able to detect 40 different models.

- **🤖 Identify AI Models**: If detected as AI-generated, the system will reveal which LLM was responsible for the text generation.
- **✅ Human Verification**: If confidently human, the result will be marked with a **green checkmark**.

**Press the button below to classify your text!**
"""
)
|
63 |
+
|
64 |
+
# Gradio UI: single textbox in, Markdown-ish textbox out, dark-green theme.
# Emoji in labels restored from a mojibake-garbled source (✍️ pencil, 🎯 target).
# NOTE(review): `allow_flagging` and string themes like "dark" are deprecated in
# Gradio 4.x (use `flagging_mode` / theme objects) — confirm the pinned Gradio
# version before upgrading; requirements.txt does not pin one.
iface = gr.Interface(
    fn=classify_text,
    inputs=gr.Textbox(
        label="✍️ Enter Text for Analysis",
        placeholder="Type or paste your content here...",
        lines=5,
        elem_id="text_input_box"
    ),
    outputs=gr.Textbox(
        label="Detection Results",
        lines=4,
        elem_id="result_output_box"
    ),
    title=title,
    description=description,
    theme="dark",
    allow_flagging="never",
    live=False,
    submit_button="🎯 Analyze Now",
    css="""
        #text_input_box, #result_output_box {
            border-radius: 10px;
            border: 2px solid #4CAF50;
            font-size: 18px;
        }
        body {
            background: #1E1E2F;
            color: #E1E1E6;
            font-family: 'Aptos', sans-serif;
            padding: 20px;
        }
        .gradio-container {
            border: 2px solid #4CAF50;
            border-radius: 15px;
            padding: 20px;
            box-shadow: 0px 0px 20px rgba(0,255,0,0.6);
        }
        h1, h2 {
            text-align: center;
            font-size: 32px;
            font-weight: bold;
        }
    """
)
|
108 |
+
|
109 |
+
# Entry point: launch the Gradio app when run as a script.
# share=True requests a public gradio.live tunnel (redundant on HF Spaces,
# which serves the app directly, but harmless).
if __name__ == "__main__":
    iface.launch(share=True)
|
requirements.txt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
gradio
|
2 |
+
torch
|
3 |
+
git+https://github.com/huggingface/transformers
|