Update main.py
main.py
CHANGED
@@ -132,9 +132,9 @@ def get_summarizer():
 
 
 MODEL_CHOICES = [
-    "
-    "
-    "
+    "cmarkea/flan-t5-base-fr",  # Best for French
+    "bigscience/bloomz-560m",  # Good multilingual
+    "google/flan-t5-small"  # Fallback
 ]
 
 qa_pipeline = None
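The hunks reference module-level qa_pipeline and current_model objects whose loader sits outside the diff. A minimal sketch of what that loader could look like, assuming transformers' pipeline() with a first-model-that-loads fallback; the load_qa_pipeline name and the per-model task split are assumptions (the flan-t5 checkpoints are seq2seq, while bloomz-560m is a causal LM):

# Hypothetical loader, not shown in the diff; sketch only.
import logging
from transformers import pipeline

logger = logging.getLogger(__name__)

MODEL_CHOICES = [
    "cmarkea/flan-t5-base-fr",  # Best for French
    "bigscience/bloomz-560m",  # Good multilingual
    "google/flan-t5-small"  # Fallback
]

qa_pipeline = None
current_model = None

def load_qa_pipeline():
    """Try each candidate model in order; keep the first that loads."""
    global qa_pipeline, current_model
    for name in MODEL_CHOICES:
        # Assumption: the T5 variants are seq2seq, bloomz is causal.
        task = "text-generation" if "bloom" in name else "text2text-generation"
        try:
            qa_pipeline = pipeline(task, model=name)
            current_model = name
            return
        except Exception as exc:  # model unavailable, out of memory, etc.
            logger.warning("Could not load %s: %s", name, exc)
    # Leaves qa_pipeline as None; the endpoint then answers 503.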
@@ -874,7 +874,7 @@ async def question_answering(
     file: Optional[UploadFile] = File(None)
 ):
     if qa_pipeline is None:
-        raise HTTPException(503, detail="
+        raise HTTPException(503, detail="Système indisponible")
 
     try:
         # Process file if provided
@@ -882,66 +882,65 @@ async def question_answering(
         if file:
             _, content = await process_uploaded_file(file)
             full_text = extract_text(content, file.filename.split('.')[-1])
-            context = re.sub(r'\s+', ' ', full_text).strip()[:
+            context = re.sub(r'\s+', ' ', full_text).strip()[:1500]  # Clean and limit context
 
-        # Special handling for theme
-        theme_keywords = ["thème", "theme", "sujet principal", "quoi le sujet"
+        # Special handling for theme detection
+        theme_keywords = ["thème", "theme", "sujet principal", "quoi le sujet"]
         if any(kw in question.lower() for kw in theme_keywords):
             if not context:
                 return {
                     "question": question,
-                    "answer": "
+                    "answer": "Veuillez fournir un document pour identifier le thème",
                     "context_used": False
                 }
 
-            #
+            # Optimized theme extraction prompt
             theme_prompt = (
-                "Extrait le thème principal en
-                "
+                "Extrait uniquement le thème principal en une phrase concise en français. "
+                "Ne donne pas d'exemples ou de détails. Texte:\n" + context
            )
 
            theme_result = qa_pipeline(
                theme_prompt,
-                max_length=
-                num_beams=
-                temperature=0.
-                repetition_penalty=
+                max_length=50,  # Very short for single-sentence answers
+                num_beams=1,  # More deterministic
+                temperature=0.1,  # Minimal creativity
+                repetition_penalty=3.0,
+                no_repeat_ngram_size=2
            )
 
+            # Post-processing cleanup
+            clean_answer = theme_result[0]["generated_text"].split(".", 1)[0] + "."
+            clean_answer = re.sub(r"^(Le|La)\s+", "", clean_answer)  # Remove articles
+
            return {
                "question": question,
-                "answer":
+                "answer": clean_answer,
                "model": current_model,
                "context_used": True
            }
 
        # Standard QA handling
-        input_text = f"Réponds en français à: {question}"
+        input_text = f"Réponds brièvement en français à: {question}"
        if context:
-            input_text += f" en utilisant
+            input_text += f" en utilisant uniquement ceci: {context[:1000]}"
 
        result = qa_pipeline(
            input_text,
-            max_length=
-            num_beams=
-            temperature=0.
-            repetition_penalty=2.0
+            max_length=100,
+            num_beams=2,
+            temperature=0.3
        )
 
-        # Post-process answer
-        answer = result[0]["generated_text"]
-        if answer.lower().startswith(("question:", "réponse:")):
-            answer = answer.split(":", 1)[1].strip()
-
        return {
            "question": question,
-            "answer":
+            "answer": result[0]["generated_text"],
            "model": current_model,
            "context_used": context is not None
        }
 
    except Exception as e:
-        logger.error(f"
+        raise
+        logger.error(f"Erreur: {str(e)}")
        raise HTTPException(500, "Erreur de traitement")
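The added theme post-processing keeps only the first sentence of the generation and strips a leading French article. A quick illustration on a made-up model output:

import re

# Made-up generation, for illustration only.
generated = "Le réchauffement climatique et ses effets. Par exemple, les glaciers..."

# Keep the first sentence, then re-append the period.
clean_answer = generated.split(".", 1)[0] + "."
# Strip a leading article, as the diff does.
clean_answer = re.sub(r"^(Le|La)\s+", "", clean_answer)

print(clean_answer)  # -> "réchauffement climatique et ses effets."

Note that the article strip leaves a lowercase noun phrase, and an "Un"/"Une" article would survive the regex; whether that is acceptable depends on how the answer is displayed.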
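For completeness, a request that exercises the endpoint; the /qa path and the question form field are assumptions, since the route decorator and the rest of the signature sit outside these hunks:

# Hypothetical client call; the /qa route and field names are assumptions.
import requests

with open("rapport.pdf", "rb") as fh:
    resp = requests.post(
        "http://localhost:8000/qa",
        data={"question": "Quel est le thème principal ?"},
        files={"file": ("rapport.pdf", fh, "application/pdf")},
    )
print(resp.json())  # {"question": ..., "answer": ..., "model": ..., "context_used": true}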