Update app.py
app.py CHANGED
@@ -4,15 +4,16 @@ from langchain.chains import LLMChain
 from langchain_google_genai import ChatGoogleGenerativeAI
 import fitz
 import json
+import docx  # Added for Word file handling
 
 # Title
-st.title("π
+st.title("π File-based MCQ Generator")
 
 # Sidebar
 st.sidebar.title("Upload & Settings")
 
-# Upload
-
+# Upload file
+uploaded_file = st.sidebar.file_uploader("Upload a file (PDF or Word)", type=["pdf", "docx"])
 
 # Number of questions
 number_of_questions = st.sidebar.slider("Number of questions", min_value=1, max_value=20, value=5)
@@ -28,7 +29,7 @@ if "quiz_finished" not in st.session_state:
     st.session_state.quiz_finished = False
 
 # Gemini setup
-GOOGLE_API_KEY = "
+GOOGLE_API_KEY = "your_api_key_here"
 llm = ChatGoogleGenerativeAI(
     model="gemini-2.0-flash",
     google_api_key=GOOGLE_API_KEY,
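
The hunk above replaces the previously hard-coded Gemini key with a "your_api_key_here" placeholder. A common alternative, sketched below as an assumption rather than anything in this commit, is to read the key from an environment variable so it never has to live in app.py:

import os
import streamlit as st

# Hypothetical replacement for the hard-coded placeholder: read the key from
# the environment instead of the source file.
GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY", "")
if not GOOGLE_API_KEY:
    st.error("GOOGLE_API_KEY is not configured.")
    st.stop()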
@@ -60,28 +61,23 @@ prompt = PromptTemplate(
 
 mcq_chain = LLMChain(llm=llm, prompt=prompt)
 
-# PDF or
+# Extract text from PDF or Word
 def extract_text(file):
     if file.name.endswith(".pdf"):
         doc = fitz.open(stream=file.read(), filetype="pdf")
-        full_text = ""
-        for page in doc:
-            full_text += page.get_text()
-        doc.close()
-        return full_text
+        return "\n".join([page.get_text() for page in doc])
     elif file.name.endswith(".docx"):
         doc = docx.Document(file)
         return "\n".join([para.text for para in doc.paragraphs])
-
-    return ""
+    return ""
 
 # Generate MCQs
 if st.sidebar.button("Generate MCQs"):
-    if
-        st.error("Please upload a
+    if uploaded_file is None:
+        st.error("Please upload a file.")
     else:
         with st.spinner("Extracting text and generating MCQs..."):
-            text = extract_text(
+            text = extract_text(uploaded_file)
             try:
                 response = mcq_chain.run(text=text, number=str(number_of_questions))
                 mcqs_json = json.loads(response[8:-3])
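
json.loads(response[8:-3]) in the hunk above assumes the model always wraps its answer in a ```json fence: the slice drops the first eight characters (the opening fence plus a newline) and the last three backticks, and json.loads fails whenever the model answers in a slightly different shape. A more tolerant parser, sketched here under the assumption that the payload is the fenced or bare JSON, strips an optional fence before loading:

import json
import re

def parse_mcqs(response: str):
    # Hypothetical helper: accept the model output with or without a
    # Markdown ```json ... ``` fence around the JSON payload.
    cleaned = response.strip()
    cleaned = re.sub(r"^```(?:json)?\s*", "", cleaned)
    cleaned = re.sub(r"\s*```$", "", cleaned)
    return json.loads(cleaned)

With this helper the call site would read mcqs_json = parse_mcqs(response).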
@@ -99,20 +95,18 @@ if st.session_state.mcqs and not st.session_state.quiz_finished:
     q_data = st.session_state.mcqs[idx]
 
     st.subheader(f"Question {idx + 1}: {q_data['question']}")
-
-
-        q_data["options"],
-
-
-
-
-
-
-
-
-
-        st.session_state.quiz_finished = True
-        st.success("π Quiz completed!")
+
+    with st.form(key=f"form_{idx}"):
+        selected_option = st.radio("Choose an answer:", q_data["options"], key=f"radio_{idx}")
+        submitted = st.form_submit_button("Next")
+
+        if submitted:
+            st.session_state.user_answers[idx] = selected_option
+            if idx < len(st.session_state.mcqs) - 1:
+                st.session_state.current_q += 1
+            else:
+                st.session_state.quiz_finished = True
+                st.success("π Quiz completed!")
 
 # Show result
 if st.session_state.quiz_finished:
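
In the final hunk, each question is wrapped in an st.form: submitting records the chosen option in st.session_state.user_answers, advances st.session_state.current_q, and sets quiz_finished after the last question, with Streamlit rerunning the script on every submit so the updated state drives what is rendered next. Only the quiz_finished default is visible in this diff, so the following sketch of the session-state initialization the flow appears to rely on is an assumption about the rest of app.py:

import streamlit as st

# Assumed session-state defaults; only "quiz_finished" is confirmed by the diff.
defaults = {
    "mcqs": [],            # questions parsed from the LLM response
    "current_q": 0,        # index of the question currently displayed
    "user_answers": {},    # question index -> selected option
    "quiz_finished": False,
}
for key, value in defaults.items():
    if key not in st.session_state:
        st.session_state[key] = value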