Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -1,140 +1,168 @@
import streamlit as st
- from transformers import pipeline
from PIL import Image
- import

# =======================
- #
# =======================
@st.cache_resource
- def
    """
-     Load the pre-trained
-     Cached to prevent reloading on every app interaction.
    """
-

- model = load_model()

-
-
-
-
-
-
-
-     explanation = f"The model predicts **{label}** with a confidence of {confidence:.2%}."
-     return label, confidence, explanation

- #
-
-
-
-
    """
-
-     Cached to avoid repeated API calls.
    """
-
-
-
-
-         "limit": 5
-     }
-     response = requests.get(api_url, params=params)
-     if response.status_code == 200:
-         papers = response.json().get("data", [])
-         summaries = []
-         for paper in papers:
-             title = paper.get("title", "No Title")
-             abstract = paper.get("abstract", "No Abstract")
-             url = paper.get("url", "No URL")
-             summaries.append(f"**{title}**\n\n{abstract}\n\n[Read More]({url})")
-         return "\n\n---\n\n".join(summaries)
-     else:
-         return "Error fetching research papers. Please try again later."

-
-
-
-
-
-
-
-
-

- st.sidebar.header("Navigation")
- app_mode = st.sidebar.radio(
-     "Choose a feature",
-     ["🔍 Skin Cancer Classification", "📄 Latest Research Papers", "ℹ️ About the Model"]
- )

# =======================
- #
# =======================
- if app_mode == "🔍 Skin Cancer Classification":
-     st.title("🔍 Skin Cancer Classification")
-     st.write(
-         "Upload an image of the skin lesion, and the AI model will classify it as one of several types, "
-         "such as melanoma, basal cell carcinoma, or benign keratosis-like lesions."
-     )

-

-
-
        st.image(image, caption="Uploaded Image", use_column_width=True)

-         #
-         st.
-

-         #
-         st.markdown(
-
-         st.markdown(f"### **Explanation**: {explanation}")

- #
-
-
- elif app_mode == "📄 Latest Research Papers":
-     st.title("📄 Latest Research Papers")
-     st.write(
-         "Fetch the latest research papers on skin cancer to stay updated on recent findings and innovations."
-     )

-
-
-
-         st.markdown(summaries)

- #
-
-
- elif
-
-
-
-
-
-
-
-
-
-     - Melanocytic nevi
-     - Melanoma
-     - Dermatofibroma
-     - **Performance Metrics:**
-       - **Validation Accuracy:** 96.95%
-       - **Train Accuracy:** 96.14%
-       - **Loss Function:** Cross-Entropy
-     """)

# =======================
# Footer
# =======================
-
-
-
""")
+ import os
+ import traceback
+ import numpy as np
import streamlit as st
from PIL import Image
+ from transformers import pipeline
+ import matplotlib.pyplot as plt
+ from skimage.color import rgb2gray
+ from skimage.filters import threshold_otsu
+
+
+ # =======================
+ # Configuration and Setup
+ # =======================
+
+ # Streamlit Page Configuration
+ st.set_page_config(
+     page_title="AI Cancer Detection Platform",
+     page_icon="🩺",
+     layout="wide",
+     initial_sidebar_state="expanded",
+     menu_items={
+         "About": "### AI Cancer Detection Platform\n"
+                  "Developed to classify cancer images and provide research insights."
+     }
+ )
+

# =======================
+ # Helper Functions
# =======================
+
@st.cache_resource
+ def load_pipeline():
    """
+     Load the pre-trained image classification pipeline using PyTorch as the backend.
    """
+     try:
+         model_pipeline = pipeline(
+             "image-classification",
+             model="Anwarkh1/Skin_Cancer-Image_Classification",
+             framework="pt"  # Force PyTorch backend
+         )
+         return model_pipeline
+     except Exception as e:
+         st.error(f"Error loading model: {e}")
+         traceback.print_exc()
+         st.stop()


+ def process_image(image):
+     """
+     Perform image processing to extract features for better visualization.
+     """
+     try:
+         # Convert image to grayscale
+         gray_image = rgb2gray(np.array(image))

+         # Apply Otsu's threshold
+         thresh = threshold_otsu(gray_image)
+         binary = gray_image > thresh
+
+         # Calculate edge pixel percentage
+         edge_pixels = np.sum(binary)
+         total_pixels = binary.size
+         edge_percentage = (edge_pixels / total_pixels) * 100
+
+         # Generate plots
+         fig, ax = plt.subplots(1, 2, figsize=(10, 5))
+         ax[0].imshow(gray_image, cmap="gray")
+         ax[0].set_title("Grayscale Image")
+         ax[0].axis("off")
+
+         ax[1].imshow(binary, cmap="gray")
+         ax[1].set_title("Binary Image (Thresholded)")
+         ax[1].axis("off")
+
+         plt.tight_layout()
+         st.pyplot(fig)
+
+         # Feature description
+         return f"{edge_percentage:.2f}% of the image contains edge pixels after thresholding."
+
+     except Exception as e:
+         st.error(f"Error processing image: {e}")
+         traceback.print_exc()
+         return "No significant features extracted."
+
+
+ def classify_image(image, model_pipeline):
    """
+     Classify the uploaded image using the pre-trained model pipeline.
    """
+     try:
+         # Resize image to 224x224 as required by the model
+         image_resized = image.resize((224, 224))
+         predictions = model_pipeline(image_resized)

+         if predictions:
+             top_prediction = predictions[0]
+             label = top_prediction["label"]
+             score = top_prediction["score"]
+             return label, score
+         else:
+             st.warning("No predictions were made.")
+             return None, None
+     except Exception as e:
+         st.error(f"Error during classification: {e}")
+         traceback.print_exc()
+         return None, None


# =======================
+ # Streamlit Main Content
# =======================

+ st.title("🩺 AI-Powered Cancer Detection")

+ # Image Upload Section
+ st.subheader("📤 Upload a Cancer Image")
+ uploaded_image = st.file_uploader("Choose an image file...", type=["png", "jpg", "jpeg"])
+
+ if uploaded_image is not None:
+     try:
+         # Open the uploaded image
+         image = Image.open(uploaded_image).convert("RGB")
+
+         # Display the uploaded image
        st.image(image, caption="Uploaded Image", use_column_width=True)

+         # Process the image
+         st.markdown("### 🛠️ Image Processing")
+         processed_features = process_image(image)

+         # Load the model pipeline
+         st.markdown("### 🔍 Classifying the Image")
+         model_pipeline = load_pipeline()

+         # Classify the image
+         with st.spinner("Classifying..."):
+             label, confidence = classify_image(image, model_pipeline)

+         if label and confidence:
+             st.write(f"**Prediction:** {label}")
+             st.write(f"**Confidence:** {confidence:.2%}")

+             # Highlight prediction confidence
+             if confidence > 0.80:
+                 st.success("High confidence in the prediction.")
+             elif confidence > 0.50:
+                 st.warning("Moderate confidence in the prediction.")
+             else:
+                 st.error("Low confidence in the prediction.")
+
+     except Exception as e:
+         st.error(f"An unexpected error occurred: {e}")
+         traceback.print_exc()
+ else:
+     st.info("Upload an image to start the classification.")

# =======================
# Footer
# =======================
+
+ st.markdown("""
+ ---
+ **AI Cancer Detection Platform** | This application is for informational purposes only and is not intended for medical diagnosis.
""")
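For reference, here is a minimal standalone sketch of the classification call that classify_image() in the new app.py is built around. It is an illustrative assumption, not part of the commit: the transformers "image-classification" pipeline accepts a PIL image and returns a list of {"label", "score"} dicts ordered by descending score, which is the structure the app reads via predictions[0]. The file name lesion.jpg is a hypothetical placeholder.

from PIL import Image
from transformers import pipeline

# Same checkpoint and forced PyTorch backend as the updated load_pipeline()
classifier = pipeline(
    "image-classification",
    model="Anwarkh1/Skin_Cancer-Image_Classification",
    framework="pt",
)

# Hypothetical input file; the app resizes to 224x224 before predicting
image = Image.open("lesion.jpg").convert("RGB").resize((224, 224))
predictions = classifier(image)
print(predictions[0]["label"], f"{predictions[0]['score']:.2%}")

This sketch only needs transformers, torch and Pillow; the Space itself additionally relies on streamlit, numpy, matplotlib and scikit-image, since the new imports pull in each of them.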