David Driscoll committed
Commit 1a94dbc
1 parent: ee83343
Slider
app.py CHANGED
@@ -123,7 +123,7 @@ def compute_faces_overlay(image):
 # -----------------------------
 # New Facemesh Functions (with connected red lines and mask output)
 # -----------------------------
-def compute_facemesh_overlay(image):
+def compute_facemesh_overlay(image, confidence=0.5):
     """
     Uses MediaPipe Face Mesh to detect and draw facial landmarks.
     Draws green dots for landmarks and connects them with thin red lines.
@@ -137,9 +137,9 @@ def compute_facemesh_overlay(image):
     annotated = frame_bgr.copy()
     mask = np.zeros_like(frame_bgr)
 
-    # Initialize Face Mesh in static mode
+    # Initialize Face Mesh in static mode with adjustable confidence
     face_mesh = mp.solutions.face_mesh.FaceMesh(
-        static_image_mode=True, max_num_faces=1, refine_landmarks=True, min_detection_confidence=
+        static_image_mode=True, max_num_faces=1, refine_landmarks=True, min_detection_confidence=confidence
     )
     results = face_mesh.process(cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB))
 
@@ -169,8 +169,8 @@ def compute_facemesh_overlay(image):
     face_mesh.close()
     return annotated, mask, text
 
-def analyze_facemesh(image):
-    annotated_image, mask_image, text = compute_facemesh_overlay(image)
+def analyze_facemesh(image, confidence):
+    annotated_image, mask_image, text = compute_facemesh_overlay(image, confidence)
     return (
         annotated_image,
         mask_image,
@@ -286,14 +286,22 @@ faces_interface = gr.Interface(
 
 facemesh_interface = gr.Interface(
     fn=analyze_facemesh,
-    inputs=
+    inputs=[
+        gr.Image(label="Upload an Image for Facemesh"),
+        gr.Slider(0.0, 1.0, value=0.5, label="Detection Confidence", elem_id="confidence_slider")
+    ],
     outputs=[
         gr.Image(type="numpy", label="Annotated Output"),
         gr.Image(type="numpy", label="Mask Output"),
         gr.HTML(label="Facemesh Analysis")
     ],
     title="<div style='color:#00ff00;'>Facemesh",
-    description="
+    description="""
+    <div style='color:#00ff00;'>
+    Detects facial landmarks using MediaPipe Face Mesh.
+    <button onclick="document.getElementById('confidence_slider').value = 0.5; document.getElementById('confidence_slider').dispatchEvent(new Event('change'))" style="margin-left:10px;">Reset to Default</button>
+    </div>
+    """,
     examples=SAMPLE_IMAGES,
     live=False
 )
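For a quick sanity check of the new parameter outside Gradio, a standalone sketch along the following lines could be used. It is not part of app.py: the image path "face.jpg" is a placeholder, and it assumes mediapipe and opencv-python are installed, as the app already requires. It sweeps a few thresholds through the same FaceMesh constructor that the diff parameterizes.

import cv2
import mediapipe as mp

# OpenCV loads images as BGR; MediaPipe Face Mesh expects RGB.
image_bgr = cv2.imread("face.jpg")  # placeholder path for a local test image
image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)

# Try a few detection thresholds to see where detection drops out.
for confidence in (0.2, 0.5, 0.8):
    face_mesh = mp.solutions.face_mesh.FaceMesh(
        static_image_mode=True,
        max_num_faces=1,
        refine_landmarks=True,
        min_detection_confidence=confidence,
    )
    results = face_mesh.process(image_rgb)
    detected = results.multi_face_landmarks is not None
    print(f"min_detection_confidence={confidence}: face detected={detected}")
    face_mesh.close()

In the interface itself, gr.Interface passes the components listed in inputs to fn positionally, so the uploaded image feeds the image argument and the slider value feeds confidence in analyze_facemesh(image, confidence).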