Harald Nilsen committed on
Commit 5a877d0 · 1 Parent(s): c18feb7
Files changed (1)
  1. app.py +71 -4
app.py CHANGED
@@ -1,7 +1,74 @@
  import gradio as gr
+ from transformers import pipeline
+ import math

- def greet(name):
-     return "Hello " + name + "!!"
+ print("🎭 Loading sentiment analysis pipeline...")
+ sentiment_analyzer = pipeline(
+     "sentiment-analysis",
+     model="cardiffnlp/twitter-roberta-base-sentiment-latest",
+     tokenizer="cardiffnlp/twitter-roberta-base-sentiment-latest",
+     return_all_scores=True,
+     truncation=True,
+     padding=True,
+     device_map="auto"
+ )

- demo = gr.Interface(fn=greet, inputs="text", outputs="text")
- demo.launch()
+ def _polarity_from_scores(scores):
+     # scores is a list like [{'label': 'negative', 'score': 0.01}, {'label': 'neutral', ...}, {'label': 'positive', ...}]
+     probs = {s["label"].lower(): float(s["score"]) for s in scores}
+     p_pos = probs.get("positive", 0.0)
+     p_neg = probs.get("negative", 0.0)
+     return p_pos - p_neg  # range roughly [-1, 1]
+
+ def analyze_tone_and_bias(text, chunk_size=500, neutral_margin=0.1):
+     """
+     Analyze emotional tone and potential bias in sources.
+     Uses polarity = P(positive) - P(negative) and respects neutral.
+     """
+     # Make chunks
+     chunks = [text[i:i+chunk_size] for i in range(0, len(text), chunk_size)]
+     chunks = [c for c in chunks if len(c.strip()) > 10]
+     if not chunks:
+         return {"error": "Could not analyze sentiment"}
+
+     # Batch inference
+     batch_outputs = sentiment_analyzer(chunks)  # list of lists of dicts
+
+     # Compute per-chunk polarity
+     chunk_polarities = [_polarity_from_scores(scores) for scores in batch_outputs]
+
+     # Aggregate
+     avg_polarity = sum(chunk_polarities) / len(chunk_polarities)
+     if avg_polarity > neutral_margin:
+         overall = "POSITIVE"
+     elif avg_polarity < -neutral_margin:
+         overall = "NEGATIVE"
+     else:
+         overall = "NEUTRAL"
+
+     # Build a compact per-chunk view
+     # Use the model's top label for human-readable chunk summaries
+     tops = []
+     for scores in batch_outputs[:5]:
+         top = max(scores, key=lambda s: s["score"])
+         tops.append({"label": top["label"].upper(), "score": float(top["score"])})
+
+     return {
+         "overall_sentiment": overall,
+         "confidence": float(abs(avg_polarity)),
+         "chunk_analysis": tops
+     }
+
+ print("✅ Sentiment analysis pipeline ready")
+
+ def analyze_sentiment(text):
+     result = analyze_tone_and_bias(text)
+     return result['chunk_analysis'][0]
+
+ demo = gr.Interface(fn=analyze_sentiment, inputs="textbox", outputs="textbox")
+
+ test_txt = "Nærmeste samtlige biler og vogntog som passerte Ørskogfjellet da kontrollen pågikk ble stoppet. Hele 50 personer fra de ulike etatene var involvert i kontrollen."
+
+ print(analyze_sentiment(test_txt))
+
+ demo.launch(share=True)
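
For reference, here is a minimal standalone sketch (not part of the commit) of the polarity aggregation that the new analyze_tone_and_bias performs, run on hand-made score lists so the expected input and output shapes can be checked without loading the model. The score values below are invented for illustration.

# Same aggregation logic as in the committed code, on fake pipeline output.
def polarity(scores):
    # scores: list of {"label": ..., "score": ...} dicts for one chunk
    probs = {s["label"].lower(): float(s["score"]) for s in scores}
    return probs.get("positive", 0.0) - probs.get("negative", 0.0)

# Invented per-chunk outputs in the shape the pipeline returns with all scores.
fake_outputs = [
    [{"label": "negative", "score": 0.05}, {"label": "neutral", "score": 0.15}, {"label": "positive", "score": 0.80}],
    [{"label": "negative", "score": 0.10}, {"label": "neutral", "score": 0.70}, {"label": "positive", "score": 0.20}],
]

polarities = [polarity(s) for s in fake_outputs]   # [0.75, 0.10]
avg = sum(polarities) / len(polarities)            # 0.425
overall = "POSITIVE" if avg > 0.1 else "NEGATIVE" if avg < -0.1 else "NEUTRAL"
print(overall, round(abs(avg), 3))                 # POSITIVE 0.425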