tzzte committed · verified
Commit bd3cbc5 · 1 Parent(s): 21e5853

Update Echox_copy_stream.py

Files changed (1):
  1. Echox_copy_stream.py  +1 -94
Echox_copy_stream.py CHANGED
@@ -85,96 +85,6 @@ def load_speech_model(device):
     # vocoder = load_units_vocoder(voc_cfg, device)
     # return vocoder, voc_cfg
 
-class ScrollingDisplay:
-    def __init__(self, width=71, height=10):
-        self.width = width
-        self.height = height
-        self.lines = [""] * height
-        self.current_line = 0
-        self.current_pos = 0
-
-    def add_token(self, token):
-        if '\n' in token:
-            parts = token.split('\n')
-            for i, part in enumerate(parts):
-                if i > 0:
-                    self._new_line()
-                if part:
-                    self._add_text_to_current_line(part)
-        else:
-            self._add_text_to_current_line(token)
-
-    def _add_text_to_current_line(self, text):
-        if not text:
-            return
-
-        is_continuation = (not text.startswith(' ') and
-                           self.current_pos > 0 and
-                           not self.lines[self.current_line].endswith(' '))
-
-        if is_continuation:
-            current_line_content = self.lines[self.current_line]
-            last_space_pos = current_line_content.rfind(' ')
-
-            if last_space_pos != -1:
-                last_word_start = last_space_pos + 1
-            else:
-                last_word_start = 0
-
-            full_word = current_line_content[last_word_start:] + text
-
-            if self.current_pos + len(text) > self.width:
-                new_content = current_line_content[:last_word_start].rstrip()
-                self.lines[self.current_line] = new_content
-                self.current_pos = len(new_content)
-                self._new_line()
-                text = full_word
-            else:
-                self.lines[self.current_line] += text
-                self.current_pos += len(text)
-                return
-
-        if self.current_pos > 0 and len(text) > (self.width - self.current_pos) and len(text) <= self.width:
-            self._new_line()
-
-        while len(text) > 0:
-            if self.current_pos == 0:
-                text = text.lstrip(' ')
-                if not text:
-                    break
-
-            remaining = self.width - self.current_pos
-            if len(text) > remaining:
-                part = text[:remaining]
-                self.lines[self.current_line] += part
-                self.current_pos += len(part)
-                text = text[remaining:]
-                self._new_line()
-            else:
-                self.lines[self.current_line] += text
-                self.current_pos += len(text)
-                break
-
-    def _new_line(self):
-        self.current_line += 1
-        self.current_pos = 0
-
-        if self.current_line >= self.height:
-            self.lines = self.lines[1:] + [""]
-            self.current_line = self.height - 1
-
-    def display(self):
-        res = ""
-        for i, line in enumerate(self.lines):
-            padded_line = line.ljust(self.width)
-            if i == self.current_line:
-                cursor_pos = min(self.current_pos, self.width - 1)
-                display_line = padded_line[:cursor_pos] + "█" + padded_line[cursor_pos + 1:]
-                res += f"{display_line}\n"
-            else:
-                res += f"{padded_line}\n"
-        return res
-
 class EchoxAssistant():
     def __init__(self):
         class BasicSetting:
@@ -368,8 +278,6 @@ class EchoxAssistant():
         do_sample = kwargs.get('do_sample', True)
 
         current_text = ""
-        display = ScrollingDisplay()
-
         accumulated_hidden_states = []
         accumulated_tokens = []
         similarity_scores = []
@@ -443,8 +351,7 @@ class EchoxAssistant():
             similarity_scores.append(similarity)
 
             token_text = self.tokenizer.decode([next_token.item()], skip_special_tokens=True)
-            display.add_token(token_text)
-            current_text = display.display()
+            current_text += token_text
 
             yield current_text, None
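
For context, a minimal sketch of the streaming behaviour after this change: each decoded token is appended to current_text and the running string is yielded, instead of being routed through the removed 71x10 ScrollingDisplay buffer. The stream_text generator and the driver loop below are illustrative only and are not part of the repository.

# Hypothetical sketch (not from the repo): plain accumulation replaces the
# fixed-size scrolling window; the (text, audio) yield shape matches the diff.
def stream_text(token_texts):
    current_text = ""
    for token_text in token_texts:   # stands in for tokenizer.decode(...) per step
        current_text += token_text   # new behaviour: append the decoded token
        yield current_text, None     # consumer re-renders the growing string

if __name__ == "__main__":
    for text, _ in stream_text(["Hello", ",", " world", "!"]):
        print(repr(text))

Each iteration prints a longer prefix ('Hello', 'Hello,', ...), which is what a UI now receives directly rather than a pre-wrapped text window with a cursor glyph.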