-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathcontext_manager.py
More file actions
527 lines (436 loc) · 18.1 KB
/
context_manager.py
File metadata and controls
527 lines (436 loc) · 18.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
"""
FAISS上下文管理器 - 使用向量相似度检索优化上下文
通过语义搜索提取最相关的历史对话,降低token消耗
"""
import os
import json
import pickle
import numpy as np
from typing import List, Dict, Any, Optional, Tuple
from datetime import datetime
import logging
# 配置HuggingFace镜像源(解决国内网络问题)
os.environ.setdefault('HF_ENDPOINT', 'https://hf-mirror.com')
# 延迟导入,避免启动时加载大型模型
_faiss = None
_sentence_transformers = None
def _lazy_import_faiss():
    """Import the faiss module on first use and cache it at module level."""
    global _faiss
    if _faiss is not None:
        return _faiss
    import faiss
    _faiss = faiss
    return _faiss
def _lazy_import_sentence_transformers():
    """Import the SentenceTransformer class on first use and cache it at module level."""
    global _sentence_transformers
    if _sentence_transformers is not None:
        return _sentence_transformers
    from sentence_transformers import SentenceTransformer
    _sentence_transformers = SentenceTransformer
    return _sentence_transformers
# Module-level logger, following the standard logging.getLogger(__name__) pattern.
logger = logging.getLogger(__name__)
class FAISSContextManager:
    """
    FAISS-based smart context manager.

    Features:
    1. Stores conversation history as embedding vectors.
    2. Retrieves the most relevant past messages by semantic similarity.
    3. Supports incremental updates and on-disk persistence.
    4. Automatically caps the index size to avoid unbounded growth.
    """
    def __init__(
        self,
        embedding_model: str = "paraphrase-multilingual-MiniLM-L12-v2",
        index_path: Optional[str] = None,
        dimension: int = 384,
        max_history_size: int = 1000,
        use_gpu: bool = False
    ):
        """
        Initialize the FAISS context manager.

        Args:
            embedding_model: Sentence-BERT model name (small multilingual model by default).
            index_path: Persistence path for the index (None keeps everything in memory only).
            dimension: Embedding dimension (must match the chosen model; 384 for the default).
            max_history_size: Maximum number of stored records (oldest pruned past this).
            use_gpu: Whether to move the index to GPU (False = CPU, the default).
        """
        self.embedding_model_name = embedding_model
        self.dimension = dimension
        self.max_history_size = max_history_size
        self.index_path = index_path
        self.use_gpu = use_gpu
        # Model and index are loaded lazily on first use.
        self._model = None
        self._index = None
        # Per-record metadata, kept parallel to the FAISS index rows.
        self._metadata: List[Dict[str, Any]] = []
        # If a path was given and an index already exists there, load it now.
        if index_path and os.path.exists(index_path):
            self._load_index()

    def _get_model(self):
        """Lazily load and cache the embedding model."""
        if self._model is None:
            print(f" 正在下载/加载模型: {self.embedding_model_name}")
            print(" (首次运行需要下载约120MB,请稍候...)")
            logger.info(f"正在加载embedding模型: {self.embedding_model_name}")
            SentenceTransformer = _lazy_import_sentence_transformers()
            self._model = SentenceTransformer(self.embedding_model_name)
            print(" 模型加载完成!")
            logger.info("Embedding模型加载完成")
        return self._model

    def warmup(self):
        """
        Warm up: load the model and index ahead of time to avoid first-use latency.
        Recommended at application startup.
        """
        self._get_model()
        self._get_index()
        logger.info("FAISS上下文管理器预热完成")

    def _get_index(self):
        """Lazily create the FAISS index."""
        if self._index is None:
            faiss = _lazy_import_faiss()
            # IndexFlatL2 performs exact L2-distance search (no training needed).
            self._index = faiss.IndexFlatL2(self.dimension)
            # Optionally move the index to GPU when one is available.
            if self.use_gpu and faiss.get_num_gpus() > 0:
                self._index = faiss.index_cpu_to_gpu(
                    faiss.StandardGpuResources(), 0, self._index
                )
                logger.info("FAISS索引已移至GPU")
        return self._index

    def embed_text(self, text: str) -> np.ndarray:
        """
        Convert a text string to an embedding vector.

        Args:
            text: Input text.
        Returns:
            Vector representation of shape (dimension,), dtype float32.
        """
        model = self._get_model()
        embedding = model.encode(text, convert_to_numpy=True)
        # FAISS requires float32 inputs.
        return embedding.astype('float32')

    def embed_texts(self, texts: List[str]) -> np.ndarray:
        """
        Convert a batch of texts to embedding vectors.

        Args:
            texts: List of input texts.
        Returns:
            Embedding matrix of shape (n, dimension), dtype float32.
        """
        model = self._get_model()
        embeddings = model.encode(texts, convert_to_numpy=True, show_progress_bar=False)
        return embeddings.astype('float32')

    def add_message(
        self,
        role: str,
        content: str,
        session_id: str,
        client_id: str,
        timestamp: Optional[str] = None
    ) -> int:
        """
        Add a single message to the index.

        Args:
            role: Message role ("user"/"assistant").
            content: Message content; blank messages are skipped.
            session_id: Session identifier.
            client_id: Client identifier.
            timestamp: Optional ISO timestamp (defaults to now).
        Returns:
            The index id assigned to the message, or -1 if the content was blank.
            NOTE: ids may shift when old records are trimmed.
        """
        if not content.strip():
            logger.warning("跳过空消息")
            return -1
        # Generate the embedding; FAISS expects a 2-D (1, dimension) array.
        embedding = self.embed_text(content)
        embedding = embedding.reshape(1, -1)
        index = self._get_index()
        index.add(embedding)
        # Store metadata aligned with the newly added index row.
        metadata = {
            "role": role,
            "content": content,
            "session_id": session_id,
            "client_id": client_id,
            "timestamp": timestamp or datetime.now().isoformat(),
            "index_id": len(self._metadata)  # current row position
        }
        self._metadata.append(metadata)
        # Prune oldest records when the cap is exceeded.
        if len(self._metadata) > self.max_history_size:
            self._trim_old_records()
        return metadata["index_id"]

    def add_messages_batch(
        self,
        messages: List[Dict[str, Any]]
    ) -> List[int]:
        """
        Add messages in batch (more efficient than one-by-one).

        Args:
            messages: List of dicts with keys role, content, session_id,
                client_id, timestamp. Blank-content messages are skipped.
        Returns:
            List of assigned index ids (may shift after trimming).
        """
        if not messages:
            return []
        # Drop messages whose content is empty or whitespace-only.
        valid_messages = [msg for msg in messages if msg.get("content", "").strip()]
        if not valid_messages:
            return []
        # Embed all contents in a single batched call.
        contents = [msg["content"] for msg in valid_messages]
        embeddings = self.embed_texts(contents)
        index = self._get_index()
        index.add(embeddings)
        # Store metadata for each added row.
        added_ids = []
        for i, msg in enumerate(valid_messages):
            metadata = {
                "role": msg.get("role", "user"),
                "content": msg["content"],
                "session_id": msg.get("session_id", "unknown"),
                "client_id": msg.get("client_id", "unknown"),
                "timestamp": msg.get("timestamp") or datetime.now().isoformat(),
                "index_id": len(self._metadata)
            }
            self._metadata.append(metadata)
            added_ids.append(metadata["index_id"])
        # Prune oldest records when the cap is exceeded.
        if len(self._metadata) > self.max_history_size:
            self._trim_old_records()
        return added_ids

    def search_relevant_context(
        self,
        query: str,
        top_k: int = 5,
        session_id: Optional[str] = None,
        client_id: Optional[str] = None,
        exclude_current_session: bool = False
    ) -> List[Dict[str, Any]]:
        """
        Search for the context most relevant to a query.

        Args:
            query: Query text.
            top_k: Number of top results to return.
            session_id: Optional. When exclude_current_session is False, restrict
                the search to this session; when True, it identifies the CURRENT
                session to exclude (cross-session retrieval).
            client_id: Optional client filter (lower priority than session_id).
            exclude_current_session: Exclude the current session's messages.
        Returns:
            Relevant messages in descending similarity order, each containing
            role, content, score, timestamp, etc.
        """
        index = self._get_index()
        if index.ntotal == 0:
            logger.warning("FAISS索引为空,无法检索")
            return []
        # Embed the query; FAISS expects a (1, dimension) array.
        query_embedding = self.embed_text(query)
        query_embedding = query_embedding.reshape(1, -1)
        # Over-fetch so post-filtering can still yield top_k results.
        search_k = min(top_k * 3, index.ntotal)
        distances, indices = index.search(query_embedding, search_k)
        results = []
        for idx, distance in zip(indices[0], distances[0]):
            # FAISS pads with -1 when fewer than search_k results exist.
            if idx < 0 or idx >= len(self._metadata):
                continue
            metadata = self._metadata[idx]
            # Apply filters.
            # BUGFIX: previously, passing session_id together with
            # exclude_current_session=True first kept only that session and then
            # excluded it, so the result was always empty. Now session_id is
            # treated as the session to EXCLUDE in that mode.
            if exclude_current_session:
                if session_id and metadata["session_id"] == session_id:
                    continue
                if client_id and metadata["client_id"] != client_id:
                    continue
            elif session_id:
                # session_id takes priority; client_id is ignored when present.
                if metadata["session_id"] != session_id:
                    continue
            elif client_id:
                if metadata["client_id"] != client_id:
                    continue
            # Convert L2 distance (smaller = more similar) to a 0-1 similarity score.
            similarity_score = 1.0 / (1.0 + float(distance))
            result = {
                "role": metadata["role"],
                "content": metadata["content"],
                "session_id": metadata["session_id"],
                "client_id": metadata["client_id"],
                "timestamp": metadata["timestamp"],
                "score": similarity_score,
                "distance": float(distance)
            }
            results.append(result)
            # Stop once we have enough results.
            if len(results) >= top_k:
                break
        return results

    def get_smart_context(
        self,
        query: str,
        session_id: str,
        client_id: str,
        max_tokens: int = 2000,
        include_recent: int = 3,
        include_relevant: int = 5
    ) -> List[Dict[str, str]]:
        """
        Build a smart context: recent messages plus semantically relevant ones.

        Strategy:
        1. Keep the most recent include_recent exchanges (conversation coherence).
        2. Retrieve include_relevant most relevant historical messages (background).
        3. De-duplicate and sort by time.
        4. Cap total size at roughly max_tokens.

        Args:
            query: Current query text.
            session_id: Session id (the only key used for retrieval filtering).
            client_id: Client id (kept for interface compatibility; not used to filter).
            max_tokens: Token budget (rough estimate: 1 token ~ 1.5 Chinese characters).
            include_recent: Number of recent exchanges to keep.
            include_relevant: Number of relevant messages to add.
        Returns:
            Context messages as [{"role": "user/assistant", "content": "..."}].
        """
        # 1. Most recent messages for this session (x2 because turns come in pairs).
        recent_messages = [
            msg for msg in self._metadata[-include_recent * 2:]
            if msg["session_id"] == session_id
        ]
        # 2. Relevant messages from this session, excluding the recent ones.
        recent_timestamps = {msg["timestamp"] for msg in recent_messages}
        relevant_messages = self.search_relevant_context(
            query=query,
            top_k=include_relevant * 2,
            session_id=session_id,  # filter by session only
            client_id=None
        )
        relevant_messages = [
            msg for msg in relevant_messages
            if msg["timestamp"] not in recent_timestamps
        ][:include_relevant]
        # 3. Merge and restore chronological order.
        all_messages = recent_messages + relevant_messages
        all_messages.sort(key=lambda x: x.get("timestamp", ""))
        # 4. Enforce the character budget (1 token ~ 1.5 Chinese characters).
        total_chars = 0
        max_chars = int(max_tokens * 1.5)
        filtered_messages = []
        for msg in all_messages:
            content_len = len(msg["content"])
            if total_chars + content_len <= max_chars:
                filtered_messages.append({
                    "role": msg["role"],
                    "content": msg["content"]
                })
                total_chars += content_len
            else:
                # Over budget: truncate the last message if enough room remains.
                remaining = max_chars - total_chars
                if remaining > 100:  # keep at least 100 characters
                    filtered_messages.append({
                        "role": msg["role"],
                        "content": msg["content"][:remaining] + "..."
                    })
                break
        return filtered_messages

    def _trim_old_records(self):
        """Drop the oldest records to keep the index within max_history_size."""
        if len(self._metadata) <= self.max_history_size:
            return
        num_to_remove = len(self._metadata) - self.max_history_size
        logger.info(f"清理{num_to_remove}条旧记录,当前索引大小: {len(self._metadata)}")
        # IndexFlatL2 does not support deletion, so the index must be rebuilt
        # from the records we keep (the newest ones).
        keep_metadata = self._metadata[num_to_remove:]
        keep_contents = [msg["content"] for msg in keep_metadata]
        faiss = _lazy_import_faiss()
        self._index = faiss.IndexFlatL2(self.dimension)
        if self.use_gpu and faiss.get_num_gpus() > 0:
            self._index = faiss.index_cpu_to_gpu(
                faiss.StandardGpuResources(), 0, self._index
            )
        # Re-embed and re-insert the retained contents.
        if keep_contents:
            embeddings = self.embed_texts(keep_contents)
            self._index.add(embeddings)
        # Re-number metadata to match the new row positions.
        for i, msg in enumerate(keep_metadata):
            msg["index_id"] = i
        self._metadata = keep_metadata
        logger.info(f"索引重建完成,新大小: {len(self._metadata)}")

    def save_index(self, path: Optional[str] = None):
        """
        Persist the index and its metadata to disk.

        Args:
            path: Save path (falls back to the path given at construction).
        """
        save_path = path or self.index_path
        if not save_path:
            logger.warning("未指定保存路径,跳过持久化")
            return
        # Ensure the parent directory exists.
        # BUGFIX: os.makedirs('') raises FileNotFoundError, so only create a
        # directory when the path actually has a parent component.
        parent_dir = os.path.dirname(save_path)
        if parent_dir:
            os.makedirs(parent_dir, exist_ok=True)
        index = self._get_index()
        # A GPU index must be copied back to CPU before serialization.
        faiss = _lazy_import_faiss()
        if self.use_gpu and faiss.get_num_gpus() > 0:
            cpu_index = faiss.index_gpu_to_cpu(index)
            faiss.write_index(cpu_index, save_path)
        else:
            faiss.write_index(index, save_path)
        # Metadata is stored alongside the index in a pickle file.
        metadata_path = save_path + ".metadata"
        with open(metadata_path, 'wb') as f:
            pickle.dump(self._metadata, f)
        logger.info(f"索引已保存到: {save_path}")

    def _load_index(self):
        """Load the index and metadata from disk; reset state on failure."""
        if not self.index_path or not os.path.exists(self.index_path):
            logger.warning(f"索引文件不存在: {self.index_path}")
            return
        try:
            faiss = _lazy_import_faiss()
            self._index = faiss.read_index(self.index_path)
            # Move the loaded index to GPU if requested and available.
            if self.use_gpu and faiss.get_num_gpus() > 0:
                self._index = faiss.index_cpu_to_gpu(
                    faiss.StandardGpuResources(), 0, self._index
                )
            # Load metadata if its companion file exists.
            # NOTE(review): pickle.load on an untrusted file can execute
            # arbitrary code — only load indexes this process itself saved.
            metadata_path = self.index_path + ".metadata"
            if os.path.exists(metadata_path):
                with open(metadata_path, 'rb') as f:
                    self._metadata = pickle.load(f)
            logger.info(f"索引已加载: {self.index_path}, 记录数: {len(self._metadata)}")
        except Exception as e:
            logger.error(f"加载索引失败: {e}")
            self._index = None
            self._metadata = []

    def get_stats(self) -> Dict[str, Any]:
        """Return index statistics without forcing index creation."""
        # Read the index directly: creating it here just to report stats would
        # defeat the lazy-loading design.
        index = self._index
        return {
            "total_messages": len(self._metadata),
            "index_size": index.ntotal if index else 0,
            "dimension": self.dimension,
            "model": self.embedding_model_name,
            "max_history_size": self.max_history_size,
            "use_gpu": self.use_gpu
        }

    def clear(self):
        """Reset the index and metadata to an empty state."""
        faiss = _lazy_import_faiss()
        self._index = faiss.IndexFlatL2(self.dimension)
        if self.use_gpu and faiss.get_num_gpus() > 0:
            self._index = faiss.index_cpu_to_gpu(
                faiss.StandardGpuResources(), 0, self._index
            )
        self._metadata = []
        logger.info("索引已清空")