# Error-Handling Engineering for AI Applications in 2026: Letting LLM Systems Degrade Gracefully in Production
## Why Error Handling in AI Applications Is Harder Than in Traditional Software

Traditional software has well-defined error boundaries: a database connection fails, a file is missing, a network call times out. These errors are deterministic and can be caught and handled precisely. LLM applications introduce a new class of *fuzzy* errors:

- The model returns malformed JSON
- A tool call carries invalid arguments
- The model generates something entirely different from what was expected
- The context overflows and gets truncated
- Hallucination: output that looks fine but is factually wrong

These errors do not raise exceptions, yet they quietly corrupt your business logic. This article walks through a complete engineering approach to error handling in AI applications.

## 1. LLM API-Layer Error Handling

### 1.1 Classify and Handle Different API Error Types

```python
import time
import random
from typing import Callable, Any, Optional
from anthropic import Anthropic, APIError, RateLimitError, APITimeoutError, APIConnectionError

client = Anthropic()


class LLMError(Exception):
    """Base exception for LLM calls."""


class LLMRateLimitError(LLMError):
    """Rate limited -- retryable."""


class LLMOverloadError(LLMError):
    """Service overloaded -- retryable."""


class LLMContextLengthError(LLMError):
    """Context too long -- truncate, then retry."""


class LLMContentFilterError(LLMError):
    """Content filtered -- not retryable."""


def call_llm_with_error_handling(messages: list, **kwargs) -> str:
    """LLM call with full error handling."""
    try:
        response = client.messages.create(
            model="claude-4-sonnet-20260101",
            max_tokens=2048,
            messages=messages,
            **kwargs
        )
        # Check the stop reason
        if response.stop_reason == "max_tokens":
            # Output was truncated and may need special handling
            print("⚠️ Warning: output truncated by max_tokens")
        return response.content[0].text
    except RateLimitError as e:
        raise LLMRateLimitError(f"Rate limited: {e}") from e
    except APITimeoutError as e:
        raise LLMOverloadError(f"Request timed out: {e}") from e
    except APIError as e:
        if e.status_code == 529:
            raise LLMOverloadError(f"API overloaded: {e}") from e
        elif e.status_code == 400:
            error_msg = str(e)
            if "context_length" in error_msg or "too long" in error_msg:
                raise LLMContextLengthError(f"Context too long: {e}") from e
            elif "content_filter" in error_msg:
                raise LLMContentFilterError(f"Content filtered: {e}") from e
        raise LLMError(f"API error ({e.status_code}): {e}") from e
```

### 1.2 A Smart Retry Strategy

```python
from dataclasses import dataclass


@dataclass
class RetryConfig:
    max_attempts: int = 3
    base_delay: float = 1.0
    max_delay: float = 60.0
    exponential_base: float = 2.0
    jitter: bool = True  # random jitter avoids synchronized retry storms


def calculate_delay(attempt: int, config: RetryConfig) -> float:
    """Compute the retry wait: exponential backoff plus jitter."""
    delay = min(
        config.base_delay * (config.exponential_base ** attempt),
        config.max_delay
    )
    if config.jitter:
        delay = delay * (0.5 + random.random() * 0.5)
    return delay


def with_retry(
    func: Callable,
    retry_config: Optional[RetryConfig] = None,
    retryable_errors: tuple = (LLMRateLimitError, LLMOverloadError)
):
    """Generic retry decorator."""
    config = retry_config or RetryConfig()

    def decorator(*args, **kwargs) -> Any:
        last_error = None
        for attempt in range(config.max_attempts):
            try:
                return func(*args, **kwargs)
            except retryable_errors as e:
                last_error = e
                if attempt < config.max_attempts - 1:
                    delay = calculate_delay(attempt, config)
                    print(f"Attempt {attempt + 1} failed ({type(e).__name__}), "
                          f"retrying in {delay:.1f}s...")
                    time.sleep(delay)
            except Exception:
                # Non-retryable errors propagate immediately
                raise
        raise last_error

    return decorator


# Usage example
@with_retry
def reliable_llm_call(messages: list) -> str:
    return call_llm_with_error_handling(messages)
```
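The backoff schedule is easy to sanity-check before wiring the decorator into production code. A quick illustration, reusing `RetryConfig` and `calculate_delay` from above with jitter disabled so the delays are deterministic (the config values here are purely for demonstration):

```python
# Print the deterministic backoff schedule for a 5-attempt config:
# 1s, 2s, 4s, 8s (the 10s cap would kick in on a later attempt).
config = RetryConfig(max_attempts=5, base_delay=1.0, max_delay=10.0, jitter=False)
for attempt in range(config.max_attempts - 1):
    print(f"after failure {attempt + 1}: wait {calculate_delay(attempt, config):.1f}s")
```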
## 2. Context-Length Management

Context overflow is one of the most common problems in production.

### 2.1 Dynamic Context Compression

```python
import tiktoken
from typing import List, Dict


def count_tokens(text: str, model: str = "gpt-4") -> int:
    """Count the tokens in a piece of text."""
    try:
        encoding = tiktoken.encoding_for_model(model)
        return len(encoding.encode(text))
    except Exception:
        # Rough estimate: ~2 chars/token for Chinese, ~4 chars/token for English
        return len(text) // 3


class ContextManager:
    """Dynamic context manager."""

    def __init__(self, max_tokens: int = 100000, reserve_tokens: int = 4000):
        self.max_tokens = max_tokens
        self.reserve_tokens = reserve_tokens  # reserved for the model's output
        self.available_tokens = max_tokens - reserve_tokens

    def fit_messages(
        self,
        messages: List[Dict],
        system_prompt: str
    ) -> List[Dict]:
        """Trim the message list to fit the context limit.

        Strategy: keep the system prompt and the newest messages;
        compress or drop the older history.
        """
        system_tokens = count_tokens(system_prompt)
        remaining = self.available_tokens - system_tokens
        if remaining <= 0:
            raise ValueError(f"System prompt too long: {system_tokens} tokens")

        # Walk backwards from the newest message, keeping what fits
        fitted = []
        current_tokens = 0
        for message in reversed(messages):
            msg_tokens = count_tokens(str(message))
            if current_tokens + msg_tokens <= remaining:
                fitted.insert(0, message)
                current_tokens += msg_tokens
            else:
                # The latest user message must survive, even if truncated
                if not fitted and message["role"] == "user":
                    truncated = self._truncate_message(message, remaining)
                    fitted.insert(0, truncated)
                break
        return fitted

    def _truncate_message(self, message: Dict, max_tokens: int) -> Dict:
        """Truncate message content, keeping the head and the tail."""
        content = message["content"]
        # Keep roughly the first 60% and the last 500 characters; drop the middle
        truncated = (content[:int(len(content) * 0.6)]
                     + "\n\n[...content truncated...]\n\n"
                     + content[-500:])
        return {**message, "content": truncated}


context_manager = ContextManager(max_tokens=180000)  # Claude 4: 200K window


def smart_llm_call(system_prompt: str, messages: list) -> str:
    """LLM call with smart context-length handling."""
    try:
        fitted_messages = context_manager.fit_messages(messages, system_prompt)
        if len(fitted_messages) < len(messages):
            print(f"⚠️ Context compressed: {len(messages)} → {len(fitted_messages)} messages")
        return call_llm_with_error_handling(
            fitted_messages,
            system=system_prompt
        )
    except LLMContextLengthError:
        # Still too long: compress more aggressively
        print("⚠️ Applying aggressive compression...")
        summary = summarize_history(messages[:-1])  # summarize the history
        # Anthropic's Messages API takes the system prompt as a separate
        # parameter, so fold the summary into it rather than using a "system" role
        return call_llm_with_error_handling(
            [messages[-1]],  # keep only the newest message
            system=f"{system_prompt}\n\nSummary of earlier conversation: {summary}"
        )
```
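The aggressive path above leans on a `summarize_history` helper that the article never defines. A minimal sketch of what it might look like: flatten the dropped turns into a transcript and ask a lightweight model for a compact summary (the model name and the prompt wording are illustrative assumptions):

```python
def summarize_history(messages: list) -> str:
    """Summarize dropped conversation turns into a short paragraph.

    Hypothetical helper; assumes each message's content is plain text.
    """
    transcript = "\n".join(f"{m['role']}: {m['content']}" for m in messages)
    response = client.messages.create(
        model="claude-4-haiku-20260101",  # a lightweight model keeps compression cheap
        max_tokens=512,
        messages=[{
            "role": "user",
            "content": (
                "Summarize the following conversation in a few sentences, "
                "preserving any facts, decisions, and constraints that later "
                f"turns may depend on:\n\n{transcript}"
            )
        }]
    )
    return response.content[0].text
```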
## 3. Output Validation and Degradation Strategies

### 3.1 A Multi-Layer Validation Framework

```python
import json
from pydantic import BaseModel, ValidationError
from typing import Optional, Callable, TypeVar, Generic

T = TypeVar("T")


class ValidationResult(Generic[T]):
    def __init__(
        self,
        success: bool,
        value: Optional[T] = None,
        error: Optional[str] = None,
        fallback_used: bool = False
    ):
        self.success = success
        self.value = value
        self.error = error
        self.fallback_used = fallback_used


def validate_and_parse(
    raw_output: str,
    model_class: type,
    fallback_fn: Optional[Callable] = None
) -> ValidationResult:
    """Output validation with layered fallbacks."""
    # Layer 1: parse directly
    try:
        data = json.loads(raw_output.strip())
        validated = model_class(**data)
        return ValidationResult(success=True, value=validated)
    except (json.JSONDecodeError, ValidationError):
        pass

    # Layer 2: extract JSON from the surrounding text, then parse
    try:
        extracted = extract_json_from_text(raw_output)
        if extracted:
            validated = model_class(**extracted)
            return ValidationResult(success=True, value=validated)
    except Exception:
        pass

    # Layer 3: ask an LLM to repair the JSON
    try:
        fixed = llm_fix_json(raw_output, model_class.schema())
        if fixed:
            validated = model_class(**fixed)
            return ValidationResult(success=True, value=validated)
    except Exception:
        pass

    # Layer 4: fall back to a caller-supplied function
    if fallback_fn:
        try:
            fallback_value = fallback_fn(raw_output)
            return ValidationResult(
                success=True,
                value=fallback_value,
                fallback_used=True
            )
        except Exception:
            pass

    return ValidationResult(
        success=False,
        error=f"All parsing strategies failed. Raw output: {raw_output[:200]}"
    )


def llm_fix_json(broken_json: str, schema: dict) -> Optional[dict]:
    """Use an LLM to repair broken JSON."""
    try:
        response = client.messages.create(
            model="claude-4-haiku-20260101",  # a lightweight model keeps repair cheap
            max_tokens=1024,
            messages=[{
                "role": "user",
                "content": (
                    "Fix the following JSON so it conforms to the schema. "
                    "Output only valid JSON, with no explanation.\n\n"
                    f"Broken JSON:\n{broken_json}\n\n"
                    f"Required schema:\n{json.dumps(schema, ensure_ascii=False)}"
                )
            }]
        )
        fixed_text = response.content[0].text.strip()
        return json.loads(fixed_text)
    except Exception:
        return None
```
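Layer 2 depends on an `extract_json_from_text` helper that is referenced but never shown. A minimal sketch (hypothetical; a production version would handle multiple candidates and nested braces more carefully):

```python
import re


def extract_json_from_text(text: str) -> Optional[dict]:
    """Pull the first JSON object out of surrounding prose.

    Hypothetical helper: tries a fenced code block first, then falls
    back to the widest brace-delimited span in the raw text.
    """
    # 1. Look for a fenced code block (`{3} matches three backticks)
    fence = re.search(r"`{3}(?:json)?\s*(\{.*?\})\s*`{3}", text, re.DOTALL)
    if fence:
        try:
            return json.loads(fence.group(1))
        except json.JSONDecodeError:
            pass
    # 2. Fall back to the outermost {...} span
    start, end = text.find("{"), text.rfind("}")
    if start != -1 and end > start:
        try:
            return json.loads(text[start:end + 1])
        except json.JSONDecodeError:
            pass
    return None
```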
### 3.2 Graceful Degradation Design

```python
class GracefulDegradation:
    """Graceful-degradation manager."""

    def __init__(self):
        self.degradation_level = 0  # 0 = normal, 1 and 2 = degraded
        self.error_counts = {}
        self.threshold = 3  # consecutive-error threshold

    def record_error(self, error_type: str):
        self.error_counts[error_type] = self.error_counts.get(error_type, 0) + 1
        # Escalate the degradation level when errors accumulate
        total_recent_errors = sum(self.error_counts.values())
        if total_recent_errors >= self.threshold:
            self.degradation_level = min(self.degradation_level + 1, 2)
            print(f"⚠️ Service degraded to level {self.degradation_level}")

    def record_success(self):
        self.error_counts.clear()
        if self.degradation_level > 0:
            self.degradation_level -= 1
            print(f"✅ Recovered to level {self.degradation_level}")

    def get_model_for_level(self) -> str:
        """While degraded, use a more stable or cheaper model."""
        models = {
            0: "claude-4-sonnet-20260101",   # normal
            1: "claude-3-5-haiku-20241022",  # level-1 degradation
            2: "gpt-4o-mini"                 # level-2 degradation: backup provider
        }
        return models.get(self.degradation_level, models[2])


degrader = GracefulDegradation()


def adaptive_llm_call(messages: list) -> str:
    """LLM call with adaptive degradation."""
    model = degrader.get_model_for_level()
    try:
        if "claude" in model:
            response = client.messages.create(
                model=model,
                max_tokens=2048,
                messages=messages
            )
            result = response.content[0].text
        else:
            # Switch over to OpenAI
            from openai import OpenAI
            oai_client = OpenAI()
            response = oai_client.chat.completions.create(
                model=model,
                messages=[{"role": m["role"], "content": m["content"]}
                          for m in messages]
            )
            result = response.choices[0].message.content
        degrader.record_success()
        return result
    except Exception as e:
        # Count any provider error toward degradation before re-raising.
        # (The raw SDK calls here raise provider exceptions, not the wrapped
        # LLMRateLimitError/LLMOverloadError types from Section 1.)
        degrader.record_error(type(e).__name__)
        raise
```

## 4. Observability: Give Errors Nowhere to Hide

```python
import logging
import json
from datetime import datetime

# Structured logging
logger = logging.getLogger("llm_errors")


class LLMCallLogger:
    """Structured logger for LLM calls."""

    @staticmethod
    def log_call(
        model: str,
        input_tokens: int,
        output_tokens: int,
        latency_ms: float,
        success: bool,
        error_type: Optional[str] = None,
        request_id: Optional[str] = None
    ):
        log_entry = {
            "timestamp": datetime.utcnow().isoformat(),
            "request_id": request_id,
            "model": model,
            "input_tokens": input_tokens,
            "output_tokens": output_tokens,
            "latency_ms": round(latency_ms, 2),
            "success": success,
            "error_type": error_type,
            "cost_usd": (input_tokens * 3.0 + output_tokens * 15.0) / 1_000_000
        }
        if success:
            logger.info(json.dumps(log_entry, ensure_ascii=False))
        else:
            logger.error(json.dumps(log_entry, ensure_ascii=False))

    @staticmethod
    def log_validation_failure(
        raw_output_preview: str,
        expected_schema: str,
        fallback_used: bool
    ):
        logger.warning(json.dumps({
            "event": "output_validation_failure",
            "timestamp": datetime.utcnow().isoformat(),
            "output_preview": raw_output_preview[:200],
            "expected_schema": expected_schema[:100],
            "fallback_used": fallback_used
        }, ensure_ascii=False))
```

## 5. An Error-Handling Checklist

Verify this checklist before going to production.

**API layer**
- ☐ Rate-limit retries (exponential backoff + jitter)
- ☐ Timeout retries
- ☐ Service-overload retries
- ☐ Non-retryable errors correctly identified (content filtering, etc.)

**Context layer**
- ☐ Token counting implemented
- ☐ Compression strategy for context overflow
- ☐ Critical messages (the latest user input) protected

**Output layer**
- ☐ Fault-tolerant parsing of malformed output
- ☐ Fallback handling for schema-validation failures
- ☐ Empty-output handling

**System layer**
- ☐ Multi-provider fallback
- ☐ Error-rate monitoring
- ☐ Structured logging

## Conclusion

Error handling in an AI application is not something you can "do later". In production, an AI application without error handling will fail at the worst possible moment, in the hardest-to-debug way. The core principle behind robust AI applications is to assume everything can fail and design a handling path for every failure mode. From API calls to output parsing, from context management to multi-provider fallback, only a complete error-handling system makes an AI application genuinely production-grade.