Cascade Bot: Updated ChainOfThoughtStrategy to use StrategyResult and improved implementation (commit 3c2aa2f)
| """Chain of Thought reasoning implementation with advanced features.""" | |
| import logging | |
| from typing import Dict, Any, List, Optional, Tuple | |
| import json | |
| from dataclasses import dataclass | |
| from enum import Enum | |
| from datetime import datetime | |
| from .base import ReasoningStrategy, StrategyResult | |


class ThoughtType(Enum):
    """Types of thoughts in the chain."""
    OBSERVATION = "observation"
    ANALYSIS = "analysis"
    HYPOTHESIS = "hypothesis"
    VERIFICATION = "verification"
    CONCLUSION = "conclusion"
    REFLECTION = "reflection"
    REFINEMENT = "refinement"


@dataclass
class Thought:
    """Represents a single thought in the chain."""
    type: ThoughtType
    content: str
    confidence: float
    evidence: List[str]
    alternatives: List[str]
    next_steps: List[str]
    metadata: Dict[str, Any]
    # default_factory gives each thought its own creation time; a plain
    # default would be evaluated only once, at class-definition time.
    timestamp: str = field(default_factory=lambda: datetime.now().isoformat())


class ChainOfThoughtStrategy(ReasoningStrategy):
    """
    Advanced Chain of Thought reasoning implementation with:
    - Hierarchical thought chains
    - Confidence scoring
    - Alternative path exploration
    - Self-reflection and refinement
    - Evidence tracking
    - Meta-learning capabilities
    """

    def __init__(self,
                 min_confidence: float = 0.7,
                 parallel_threshold: int = 3,
                 learning_rate: float = 0.1,
                 strategy_weights: Optional[Dict[str, float]] = None):
        """Initialize Chain of Thought reasoning."""
        super().__init__()
        self.min_confidence = min_confidence
        # Note: parallel_threshold, learning_rate, and strategy_weights are
        # stored as tuning knobs but are not yet consumed by the reasoning loop.
        self.parallel_threshold = parallel_threshold
        self.learning_rate = learning_rate
        self.strategy_weights = strategy_weights or {
            'observation': 0.2,
            'analysis': 0.3,
            'hypothesis': 0.2,
            'verification': 0.15,
            'conclusion': 0.15
        }

        # Initialize thought chain
        self.thoughts: List[Thought] = []

        # Performance tracking
        self.performance_metrics = {
            'avg_confidence': 0.0,
            'chain_length': 0,
            'refinement_count': 0,
            'parallel_paths': 0
        }

    async def reason(
        self,
        query: str,
        context: Dict[str, Any]
    ) -> StrategyResult:
        """
        Apply Chain of Thought reasoning to analyze the query.

        Args:
            query: The input query to reason about
            context: Additional context and parameters

        Returns:
            StrategyResult containing the reasoning chain and confidence
        """
        try:
            # Reset thought chain
            self.thoughts = []

            # Initial observation
            await self._add_thought(
                ThoughtType.OBSERVATION,
                f"Analyzing query: {query}",
                context
            )

            # Generate analysis thoughts
            await self._analyze_query(query, context)

            # Generate hypotheses
            hypotheses = await self._generate_hypotheses(context)

            # Verify hypotheses
            await self._verify_hypotheses(hypotheses, context)

            # Draw conclusions
            conclusion = await self._draw_conclusion(context)

            # Reflect and refine if the conclusion is not confident enough
            if conclusion.confidence < self.min_confidence:
                await self._reflect_and_refine(context)
                conclusion = await self._draw_conclusion(context)

            # Update performance metrics
            self._update_metrics()

            return StrategyResult(
                strategy_type="chain_of_thought",
                success=True,
                answer=conclusion.content,
                confidence=conclusion.confidence,
                reasoning_trace=[{
                    "step": t.type.value,
                    "content": t.content,
                    "confidence": t.confidence,
                    "evidence": t.evidence,
                    "alternatives": t.alternatives,
                    "next_steps": t.next_steps,
                    "metadata": t.metadata,
                    "timestamp": t.timestamp
                } for t in self.thoughts],
                metadata={
                    "num_thoughts": len(self.thoughts),
                    "thought_types": [t.type.value for t in self.thoughts],
                    "final_confidence": conclusion.confidence
                },
                performance_metrics=self.performance_metrics
            )

        except Exception as e:
            logging.error(f"Chain of Thought reasoning error: {e}")
            return StrategyResult(
                strategy_type="chain_of_thought",
                success=False,
                answer=None,
                confidence=0.0,
                reasoning_trace=[{
                    "step": "error",
                    "error": str(e),
                    "timestamp": datetime.now().isoformat()
                }],
                metadata={"error": str(e)},
                performance_metrics=self.performance_metrics
            )

    async def _add_thought(
        self,
        thought_type: ThoughtType,
        content: str,
        context: Dict[str, Any]
    ) -> Thought:
        """Add a new thought to the chain."""
        thought = Thought(
            type=thought_type,
            content=content,
            confidence=self._calculate_confidence(content, context),
            evidence=self._gather_evidence(content, context),
            alternatives=self._generate_alternatives(content, context),
            next_steps=self._determine_next_steps(thought_type, context),
            metadata=self._extract_metadata(content, context)
        )
        self.thoughts.append(thought)
        return thought

    async def _analyze_query(
        self,
        query: str,
        context: Dict[str, Any]
    ) -> None:
        """Generate analysis thoughts."""
        # Extract key components
        components = self._extract_components(query)

        # Analyze each component
        for comp in components:
            await self._add_thought(
                ThoughtType.ANALYSIS,
                f"Analysis of {comp}: {self._analyze_component(comp, context)}",
                context
            )

    async def _generate_hypotheses(
        self,
        context: Dict[str, Any]
    ) -> List[Thought]:
        """Generate hypothesis thoughts."""
        hypotheses = []

        # Generate hypotheses based on analysis
        analysis_thoughts = [t for t in self.thoughts if t.type == ThoughtType.ANALYSIS]
        for analysis in analysis_thoughts:
            hypothesis = await self._add_thought(
                ThoughtType.HYPOTHESIS,
                f"Based on {analysis.content}, hypothesis: {self._generate_hypothesis(analysis, context)}",
                context
            )
            hypotheses.append(hypothesis)

        return hypotheses

    async def _verify_hypotheses(
        self,
        hypotheses: List[Thought],
        context: Dict[str, Any]
    ) -> None:
        """Verify generated hypotheses."""
        for hypothesis in hypotheses:
            await self._add_thought(
                ThoughtType.VERIFICATION,
                f"Verifying {hypothesis.content}: {self._verify_hypothesis(hypothesis, context)}",
                context
            )

    async def _draw_conclusion(
        self,
        context: Dict[str, Any]
    ) -> Thought:
        """Draw conclusion from verified hypotheses."""
        verified_thoughts = [t for t in self.thoughts if t.type == ThoughtType.VERIFICATION]
        conclusion_content = self._synthesize_conclusion(verified_thoughts, context)

        return await self._add_thought(
            ThoughtType.CONCLUSION,
            conclusion_content,
            context
        )

    async def _reflect_and_refine(
        self,
        context: Dict[str, Any]
    ) -> None:
        """Reflect on the reasoning chain and refine if needed."""
        # Add reflection thought
        reflection = await self._add_thought(
            ThoughtType.REFLECTION,
            self._generate_reflection(self.thoughts, context),
            context
        )

        # Add refinement if needed
        if reflection.confidence < self.min_confidence:
            await self._add_thought(
                ThoughtType.REFINEMENT,
                self._generate_refinement(reflection, context),
                context
            )
            self.performance_metrics['refinement_count'] += 1

    def _calculate_confidence(
        self,
        content: str,
        context: Dict[str, Any]
    ) -> float:
        """Calculate confidence score for a thought."""
        # Base confidence
        confidence = 0.5

        # Adjust based on content length and complexity
        words = content.split()
        if len(words) > 50:
            confidence += 0.1
        if len(words) > 100:
            confidence += 0.1

        # Adjust based on evidence
        evidence = self._gather_evidence(content, context)
        confidence += min(0.3, len(evidence) * 0.1)

        return min(1.0, confidence)
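
    # Worked example of the heuristic above (illustrative): a 60-word thought
    # with two pieces of supporting evidence scores
    # 0.5 (base) + 0.1 (over 50 words) + 0.2 (2 * 0.1 evidence bonus) = 0.8.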

    def _gather_evidence(
        self,
        content: str,
        context: Dict[str, Any]
    ) -> List[str]:
        """Gather evidence supporting the thought."""
        evidence = []

        # Extract from context
        if 'evidence' in context:
            evidence.extend(context['evidence'])

        # Extract from previous thoughts; skip short tokens so stop words
        # such as "the" or "and" do not produce spurious matches
        terms = [w for w in content.lower().split() if len(w) > 3]
        for thought in self.thoughts:
            if any(term in thought.content.lower() for term in terms):
                evidence.append(f"Supported by previous thought: {thought.content}")

        return evidence

    def _generate_alternatives(
        self,
        content: str,
        context: Dict[str, Any]
    ) -> List[str]:
        """Generate alternative perspectives."""
        alternatives = []

        # Generate opposites
        words = content.lower().split()
        opposites = {
            'increase': 'decrease',
            'high': 'low',
            'good': 'bad',
            'positive': 'negative'
        }
        for word in words:
            if word in opposites:
                alt = content.replace(word, opposites[word])
                alternatives.append(f"Alternative: {alt}")

        return alternatives
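
    # Example (illustrative): for the thought "increase capacity under high load",
    # the substitutions above yield "Alternative: decrease capacity under high load"
    # and "Alternative: increase capacity under low load".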

    def _determine_next_steps(
        self,
        thought_type: ThoughtType,
        context: Dict[str, Any]
    ) -> List[str]:
        """Determine possible next steps."""
        steps = []

        if thought_type == ThoughtType.OBSERVATION:
            steps.extend([
                "Analyze key components",
                "Identify patterns",
                "Consider context"
            ])
        elif thought_type == ThoughtType.ANALYSIS:
            steps.extend([
                "Generate hypotheses",
                "Look for correlations",
                "Consider alternatives"
            ])
        elif thought_type == ThoughtType.HYPOTHESIS:
            steps.extend([
                "Verify hypothesis",
                "Gather evidence",
                "Test assumptions"
            ])
        elif thought_type == ThoughtType.VERIFICATION:
            steps.extend([
                "Draw conclusions",
                "Consider implications",
                "Plan actions"
            ])

        return steps

    def _extract_metadata(
        self,
        content: str,
        context: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Extract metadata from thought content."""
        return {
            'length': len(content),
            'complexity': len(content.split()),
            'context_keys': list(context.keys()),
            'timestamp': datetime.now().isoformat()
        }

    def _extract_components(self, query: str) -> List[str]:
        """Extract key components from query."""
        # Simple word-based extraction; could be enhanced with NLP
        return [w.strip() for w in query.split() if len(w.strip()) > 3]

    # NOTE: the helpers below are placeholder implementations that echo their
    # inputs; replace them with real analysis/verification logic as needed.
    def _analyze_component(
        self,
        component: str,
        context: Dict[str, Any]
    ) -> str:
        """Analyze a single component."""
        return f"Component {component} analysis based on context"

    def _generate_hypothesis(
        self,
        analysis: Thought,
        context: Dict[str, Any]
    ) -> str:
        """Generate hypothesis from analysis."""
        return f"Hypothesis generated from {analysis.content}"

    def _verify_hypothesis(
        self,
        hypothesis: Thought,
        context: Dict[str, Any]
    ) -> str:
        """Verify a hypothesis."""
        return f"Verification of {hypothesis.content}"

    def _synthesize_conclusion(
        self,
        verified_thoughts: List[Thought],
        context: Dict[str, Any]
    ) -> str:
        """Synthesize conclusion from verified thoughts."""
        return "Conclusion based on verified thoughts: " + ", ".join(
            t.content for t in verified_thoughts
        )

    def _generate_reflection(
        self,
        thoughts: List[Thought],
        context: Dict[str, Any]
    ) -> str:
        """Generate reflection on thought chain."""
        return f"Reflection on {len(thoughts)} thoughts in chain"

    def _generate_refinement(
        self,
        reflection: Thought,
        context: Dict[str, Any]
    ) -> str:
        """Generate refinement based on reflection."""
        return f"Refinement based on {reflection.content}"

    def _update_metrics(self) -> None:
        """Update performance metrics."""
        if self.thoughts:
            self.performance_metrics.update({
                'avg_confidence': sum(t.confidence for t in self.thoughts) / len(self.thoughts),
                'chain_length': len(self.thoughts),
                'parallel_paths': len([t for t in self.thoughts if t.alternatives])
            })
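

# Illustrative usage sketch (not part of the original commit): drives one
# reasoning pass end to end. It assumes the sibling `base` module defines
# ReasoningStrategy and StrategyResult as imported above, that StrategyResult
# exposes its constructor arguments as attributes, and that this file is run
# as part of its package (e.g. `python -m <package>.chain_of_thought`) so the
# relative import resolves.
if __name__ == "__main__":
    import asyncio

    async def _demo() -> None:
        strategy = ChainOfThoughtStrategy(min_confidence=0.6)
        result = await strategy.reason(
            "Will increasing the cache size improve throughput?",
            {"evidence": ["Cache hit rate is currently 62%"]}
        )
        print(f"Answer (confidence {result.confidence:.2f}): {result.answer}")
        for step in result.reasoning_trace:
            print(f"- [{step['step']}] {step['content']}")

    asyncio.run(_demo())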