diff --git a/.gitattributes b/.gitattributes index a6344aac8c09253b3b630fb776ae94478aa0275b..16c1d89c64f112a8a71fa0b71b50ff18aded3b35 100644 --- a/.gitattributes +++ b/.gitattributes @@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.zip filter=lfs diff=lfs merge=lfs -text *.zst filter=lfs diff=lfs merge=lfs -text *tfevents* filter=lfs diff=lfs merge=lfs -text +generated-icon.png filter=lfs diff=lfs merge=lfs -text diff --git a/App.tsx b/App.tsx new file mode 100644 index 0000000000000000000000000000000000000000..63b704228ddfc67550d70879e2f2af492fe126aa --- /dev/null +++ b/App.tsx @@ -0,0 +1,26 @@ +import { Switch, Route } from "wouter"; +import { queryClient } from "./lib/queryClient"; +import { QueryClientProvider } from "@tanstack/react-query"; +import { Toaster } from "@/components/ui/toaster"; +import NotFound from "@/pages/not-found"; +import Chat from "@/pages/chat"; + +function Router() { + return ( + + + + + ); +} + +function App() { + return ( + + + + + ); +} + +export default App; diff --git a/Pasted-import-React-useState-useEffect-useRef-from-react-import-motion-from-framer-motion--1742439890124.txt b/Pasted-import-React-useState-useEffect-useRef-from-react-import-motion-from-framer-motion--1742439890124.txt new file mode 100644 index 0000000000000000000000000000000000000000..bf4c2ee1b49db02da535f960fccb4737fc184e33 --- /dev/null +++ b/Pasted-import-React-useState-useEffect-useRef-from-react-import-motion-from-framer-motion--1742439890124.txt @@ -0,0 +1,320 @@ +import React, { useState, useEffect, useRef } from 'react'; +import { motion } from 'framer-motion'; +import _ from 'lodash'; + +const VisionOSCarousel = () => { + // State for carousel and navigation + const [activeChat, setActiveChat] = useState(0); + const [showModelDropdown, setShowModelDropdown] = useState(false); + const [showHistoryDropdown, setShowHistoryDropdown] = useState(false); + const [activeModel, setActiveModel] = useState('Vision GPT-4'); + const carouselRef 
= useRef(null); + const [isDragging, setIsDragging] = useState(false); + const [startX, setStartX] = useState(0); + + // Sample chat data + const chats = [ + { id: 1, title: "Project Atlas", preview: "Latest updates on the Atlas project integration..." }, + { id: 2, title: "Meeting Notes", preview: "AI summary of yesterday's team meeting..." }, + { id: 3, title: "Travel Planning", preview: "Vision's suggestions for your upcoming trip..." }, + { id: 4, title: "Code Review", preview: "Analysis of the new codebase structure..." } + ]; + + // AI models available in the system + const aiModels = [ + { id: 1, name: 'Vision GPT-4', icon: '🧠' }, + { id: 2, name: 'Vision Assistant', icon: '👁️' }, + { id: 3, name: 'Vision Coder', icon: '💻' }, + { id: 4, name: 'Vision Creative', icon: '🎨' } + ]; + + // Chat history entries + const chatHistory = [ + { id: 101, title: "Previous Projects", date: "Mar 15" }, + { id: 102, title: "System Setup", date: "Mar 10" }, + { id: 103, title: "Design Feedback", date: "Mar 5" }, + { id: 104, title: "Initial Consultation", date: "Feb 28" } + ]; + + // Handle swipe gesture for carousel + const handleTouchStart = (e) => { + setIsDragging(true); + setStartX(e.touches[0].clientX); + }; + + const handleTouchMove = (e) => { + if (!isDragging) return; + const currentX = e.touches[0].clientX; + const diff = startX - currentX; + + if (Math.abs(diff) > 50) { + if (diff > 0 && activeChat < chats.length - 1) { + setActiveChat(activeChat + 1); + setIsDragging(false); + } else if (diff < 0 && activeChat > 0) { + setActiveChat(activeChat - 1); + setIsDragging(false); + } + } + }; + + const handleTouchEnd = () => { + setIsDragging(false); + }; + + // Handle mouse swipe for desktop + const handleMouseDown = (e) => { + setIsDragging(true); + setStartX(e.clientX); + }; + + const handleMouseMove = (e) => { + if (!isDragging) return; + const currentX = e.clientX; + const diff = startX - currentX; + + if (Math.abs(diff) > 50) { + if (diff > 0 && activeChat < 
chats.length - 1) { + setActiveChat(activeChat + 1); + setIsDragging(false); + } else if (diff < 0 && activeChat > 0) { + setActiveChat(activeChat - 1); + setIsDragging(false); + } + } + }; + + const handleMouseUp = () => { + setIsDragging(false); + }; + + // Navigation functions + const nextChat = () => { + if (activeChat < chats.length - 1) { + setActiveChat(activeChat + 1); + } + }; + + const prevChat = () => { + if (activeChat > 0) { + setActiveChat(activeChat - 1); + } + }; + + const selectModel = (model) => { + setActiveModel(model.name); + setShowModelDropdown(false); + }; + + const selectHistoryChat = (chat) => { + // In a real app, this would load the historical chat + setShowHistoryDropdown(false); + }; + + return ( +
+ {/* Top Navigation Bar - iPhone Dynamic Island Inspired */} +
+ + + +
+ + +
+ + {/* Model Selection Dropdown */} + {showModelDropdown && ( + + {aiModels.map(model => ( +
selectModel(model)} + className="flex items-center space-x-2 p-2 hover:bg-blue-900/30 rounded-lg cursor-pointer transition-colors" + > + {model.icon} + {model.name} + {activeModel === model.name && } +
+ ))} +
+ )} + + {/* History Dropdown */} + {showHistoryDropdown && ( + + {chatHistory.map(chat => ( +
selectHistoryChat(chat)} + className="flex items-center justify-between p-2 hover:bg-blue-900/30 rounded-lg cursor-pointer transition-colors" + > + {chat.title} + {chat.date} +
+ ))} +
+ )} +
+ + {/* 3D Carousel Area */} +
+
+ {chats.map((chat, index) => { + // Calculate position and transform for 3D effect + const offset = index - activeChat; + const isActive = index === activeChat; + const zIndex = chats.length - Math.abs(offset); + + return ( + +
+
+

{chat.title}

+
+
+ +

{chat.preview}

+ + {/* Mock chat bubbles */} +
+
+ How can Vision help with this project? +
+
+ I can analyze the data and provide recommendations based on previous outcomes. +
+ {isActive && ( + + Great. Let's see what you can find. + + )} +
+ + {isActive && ( + + + + + )} +
+
+ ); + })} +
+
+ + {/* Bottom Home Bar - iPhone Style */} +
+ + + +
+ + +
+
+
+ ); +}; + +export default VisionOSCarousel; \ No newline at end of file diff --git a/QuantumVision.zip b/QuantumVision.zip new file mode 100644 index 0000000000000000000000000000000000000000..19bed42b9fecee9add4caf2e10eae74f31cd8985 --- /dev/null +++ b/QuantumVision.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f8579ed427a571035e8035837e8ed195ed277140b969a931e3e03b3db6f5020f +size 1684042 diff --git a/accordion.tsx b/accordion.tsx new file mode 100644 index 0000000000000000000000000000000000000000..e6a723d06574ee5cec8b00759b98f3fbe1ac7cc9 --- /dev/null +++ b/accordion.tsx @@ -0,0 +1,56 @@ +import * as React from "react" +import * as AccordionPrimitive from "@radix-ui/react-accordion" +import { ChevronDown } from "lucide-react" + +import { cn } from "@/lib/utils" + +const Accordion = AccordionPrimitive.Root + +const AccordionItem = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)) +AccordionItem.displayName = "AccordionItem" + +const AccordionTrigger = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, children, ...props }, ref) => ( + + svg]:rotate-180", + className + )} + {...props} + > + {children} + + + +)) +AccordionTrigger.displayName = AccordionPrimitive.Trigger.displayName + +const AccordionContent = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, children, ...props }, ref) => ( + +
{children}
+
+)) + +AccordionContent.displayName = AccordionPrimitive.Content.displayName + +export { Accordion, AccordionItem, AccordionTrigger, AccordionContent } diff --git a/alert-dialog.tsx b/alert-dialog.tsx new file mode 100644 index 0000000000000000000000000000000000000000..8722561cf6bda62d62f9a0c67730aefda971873a --- /dev/null +++ b/alert-dialog.tsx @@ -0,0 +1,139 @@ +import * as React from "react" +import * as AlertDialogPrimitive from "@radix-ui/react-alert-dialog" + +import { cn } from "@/lib/utils" +import { buttonVariants } from "@/components/ui/button" + +const AlertDialog = AlertDialogPrimitive.Root + +const AlertDialogTrigger = AlertDialogPrimitive.Trigger + +const AlertDialogPortal = AlertDialogPrimitive.Portal + +const AlertDialogOverlay = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)) +AlertDialogOverlay.displayName = AlertDialogPrimitive.Overlay.displayName + +const AlertDialogContent = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + + + + +)) +AlertDialogContent.displayName = AlertDialogPrimitive.Content.displayName + +const AlertDialogHeader = ({ + className, + ...props +}: React.HTMLAttributes) => ( +
+) +AlertDialogHeader.displayName = "AlertDialogHeader" + +const AlertDialogFooter = ({ + className, + ...props +}: React.HTMLAttributes) => ( +
+) +AlertDialogFooter.displayName = "AlertDialogFooter" + +const AlertDialogTitle = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)) +AlertDialogTitle.displayName = AlertDialogPrimitive.Title.displayName + +const AlertDialogDescription = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)) +AlertDialogDescription.displayName = + AlertDialogPrimitive.Description.displayName + +const AlertDialogAction = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)) +AlertDialogAction.displayName = AlertDialogPrimitive.Action.displayName + +const AlertDialogCancel = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)) +AlertDialogCancel.displayName = AlertDialogPrimitive.Cancel.displayName + +export { + AlertDialog, + AlertDialogPortal, + AlertDialogOverlay, + AlertDialogTrigger, + AlertDialogContent, + AlertDialogHeader, + AlertDialogFooter, + AlertDialogTitle, + AlertDialogDescription, + AlertDialogAction, + AlertDialogCancel, +} diff --git a/alert.tsx b/alert.tsx new file mode 100644 index 0000000000000000000000000000000000000000..41fa7e0561a3fdb5f986c1213a35e563de740e96 --- /dev/null +++ b/alert.tsx @@ -0,0 +1,59 @@ +import * as React from "react" +import { cva, type VariantProps } from "class-variance-authority" + +import { cn } from "@/lib/utils" + +const alertVariants = cva( + "relative w-full rounded-lg border p-4 [&>svg~*]:pl-7 [&>svg+div]:translate-y-[-3px] [&>svg]:absolute [&>svg]:left-4 [&>svg]:top-4 [&>svg]:text-foreground", + { + variants: { + variant: { + default: "bg-background text-foreground", + destructive: + "border-destructive/50 text-destructive dark:border-destructive [&>svg]:text-destructive", + }, + }, + defaultVariants: { + variant: "default", + }, + } +) + +const Alert = React.forwardRef< + 
HTMLDivElement, + React.HTMLAttributes & VariantProps +>(({ className, variant, ...props }, ref) => ( +
+)) +Alert.displayName = "Alert" + +const AlertTitle = React.forwardRef< + HTMLParagraphElement, + React.HTMLAttributes +>(({ className, ...props }, ref) => ( +
+)) +AlertTitle.displayName = "AlertTitle" + +const AlertDescription = React.forwardRef< + HTMLParagraphElement, + React.HTMLAttributes +>(({ className, ...props }, ref) => ( +
+)) +AlertDescription.displayName = "AlertDescription" + +export { Alert, AlertTitle, AlertDescription } diff --git a/aspect-ratio.tsx b/aspect-ratio.tsx new file mode 100644 index 0000000000000000000000000000000000000000..c4abbf37f217c715a0eaade7f45ac78600df419f --- /dev/null +++ b/aspect-ratio.tsx @@ -0,0 +1,5 @@ +import * as AspectRatioPrimitive from "@radix-ui/react-aspect-ratio" + +const AspectRatio = AspectRatioPrimitive.Root + +export { AspectRatio } diff --git a/atlas-intelligence-code.swift b/atlas-intelligence-code.swift new file mode 100644 index 0000000000000000000000000000000000000000..157c658efb2f5f48018b9673b13b100ed0d096fc --- /dev/null +++ b/atlas-intelligence-code.swift @@ -0,0 +1,1453 @@ +// ATLAS INTELLIGENCE SYSTEM +// A conceptual implementation integrating VisionOS, Apple Intelligence, and Tesla API + +// --------- 1. PROJECT STRUCTURE --------- +/* +AtlasIntelligence/ +├── Sources/ +│ ├── Core/ +│ │ ├── AtlasCore.swift +│ │ ├── NeuralEngine.swift +│ │ └── RecursiveThinking.swift +│ ├── Vision/ +│ │ ├── VisionInterface.swift +│ │ ├── SpatialComputing.swift +│ │ └── RealityViews.swift +│ ├── Integration/ +│ │ ├── AppleIntelligence.swift +│ │ ├── TeslaConnection.swift +│ │ └── iCloudSync.swift +│ ├── Payment/ +│ │ └── TapToPay.swift +│ └── App/ +│ └── AtlasApp.swift +└── Resources/ + └── Models/ + └── atlas_model.mlpackage +*/ + +// --------- 2. 
CORE AI ENGINE --------- + +// AtlasCore.swift - Main intelligence coordinator +import Foundation +import CoreML +import NaturalLanguage +import CreateML + +/// The central AI system for Atlas Intelligence +class AtlasCore { + // Core AI components + private let neuralEngine: NeuralEngine + private let recursiveThinking: RecursiveThinking + private let contextManager: ContextManager + + // Integration components + private let appleIntelligence: AppleIntelligence + private let teslaConnection: TeslaConnection + private let iCloudSync: iCloudSync + + // Initialize the system + init() { + self.neuralEngine = NeuralEngine() + self.recursiveThinking = RecursiveThinking() + self.contextManager = ContextManager() + + self.appleIntelligence = AppleIntelligence() + self.teslaConnection = TeslaConnection() + self.iCloudSync = iCloudSync() + + // Load ML models and initialize systems + loadModels() + } + + private func loadModels() { + // Load pre-trained CoreML models + do { + try neuralEngine.loadModel(named: "atlas_quantum_network") + try recursiveThinking.loadModel(named: "recursive_reasoning") + } catch { + print("Error loading models: \(error)") + } + } + + /// Process user input with multi-step reasoning + func processInput(_ input: String, context: UserContext) async throws -> AtlasResponse { + // First-level processing + let initialContext = contextManager.getCurrentContext() + let initialAnalysis = try await neuralEngine.analyze(input, withContext: initialContext) + + // Recursive thinking to refine understanding + let deepAnalysis = try await recursiveThinking.deepProcess( + input: input, + initialAnalysis: initialAnalysis, + maxRecursionDepth: 3 + ) + + // Generate response based on analysis + let response = try await generateResponse(from: deepAnalysis, context: context) + + // Update context for future interactions + contextManager.updateContext(with: input, response: response) + + return response + } + + /// Generate a coherent response based on the AI's 
analysis + private func generateResponse(from analysis: DeepAnalysis, context: UserContext) async throws -> AtlasResponse { + // Decision process to determine the best response + let intent = analysis.primaryIntent + + switch intent { + case .informationQuery: + return try await neuralEngine.generateInformationalResponse(for: analysis) + case .actionRequest: + return try await handleActionRequest(analysis, context: context) + case .conversation: + return try await neuralEngine.generateConversationalResponse(for: analysis) + default: + throw AtlasError.unrecognizedIntent + } + } + + /// Handle requests that require action (Tesla control, payments, etc.) + private func handleActionRequest(_ analysis: DeepAnalysis, context: UserContext) async throws -> AtlasResponse { + let actionType = analysis.actionRequest.type + + switch actionType { + case .teslaControl: + return try await teslaConnection.executeCommand(analysis.actionRequest) + case .payment: + return try await handlePaymentRequest(analysis.actionRequest, context: context) + case .appleSpatial: + return try await appleIntelligence.executeSpatialCommand(analysis.actionRequest) + default: + throw AtlasError.unsupportedAction + } + } + + /// Process payment requests + private func handlePaymentRequest(_ request: ActionRequest, context: UserContext) async throws -> AtlasResponse { + // Verify security context and authentication + guard context.isAuthenticated && context.paymentAuthorized else { + throw AtlasError.unauthorizedPayment + } + + // Process through secure payment channel + let paymentProcessor = TapToPay() + return try await paymentProcessor.processPayment(request.paymentDetails) + } +} + +// NeuralEngine.swift - Advanced neural network implementation +import Foundation +import CoreML +import NaturalLanguage + +/// Neural processing engine that drives core intelligence +class NeuralEngine { + private var model: MLModel? + private let embeddingProvider: NLEmbedding? 
+ private let tokenizer: NLTokenizer + + init() { + self.tokenizer = NLTokenizer(unit: .word) + self.embeddingProvider = NLEmbedding.wordEmbedding(for: .english) + } + + /// Load a CoreML model for neural processing + func loadModel(named name: String) throws { + let modelURL = Bundle.main.url(forResource: name, withExtension: "mlmodelc")! + self.model = try MLModel(contentsOf: modelURL) + } + + /// Analyze user input with context + func analyze(_ input: String, withContext context: Context) async throws -> Analysis { + // Tokenize and process input + tokenizer.string = input + let tokens = tokenizer.tokens(for: input.startIndex.. [Float]? in + let word = String(input[token]) + return embeddingProvider?.vector(for: word)?.map { Float($0) } + } + + // Process through neural network + let inputFeatures = try createInputFeatures(embeddings: embeddings, context: context) + let prediction = try model?.prediction(from: inputFeatures) + + // Interpret model output into structured analysis + return try interpretModelOutput(prediction) + } + + /// Create ML features from text embeddings and context + private func createInputFeatures(embeddings: [[Float]], context: Context) throws -> MLFeatureProvider { + // Combine embeddings with context into ML features + // Implementation would depend on specific model architecture + fatalError("Implementation required based on model architecture") + } + + /// Interpret raw model output into structured analysis + private func interpretModelOutput(_ output: MLFeatureProvider?) 
throws -> Analysis { + // Transform model outputs into semantically meaningful structures + // Implementation would depend on model architecture and output format + fatalError("Implementation required based on model architecture") + } + + /// Generate informational responses based on analysis + func generateInformationalResponse(for analysis: DeepAnalysis) async throws -> AtlasResponse { + // Generate coherent, informative response based on analysis + // Uses the model to generate natural language from semantic representation + fatalError("Implementation required") + } + + /// Generate conversational responses + func generateConversationalResponse(for analysis: DeepAnalysis) async throws -> AtlasResponse { + // Generate natural-sounding conversational response + fatalError("Implementation required") + } +} + +// RecursiveThinking.swift - Implements multi-step reasoning +import Foundation +import CoreML + +/// Advanced reasoning capabilities with recursive processing +class RecursiveThinking { + private var model: MLModel? + + func loadModel(named name: String) throws { + let modelURL = Bundle.main.url(forResource: name, withExtension: "mlmodelc")! + self.model = try MLModel(contentsOf: modelURL) + } + + /// Process input through multiple reasoning steps + func deepProcess(input: String, initialAnalysis: Analysis, maxRecursionDepth: Int) async throws -> DeepAnalysis { + var currentAnalysis = initialAnalysis + var reasoning: [ReasoningStep] = [] + + // Recursive reasoning process + for depth in 0.. 
[String] { + // Implementation would generate relevant questions about current understanding + fatalError("Implementation required") + } + + /// Answer self-generated questions to refine understanding + private func answerSelfQuestion(_ question: String, currentAnalysis: Analysis) async throws -> Refinement { + // Implementation would answer questions using current understanding + fatalError("Implementation required") + } + + /// Integrate refinements into current analysis + private func integrateRefinements(_ analysis: Analysis, refinements: [Refinement]) -> Analysis { + // Implementation would update analysis with new insights + fatalError("Implementation required") + } + + /// Determine if refinement process is complete + private func isRefinementComplete(_ analysis: Analysis, reasoning: [ReasoningStep]) -> Bool { + // Implementation would check if further refinement would be beneficial + fatalError("Implementation required") + } +} + +// ContextManager.swift - Manages conversational context +import Foundation + +/// Manages and updates conversation context +class ContextManager { + private var contextHistory: [Context] = [] + + /// Get current context for processing + func getCurrentContext() -> Context { + return contextHistory.last ?? Context.empty + } + + /// Update context with new interaction + func updateContext(with input: String, response: AtlasResponse) { + let newContext = Context( + timestamp: Date(), + userInput: input, + systemResponse: response, + previousContext: getCurrentContext() + ) + + contextHistory.append(newContext) + + // Trim context history if too long + if contextHistory.count > 10 { + contextHistory.removeFirst() + } + } +} + +// --------- 3. 
VISION OS INTEGRATION --------- + +// VisionInterface.swift - Interface with VisionOS +import SwiftUI +import RealityKit +import RealityKitContent + +/// Manages integration with VisionOS spatial environment +struct VisionInterface { + private let spatialComputing: SpatialComputing + + init() { + self.spatialComputing = SpatialComputing() + } + + /// Create immersive UI element + func createImmersiveElement(for response: AtlasResponse) -> some View { + // If response includes spatial content + if response.hasSpatialContent { + return spatialComputing.createSpatialView(for: response) + } else { + // Return standard UI element + return AtlasResponseView(response: response) + } + } + + /// Process spatial gestures + func processGesture(_ gesture: SpatialGesture) -> GestureIntent { + return spatialComputing.interpretGesture(gesture) + } + + /// Interpret environment data + func processEnvironmentData(_ environmentData: EnvironmentData) -> EnvironmentContext { + return spatialComputing.analyzeEnvironment(environmentData) + } +} + +// SpatialComputing.swift - Handles spatial computing features +import RealityKit +import ARKit +import SwiftUI + +/// Core spatial computing capabilities +struct SpatialComputing { + /// Create a spatial interface for responses + func createSpatialView(for response: AtlasResponse) -> some View { + // Create appropriate spatial UI elements based on response type + switch response.spatialContentType { + case .floatingPanel: + return FloatingPanelView(content: response.content) + case .virtualObject: + return VirtualObjectView(model: response.modelContent) + case .environmentOverlay: + return EnvironmentOverlayView(overlay: response.overlayContent) + default: + return DefaultResponseView(response: response) + } + } + + /// Interpret spatial gestures + func interpretGesture(_ gesture: SpatialGesture) -> GestureIntent { + // Analyze gesture and determine user intent + switch gesture.type { + case .tap: + return GestureIntent.select(position: 
gesture.position) + case .swipe: + return GestureIntent.scroll(direction: gesture.direction) + case .pinch: + return GestureIntent.zoom(scale: gesture.scale) + default: + return GestureIntent.unknown + } + } + + /// Analyze spatial environment + func analyzeEnvironment(_ environmentData: EnvironmentData) -> EnvironmentContext { + // Process environment data to provide context to AI + let surfaces = detectSurfaces(from: environmentData.depthData) + let lighting = analyzeLighting(from: environmentData.lightEstimate) + let objects = identifyObjects(from: environmentData.sceneReconstruction) + + return EnvironmentContext( + surfaces: surfaces, + lighting: lighting, + identifiedObjects: objects, + spatialAnchors: environmentData.anchors + ) + } + + // Helper functions for environment analysis + private func detectSurfaces(from depthData: ARDepthData) -> [Surface] { + // Implementation to detect surfaces from depth data + fatalError("Implementation required") + } + + private func analyzeLighting(from lightEstimate: ARLightEstimate) -> LightingContext { + // Implementation to analyze lighting conditions + fatalError("Implementation required") + } + + private func identifyObjects(from sceneReconstruction: ARSceneReconstruction) -> [IdentifiedObject] { + // Implementation to identify objects in environment + fatalError("Implementation required") + } +} + +// RealityViews.swift - SwiftUI views for VisionOS +import SwiftUI +import RealityKit + +/// Floating panel view for Atlas responses +struct FloatingPanelView: View { + let content: String + + var body: some View { + VStack { + Text("Atlas Intelligence") + .font(.headline) + + Divider() + + Text(content) + .padding() + + // Interactive elements would go here + HStack { + Button("More") { + // Expand view + } + + Spacer() + + Button("Respond") { + // Activate voice response + } + } + .padding() + } + .frame(width: 400, height: 300) + .background(.ultraThinMaterial) + .cornerRadius(20) + .hoverEffect() + } +} + +/// Virtual 
3D object view +struct VirtualObjectView: View { + let model: ModelEntity + + var body: some View { + RealityView { content in + content.add(model) + } + .gesture(TapGesture().onEnded { _ in + // Handle interaction with virtual object + }) + } +} + +/// Environment overlay view +struct EnvironmentOverlayView: View { + let overlay: EnvironmentOverlay + + var body: some View { + ZStack { + // Render overlay elements + ForEach(overlay.elements) { element in + element.view + .position(element.position) + } + } + } +} + +/// Default response view +struct DefaultResponseView: View { + let response: AtlasResponse + + var body: some View { + Text(response.content) + .padding() + .background(.ultraThinMaterial) + .cornerRadius(12) + } +} + +// --------- 4. INTEGRATION COMPONENTS --------- + +// AppleIntelligence.swift - Integration with Apple's AI systems +import Foundation +import NaturalLanguage +import Vision +import SoundAnalysis + +/// Integration with Apple Intelligence +class AppleIntelligence { + private let audioEngine: SNAudioEngine + private let visionProcessor: VNImageRequestHandler + + init() { + self.audioEngine = SNAudioEngine() + self.visionProcessor = VNImageRequestHandler() + } + + /// Execute spatial commands using Apple Intelligence + func executeSpatialCommand(_ command: ActionRequest) async throws -> AtlasResponse { + // Delegate to appropriate Apple system based on command + switch command.spatialType { + case .objectDetection: + return try await performObjectDetection(command) + case .spatialAudio: + return try await configureSpatialAudio(command) + case .textRecognition: + return try await performTextRecognition(command) + default: + throw IntegrationError.unsupportedSpatialCommand + } + } + + /// Perform object detection + private func performObjectDetection(_ command: ActionRequest) async throws -> AtlasResponse { + // Implementation would use Vision framework for object detection + fatalError("Implementation required") + } + + /// Configure 
spatial audio + private func configureSpatialAudio(_ command: ActionRequest) async throws -> AtlasResponse { + // Implementation would configure spatial audio + fatalError("Implementation required") + } + + /// Perform text recognition in environment + private func performTextRecognition(_ command: ActionRequest) async throws -> AtlasResponse { + // Implementation would use Vision framework for text recognition + fatalError("Implementation required") + } +} + +// TeslaConnection.swift - Integration with Tesla API +import Foundation + +/// Integration with Tesla vehicles +class TeslaConnection { + private let apiClient: TeslaAPIClient + private var authToken: String? + + init() { + self.apiClient = TeslaAPIClient() + } + + /// Authenticate with Tesla API + func authenticate(using credentials: TeslaCredentials) async throws { + self.authToken = try await apiClient.authenticate(credentials) + } + + /// Execute commands on Tesla vehicle + func executeCommand(_ command: ActionRequest) async throws -> AtlasResponse { + guard let authToken = authToken else { + throw TeslaError.notAuthenticated + } + + // Execute command via Tesla API + switch command.teslaCommandType { + case .climate: + return try await executeClimateCommand(command, token: authToken) + case .charging: + return try await executeChargingCommand(command, token: authToken) + case .vehicle: + return try await executeVehicleCommand(command, token: authToken) + default: + throw TeslaError.unsupportedCommand + } + } + + /// Tesla API interaction for climate controls + private func executeClimateCommand(_ command: ActionRequest, token: String) async throws -> AtlasResponse { + let response = try await apiClient.executeClimateCommand( + vehicleId: command.vehicleId, + settings: command.climateSettings, + token: token + ) + + return AtlasResponse( + content: "Climate control settings updated. 
Current temperature: \(response.currentTemp)°F", + status: .success, + actionPerformed: .teslaClimateControl + ) + } + + /// Tesla API interaction for charging functions + private func executeChargingCommand(_ command: ActionRequest, token: String) async throws -> AtlasResponse { + let response = try await apiClient.executeChargingCommand( + vehicleId: command.vehicleId, + settings: command.chargingSettings, + token: token + ) + + return AtlasResponse( + content: "Charging settings updated. Current charge level: \(response.chargeLevel)%, estimated completion: \(response.estimatedCompletion)", + status: .success, + actionPerformed: .teslaChargingControl + ) + } + + /// Tesla API interaction for vehicle functions + private func executeVehicleCommand(_ command: ActionRequest, token: String) async throws -> AtlasResponse { + let response = try await apiClient.executeVehicleCommand( + vehicleId: command.vehicleId, + command: command.vehicleCommand, + parameters: command.vehicleParameters, + token: token + ) + + return AtlasResponse( + content: "Vehicle command executed: \(response.commandExecuted)", + status: response.success ? .success : .failure, + actionPerformed: .teslaVehicleControl + ) + } +} + +/// Tesla API client for direct API interactions +class TeslaAPIClient { + private let baseURL = URL(string: "https://owner-api.teslamotors.com/api/1")! 
+ + /// Authenticate with Tesla API + func authenticate(_ credentials: TeslaCredentials) async throws -> String { + // Implementation would handle OAuth authentication with Tesla + // Returns auth token + fatalError("Implementation required") + } + + /// Execute climate control commands + func executeClimateCommand(vehicleId: String, settings: ClimateSettings, token: String) async throws -> ClimateResponse { + // Implementation would execute climate control API calls + fatalError("Implementation required") + } + + /// Execute charging commands + func executeChargingCommand(vehicleId: String, settings: ChargingSettings, token: String) async throws -> ChargingResponse { + // Implementation would execute charging API calls + fatalError("Implementation required") + } + + /// Execute general vehicle commands + func executeVehicleCommand(vehicleId: String, command: String, parameters: [String: Any], token: String) async throws -> VehicleCommandResponse { + // Implementation would execute vehicle command API calls + fatalError("Implementation required") + } +} + +// iCloudSync.swift - Integration with iCloud +import Foundation +import CloudKit + +/// iCloud data synchronization +class iCloudSync { + private let container: CKContainer + private let database: CKDatabase + + init() { + self.container = CKContainer.default() + self.database = container.privateCloudDatabase + } + + /// Save user preferences to iCloud + func savePreferences(_ preferences: UserPreferences) async throws { + let record = CKRecord(recordType: "AtlasPreferences") + record["settings"] = try JSONEncoder().encode(preferences) + + try await database.save(record) + } + + /// Load user preferences from iCloud + func loadPreferences() async throws -> UserPreferences { + let query = CKQuery(recordType: "AtlasPreferences", predicate: NSPredicate(value: true)) + let result = try await database.records(matching: query) + + guard let record = result.matchResults.first?.1.get() else { + return 
UserPreferences.default
        }

        guard let data = record["settings"] as? Data else {
            return UserPreferences.default
        }

        return try JSONDecoder().decode(UserPreferences.self, from: data)
    }

    /// Store one interaction in the user's private database, stamped with
    /// the current time for later sorted retrieval.
    func saveInteractionHistory(_ history: InteractionHistory) async throws {
        let record = CKRecord(recordType: "AtlasHistory")
        record["history"] = try JSONEncoder().encode(history)
        record["timestamp"] = Date()

        try await database.save(record)
    }

    /// Load interaction history from iCloud, newest first.
    /// Records without a decodable "history" field are silently skipped.
    func loadInteractionHistory(limit: Int = 100) async throws -> [InteractionHistory] {
        let query = CKQuery(
            recordType: "AtlasHistory",
            predicate: NSPredicate(value: true)
        )
        query.sortDescriptors = [NSSortDescriptor(key: "timestamp", ascending: false)]

        let result = try await database.records(matching: query, resultsLimit: limit)

        return try result.matchResults.compactMap { _, recordResult in
            let record = try recordResult.get()
            guard let data = record["history"] as? Data else { return nil }
            return try JSONDecoder().decode(InteractionHistory.self, from: data)
        }
    }
}

// --------- 5. PAYMENT INTEGRATION ---------

// TapToPay.swift - Apple Pay integration
import Foundation
import PassKit

/// Tap to Pay payment processing via Apple Pay.
class TapToPay: NSObject, PKPaymentAuthorizationControllerDelegate {
    // FIX: the stored completion lost its generic arguments in the original
    // (`Result` alone does not compile). It bridges the delegate callbacks to
    // the awaiting continuation in `processPayment`.
    private var completion: ((Result<PaymentResponse, Error>) -> Void)?

    /// Fires the stored completion exactly once, then clears it so the
    /// continuation can never be resumed twice.
    private func finish(_ result: Result<PaymentResponse, Error>) {
        guard let pending = completion else { return }
        completion = nil
        pending(result)
    }

    /// Process a payment request end-to-end through the Apple Pay sheet.
    /// - Throws: `PaymentError.unsecureContext` when the caller's context is
    ///   not secure; any processor/presentation error otherwise.
    func processPayment(_ details: PaymentDetails) async throws -> AtlasResponse {
        // Verify security requirements
        guard details.isSecureContext else {
            throw PaymentError.unsecureContext
        }

        // Configure payment request
        let request = createPaymentRequest(from: details)

        // Bridge the delegate-based Apple Pay flow into async/await.
        return try await withCheckedThrowingContinuation { continuation in
            self.completion = { result in
                switch result {
                case .success:
                    let atlasResponse = AtlasResponse(
                        content: "Payment of \(details.amount) \(details.currency) completed successfully.",
                        status: .success,
                        actionPerformed: .payment
                    )
                    continuation.resume(returning: atlasResponse)

                case .failure(let error):
                    continuation.resume(throwing: error)
                }
            }

            let controller = PKPaymentAuthorizationController(paymentRequest: request)
            controller.delegate = self
            controller.present(completion: { presented in
                if !presented {
                    self.finish(.failure(PaymentError.presentationFailed))
                }
            })
        }
    }

    /// Create an Apple Pay payment request for the given details.
    private func createPaymentRequest(from details: PaymentDetails) -> PKPaymentRequest {
        let request = PKPaymentRequest()

        request.merchantIdentifier = "merchant.com.atlas.intelligence"
        request.countryCode = details.countryCode
        request.currencyCode = details.currency
        request.supportedNetworks = [.visa, .masterCard, .amex]
        request.merchantCapabilities = [.capability3DS, .capabilityDebit, .capabilityCredit]

        // Add payment items
        let total = PKPaymentSummaryItem(
            label: details.description,
            amount: NSDecimalNumber(value: details.amount)
        )
        request.paymentSummaryItems = [total]

        return request
    }

    // MARK: - PKPaymentAuthorizationControllerDelegate

    func paymentAuthorizationController(_ controller: PKPaymentAuthorizationController,
                                        didAuthorizePayment payment: PKPayment,
                                        handler completion: @escaping (PKPaymentAuthorizationResult) -> Void) {
        // Process payment with payment processor
        processPaymentWithProcessor(payment) { success, error in
            if success {
                self.finish(.success(PaymentResponse(
                    transactionId: UUID().uuidString,
                    timestamp: Date()
                )))
                completion(PKPaymentAuthorizationResult(status: .success, errors: nil))
            } else {
                self.finish(.failure(error ?? PaymentError.unknown))
                // FIX: `[error].compactMap { $0 as NSError }` was an invalid
                // cast of an optional `Error?`; just forward it when present.
                completion(PKPaymentAuthorizationResult(status: .failure, errors: error.map { [$0] }))
            }
        }
    }

    func paymentAuthorizationControllerDidFinish(_ controller: PKPaymentAuthorizationController) {
        // FIX: when the user dismissed the sheet without authorizing, the
        // stored completion never fired and the continuation in
        // `processPayment` leaked forever. Resume it as a failure here;
        // `finish` is a no-op after a prior success/failure.
        finish(.failure(PaymentError.authorizationFailed))
        controller.dismiss {}
    }

    /// Process payment with backend payment processor.
    /// In a real app, this would connect to your payment processor; here the
    /// payment is simulated as successful after a short delay.
    private func processPaymentWithProcessor(_ payment: PKPayment, completion: @escaping (Bool, Error?) -> Void) {
        DispatchQueue.main.asyncAfter(deadline: .now() + 1) {
            completion(true, nil)
        }
    }
}

// --------- 6. 
APP IMPLEMENTATION ---------

// AtlasApp.swift - Main app implementation
import SwiftUI

/// Main Atlas Intelligence app: wires the core view model and the
/// vision/Tesla subsystems into the main window and the immersive space.
@main
struct AtlasApp: App {
    // Core systems
    @StateObject private var atlasCore = AtlasCoreViewModel()

    // Environment objects
    @StateObject private var visionSystem = VisionSystem()
    @StateObject private var teslaSystem = TeslaSystem()

    var body: some Scene {
        WindowGroup {
            ContentView()
                .environmentObject(atlasCore)
                .environmentObject(visionSystem)
                .environmentObject(teslaSystem)
        }

        // Add immersive space for visionOS
        ImmersiveSpace(id: "AtlasSpace") {
            AtlasImmersiveView()
                .environmentObject(atlasCore)
                .environmentObject(visionSystem)
        }
    }
}

/// Main content view: scrolling conversation transcript plus an input bar.
struct ContentView: View {
    @EnvironmentObject var atlasCore: AtlasCoreViewModel
    @State private var userInput: String = ""

    var body: some View {
        VStack {
            // Response display area
            ScrollView {
                VStack(alignment: .leading) {
                    ForEach(atlasCore.conversationHistory) { item in
                        ConversationItemView(item: item)
                    }
                }
                .padding()
            }

            // Input area
            HStack {
                TextField("Ask Atlas...", text: $userInput)
                    .textFieldStyle(RoundedBorderTextFieldStyle())
                    .padding()

                Button(action: {
                    // FIX: snapshot and clear the field *before* the async
                    // work. The original cleared it only after the response
                    // arrived, so text typed while processing was wiped.
                    let text = userInput
                    userInput = ""
                    Task {
                        await atlasCore.processInput(text)
                    }
                }) {
                    Image(systemName: "arrow.up.circle.fill")
                        .resizable()
                        .frame(width: 30, height: 30)
                }
                .padding(.trailing)
            }
        }
    }
}

/// Immersive view for visionOS: positions spatial elements from the current
/// response and shows a pulsing indicator while Atlas is processing.
struct AtlasImmersiveView: View {
    @EnvironmentObject var atlasCore: AtlasCoreViewModel
    @EnvironmentObject var visionSystem: VisionSystem

    var body: some View {
        ZStack {
            // Render spatial elements based on context
            if let spatialContent = atlasCore.currentSpatialContent {
                ForEach(spatialContent.elements) { element in
                    element.view
                        .position(element.position)
                }
            }

            // Voice indicator when speaking
            if atlasCore.isProcessing {
                VoiceProcessingIndicator()
            }
        }
    }
}

/// 
View model for Atlas Core: bridges user input to `AtlasCore` and publishes
/// conversation/spatial state to SwiftUI on the main actor.
class AtlasCoreViewModel: ObservableObject {
    private let atlasCore = AtlasCore()

    @Published var conversationHistory: [ConversationItem] = []
    @Published var isProcessing: Bool = false
    @Published var currentSpatialContent: SpatialContent?

    /// Process user input end-to-end: echo it into the transcript, run it
    /// through Atlas Core, then publish the response (or an error message).
    func processInput(_ input: String) async {
        guard !input.isEmpty else { return }

        await MainActor.run {
            isProcessing = true
            // Add user message to conversation
            conversationHistory.append(ConversationItem(
                id: UUID(),
                text: input,
                isUser: true,
                timestamp: Date()
            ))
        }

        do {
            // Process through Atlas Core
            // NOTE(review): authentication is hard-coded `true` here — confirm
            // this is intentional for the demo build.
            let context = UserContext(
                isAuthenticated: true,
                paymentAuthorized: false,
                spatialContext: nil
            )

            let response = try await atlasCore.processInput(input, context: context)

            await MainActor.run {
                // Add response to conversation
                conversationHistory.append(ConversationItem(
                    id: UUID(),
                    text: response.content,
                    isUser: false,
                    timestamp: Date()
                ))

                // Update spatial content if available
                if response.hasSpatialContent {
                    currentSpatialContent = response.spatialContent
                }

                isProcessing = false
            }
        } catch {
            await MainActor.run {
                // Add error response
                conversationHistory.append(ConversationItem(
                    id: UUID(),
                    text: "Sorry, I encountered an error: \(error.localizedDescription)",
                    isUser: false,
                    timestamp: Date()
                ))

                isProcessing = false
            }
        }
    }
}

// --------- 7. DATA MODELS ---------

/// User context for request processing
struct UserContext {
    let isAuthenticated: Bool
    let paymentAuthorized: Bool
    let spatialContext: SpatialContext?
}

/// Conversation context (linked backwards through `previousContext`).
/// FIX: this was a `struct` storing `previousContext: Context?` — a value type
/// cannot recursively contain itself (even via Optional), so the original did
/// not compile. A reference type with an equivalent memberwise initializer
/// keeps the call sites unchanged.
final class Context {
    let timestamp: Date
    let userInput: String
    let systemResponse: AtlasResponse
    let previousContext: Context?

    init(timestamp: Date, userInput: String, systemResponse: AtlasResponse, previousContext: Context?) {
        self.timestamp = timestamp
        self.userInput = userInput
        self.systemResponse = systemResponse
        self.previousContext = previousContext
    }

    static var empty: Context {
        return Context(
            timestamp: Date(),
            userInput: "",
            systemResponse: AtlasResponse(content: "", status: .unknown, actionPerformed: nil),
            previousContext: nil
        )
    }
}

/// Atlas response data model. `content` is the user-visible text; the spatial
/// fields are optional payloads for the visionOS immersive view.
struct AtlasResponse {
    let content: String
    let status: ResponseStatus
    let actionPerformed: ActionType?
    var hasSpatialContent: Bool = false
    var spatialContent: SpatialContent?
    var spatialContentType: SpatialContentType?
    // NOTE(review): `ModelEntity` is a RealityKit type — confirm the file
    // imports RealityKit where this is constructed.
    var modelContent: ModelEntity?
    var overlayContent: EnvironmentOverlay?

    enum ResponseStatus {
        case success
        case failure
        case unknown
    }

    enum ActionType {
        case teslaClimateControl
        case teslaChargingControl
        case teslaVehicleControl
        case payment
        case spatialVisualization
    }

    enum SpatialContentType {
        case floatingPanel
        case virtualObject
        case environmentOverlay
        case none
    }
}

/// Data model for conversation items
struct ConversationItem: Identifiable {
    let id: UUID
    let text: String
    let isUser: Bool
    let timestamp: Date
}

/// Spatial content model: positioned, type-erased views to place in space.
struct SpatialContent {
    var elements: [SpatialElement]

    struct SpatialElement: Identifiable {
        let id: UUID
        let position: CGPoint
        let view: AnyView
    }
}

/// Environment overlay model (same shape as `SpatialContent`, used for
/// overlays anchored to the real environment).
struct EnvironmentOverlay {
    var elements: [OverlayElement]

    struct OverlayElement: Identifiable {
        let id: UUID
        let position: CGPoint
        let view: AnyView
    }
}

/// Model for deep thought analysis: a base pass, a refined pass, and the
/// reasoning steps taken between them. The refined pass is authoritative.
struct DeepAnalysis {
    let baseAnalysis: Analysis
    let refinedAnalysis: Analysis
    let reasoningSteps: [ReasoningStep]

    var primaryIntent: Intent {
        return refinedAnalysis.intent
    }

    var actionRequest: ActionRequest {
        return refinedAnalysis.actionRequest
    }

    enum Intent {
        case informationQuery
        case actionRequest
        case conversation
        case unknown
    }
}

/// Initial analysis structure
struct Analysis {
    let intent: DeepAnalysis.Intent
    let entities: [Entity]
    let sentiment: Float
    let actionRequest: ActionRequest

    struct Entity {
        let text: String
        let type: EntityType

        enum EntityType {
            case person
            case location
            case date
            case organization
            case product
            case custom(String)
        }
    }
}

/// Action request model: a typed view over an untyped `payload` dictionary.
/// Every accessor returns a safe default when the key is absent or mistyped.
struct ActionRequest {
    let type: ActionType
    let payload: [String: Any]

    // Tesla-specific properties
    var vehicleId: String {
        return payload["vehicleId"] as? String ?? ""
    }

    var teslaCommandType: TeslaCommandType {
        let rawValue = payload["teslaCommandType"] as? String ?? ""
        return TeslaCommandType(rawValue: rawValue) ?? .unknown
    }

    var climateSettings: ClimateSettings {
        return payload["climateSettings"] as? ClimateSettings ?? ClimateSettings()
    }

    var chargingSettings: ChargingSettings {
        return payload["chargingSettings"] as? ChargingSettings ?? ChargingSettings()
    }

    var vehicleCommand: String {
        return payload["vehicleCommand"] as? String ?? ""
    }

    var vehicleParameters: [String: Any] {
        return payload["vehicleParameters"] as? [String: Any] ?? [:]
    }

    // Spatial-specific properties
    var spatialType: SpatialCommandType {
        let rawValue = payload["spatialType"] as? String ?? ""
        return SpatialCommandType(rawValue: rawValue) ?? .unknown
    }

    // Payment-specific properties
    var paymentDetails: PaymentDetails {
        return payload["paymentDetails"] as? PaymentDetails ?? PaymentDetails()
    }

    var isSecureContext: Bool {
        return payload["isSecureContext"] as? Bool ?? 
false
    }

    enum ActionType {
        case teslaControl
        case payment
        case appleSpatial
        case unknown
    }

    enum TeslaCommandType: String {
        case climate
        case charging
        case vehicle
        case unknown
    }

    enum SpatialCommandType: String {
        case objectDetection
        case spatialAudio
        case textRecognition
        case unknown
    }
}

/// Step in the recursive thinking process: the questions asked at one depth
/// and the refinements (with confidence) they produced.
struct ReasoningStep {
    let depth: Int
    let questions: [String]
    let refinements: [Refinement]

    struct Refinement {
        let question: String
        let answer: String
        let confidenceScore: Float
    }
}

/// Climate settings for Tesla (temperature in °F; seatHeaters maps seat
/// index to heat level — presumably 0–3, TODO confirm against the API).
struct ClimateSettings {
    var targetTemperature: Float = 70.0
    var acEnabled: Bool = true
    var seatHeaters: [Int: Int] = [:]
    var defrostMode: Bool = false
}

/// Charging settings for Tesla (chargeLimit is a percentage).
struct ChargingSettings {
    var chargeLimit: Int = 80
    var scheduleEnabled: Bool = false
    var scheduleTime: Date?
}

/// Response from climate control command
struct ClimateResponse {
    let success: Bool
    let currentTemp: Float
    let targetTemp: Float
    let acStatus: Bool
}

/// Response from charging command
struct ChargingResponse {
    let success: Bool
    let chargeLevel: Int
    let estimatedCompletion: String
    let chargingState: String
}

/// Response from vehicle command
struct VehicleCommandResponse {
    let success: Bool
    let commandExecuted: String
    let result: [String: Any]
}

/// Payment details model
struct PaymentDetails {
    var amount: Double = 0
    var currency: String = "USD"
    var description: String = ""
    var countryCode: String = "US"
    var isSecureContext: Bool = false
}

/// Payment response model
struct PaymentResponse {
    let transactionId: String
    let timestamp: Date
}

/// User preferences model, persisted via iCloudSync as JSON.
struct UserPreferences: Codable {
    var theme: Theme
    var notifications: NotificationPreferences
    var accessibility: AccessibilityPreferences

    static var `default`: UserPreferences {
        return UserPreferences(
            theme: .system,
            notifications: NotificationPreferences(),
            accessibility: AccessibilityPreferences()
        )
    }

    enum Theme: String, Codable {
        case light
        case dark
        case system
    }

    struct NotificationPreferences: Codable {
        var enabled: Bool = true
        var soundEnabled: Bool = true
        var hapticEnabled: Bool = true
    }

    struct AccessibilityPreferences: Codable {
        var largeText: Bool = false
        var highContrast: Bool = false
        var reduceMotion: Bool = false
    }
}

/// Interaction history model, persisted via iCloudSync as JSON.
struct InteractionHistory: Codable {
    let timestamp: Date
    let userInput: String
    let systemResponse: String
    let actions: [String]
}

// --------- 8. ERROR TYPES ---------

/// Atlas system errors
enum AtlasError: Error {
    case unrecognizedIntent
    case unsupportedAction
    case unauthorizedPayment
    case modelLoadingFailed
}

/// Integration errors
enum IntegrationError: Error {
    case unsupportedSpatialCommand
    case appleIntelligenceNotAvailable
}

/// Tesla API errors
enum TeslaError: Error {
    case notAuthenticated
    case unsupportedCommand
    case apiError(String)
    case vehicleOffline
}

/// Payment errors
enum PaymentError: Error {
    case unsecureContext
    case authorizationFailed
    case presentationFailed
    case processingFailed
    case unknown
}

// --------- 9. ADDITIONAL SUPPORTING COMPONENTS ---------

/// Tesla credentials model
struct TeslaCredentials {
    let email: String
    let password: String
    let mfaCode: String?
}

/// Environment data for spatial computing.
/// NOTE(review): ARDepthData/ARLightEstimate/ARSceneReconstruction/ARAnchor
/// are ARKit types — confirm ARKit is imported where this is used.
struct EnvironmentData {
    let depthData: ARDepthData
    let lightEstimate: ARLightEstimate
    let sceneReconstruction: ARSceneReconstruction
    let anchors: [ARAnchor]
}

/// Environment context for AI processing
struct EnvironmentContext {
    let surfaces: [Surface]
    let lighting: LightingContext
    let identifiedObjects: [IdentifiedObject]
    let spatialAnchors: [ARAnchor]

    struct Surface {
        let plane: Plane
        let classification: SurfaceClassification

        enum SurfaceClassification {
            case floor
            case wall
            case ceiling
            case table
            case unknown
        }
    }

    struct LightingContext {
        let intensity: Float
        let temperature: Float
        // FIX: `SIMD3?` lacked its required generic argument (stripped by
        // paste-escaping); restored as SIMD3<Float>.
        let direction: SIMD3<Float>?
    }

    struct IdentifiedObject {
        let identifier: String
        let confidence: Float
        let boundingBox: CGRect
    }
}

/// Gesture recognition types
struct SpatialGesture {
    let type: GestureType
    let position: CGPoint
    // FIX: restored stripped generic argument (was `SIMD2?`).
    let direction: SIMD2<Float>?
    let scale: Float?

    enum GestureType {
        case tap
        case swipe
        case pinch
        case rotate
        case unknown
    }
}

/// Gesture intent interpretation
enum GestureIntent {
    case select(position: CGPoint)
    // FIX: restored stripped generic argument (was `SIMD2?`).
    case scroll(direction: SIMD2<Float>?)
    case zoom(scale: Float?)
case unknown
}

/// Voice processing indicator view: a blue circle that repeatedly scales up
/// while fading out, signalling that Atlas is processing.
struct VoiceProcessingIndicator: View {
    @State private var animationValue: CGFloat = 0.0

    var body: some View {
        Circle()
            .fill(Color.blue.opacity(0.5))
            .frame(width: 100, height: 100)
            .scaleEffect(1.0 + animationValue)
            .opacity(1.0 - animationValue)
            .onAppear {
                withAnimation(Animation.easeInOut(duration: 1.0).repeatForever(autoreverses: false)) {
                    animationValue = 1.0
                }
            }
    }
}

/// Environment classes for SwiftUI previews
class VisionSystem: ObservableObject {}
class TeslaSystem: ObservableObject {}

/// Conversation item view: right-aligned blue bubble for the user,
/// left-aligned gray bubble for Atlas.
struct ConversationItemView: View {
    let item: ConversationItem

    var body: some View {
        HStack {
            if item.isUser {
                Spacer()
                Text(item.text)
                    .padding()
                    .background(Color.blue.opacity(0.2))
                    .cornerRadius(12)
            } else {
                Text(item.text)
                    .padding()
                    .background(Color.gray.opacity(0.2))
                    .cornerRadius(12)
                Spacer()
            }
        }
        .padding(.vertical, 4)
    }
}
diff --git a/avatar.tsx b/avatar.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..991f56ecb117e96284bf0f6cad3b14ea2fdf5264
--- /dev/null
+++ b/avatar.tsx
@@ -0,0 +1,48 @@
import * as React from "react"
import * as AvatarPrimitive from "@radix-ui/react-avatar"

import { cn } from "@/lib/utils"

// FIX(review): the pasted copy lost every `<…>` type argument and JSX element
// to HTML-escaping; reconstructed from the standard shadcn/ui avatar
// component built on the Radix Avatar primitive — confirm against the repo's
// shadcn version.
const Avatar = React.forwardRef<
  React.ElementRef<typeof AvatarPrimitive.Root>,
  React.ComponentPropsWithoutRef<typeof AvatarPrimitive.Root>
>(({ className, ...props }, ref) => (
  <AvatarPrimitive.Root
    ref={ref}
    className={cn(
      "relative flex h-10 w-10 shrink-0 overflow-hidden rounded-full",
      className
    )}
    {...props}
  />
))
Avatar.displayName = AvatarPrimitive.Root.displayName

const AvatarImage = React.forwardRef<
  React.ElementRef<typeof AvatarPrimitive.Image>,
  React.ComponentPropsWithoutRef<typeof AvatarPrimitive.Image>
>(({ className, ...props }, ref) => (
  <AvatarPrimitive.Image
    ref={ref}
    className={cn("aspect-square h-full w-full", className)}
    {...props}
  />
))
AvatarImage.displayName = AvatarPrimitive.Image.displayName

const AvatarFallback = React.forwardRef<
  React.ElementRef<typeof AvatarPrimitive.Fallback>,
  React.ComponentPropsWithoutRef<typeof AvatarPrimitive.Fallback>
>(({ className, ...props }, ref) => (
  <AvatarPrimitive.Fallback
    ref={ref}
    className={cn(
      "flex h-full w-full items-center justify-center rounded-full bg-muted",
      className
    )}
    {...props}
  />
))
AvatarFallback.displayName = AvatarPrimitive.Fallback.displayName

export { Avatar, AvatarImage, AvatarFallback }
diff --git a/badge.tsx b/badge.tsx new file mode 100644 index 0000000000000000000000000000000000000000..f000e3ef5176395b067dfc3f3e1256a80c450015 --- /dev/null +++ b/badge.tsx @@ -0,0 +1,36 @@ +import * as React from "react" +import { cva, type VariantProps } from "class-variance-authority" + +import { cn } from "@/lib/utils" + +const badgeVariants = cva( + "inline-flex items-center rounded-full border px-2.5 py-0.5 text-xs font-semibold transition-colors focus:outline-none focus:ring-2 focus:ring-ring focus:ring-offset-2", + { + variants: { + variant: { + default: + "border-transparent bg-primary text-primary-foreground hover:bg-primary/80", + secondary: + "border-transparent bg-secondary text-secondary-foreground hover:bg-secondary/80", + destructive: + "border-transparent bg-destructive text-destructive-foreground hover:bg-destructive/80", + outline: "text-foreground", + }, + }, + defaultVariants: { + variant: "default", + }, + } +) + +export interface BadgeProps + extends React.HTMLAttributes, + VariantProps {} + +function Badge({ className, variant, ...props }: BadgeProps) { + return ( +
+ ) +} + +export { Badge, badgeVariants } diff --git a/breadcrumb.tsx b/breadcrumb.tsx new file mode 100644 index 0000000000000000000000000000000000000000..60e6c96f72f0350d08b47e4730cab8f3975dc853 --- /dev/null +++ b/breadcrumb.tsx @@ -0,0 +1,115 @@ +import * as React from "react" +import { Slot } from "@radix-ui/react-slot" +import { ChevronRight, MoreHorizontal } from "lucide-react" + +import { cn } from "@/lib/utils" + +const Breadcrumb = React.forwardRef< + HTMLElement, + React.ComponentPropsWithoutRef<"nav"> & { + separator?: React.ReactNode + } +>(({ ...props }, ref) =>