diff --git a/apps/bare_rn/App.tsx b/apps/bare_rn/App.tsx
index 2e7925507..b7e28658d 100644
--- a/apps/bare_rn/App.tsx
+++ b/apps/bare_rn/App.tsx
@@ -83,9 +83,64 @@ const spinnerStyles = StyleSheet.create({
},
});
+function ErrorBanner({
+ message,
+ onDismiss,
+}: {
+ message: string | null;
+ onDismiss: () => void;
+}) {
+ if (!message) return null;
+ return (
+
+
+ {message}
+
+
+ ✕
+
+
+ );
+}
+
+const errorBannerStyles = StyleSheet.create({
+ container: {
+ backgroundColor: '#FEE2E2',
+ borderLeftWidth: 4,
+ borderLeftColor: '#EF4444',
+ borderRadius: 8,
+ marginHorizontal: 16,
+ marginVertical: 8,
+ paddingVertical: 10,
+ paddingLeft: 12,
+ paddingRight: 8,
+ flexDirection: 'row',
+ alignItems: 'center',
+ },
+ message: {
+ flex: 1,
+ color: '#991B1B',
+ fontSize: 14,
+ lineHeight: 20,
+ },
+ closeButton: {
+ padding: 4,
+ marginLeft: 8,
+ },
+ closeText: {
+ color: '#991B1B',
+ fontSize: 16,
+ fontWeight: '600',
+ },
+});
+
function App() {
const [userInput, setUserInput] = useState('');
const [isTextInputFocused, setIsTextInputFocused] = useState(false);
+ const [error, setError] = useState(null);
const textInputRef = useRef(null);
const scrollViewRef = useRef(null);
@@ -98,9 +153,7 @@ function App() {
// } });
useEffect(() => {
- if (llm.error) {
- console.log('LLM error:', llm.error);
- }
+ if (llm.error) setError(String(llm.error));
}, [llm.error]);
const sendMessage = async () => {
@@ -111,7 +164,7 @@ function App() {
try {
await llm.sendMessage(userInput);
} catch (e) {
- console.error(e);
+ setError(e instanceof Error ? e.message : String(e));
}
};
@@ -123,11 +176,12 @@ function App() {
keyboardVerticalOffset={Platform.OS === 'ios' ? 100 : 0}
>
+ setError(null)} />
{llm.messageHistory.length > 0 || llm.isGenerating ? (
(
@@ -18,12 +19,19 @@ export default function ClassificationScreen() {
const [imageUri, setImageUri] = useState('');
const [inferenceTime, setInferenceTime] = useState(null);
+ const [error, setError] = useState(null);
+
const model = useClassification({ model: EFFICIENTNET_V2_S_QUANTIZED });
const { setGlobalGenerating } = useContext(GeneratingContext);
+
useEffect(() => {
setGlobalGenerating(model.isGenerating);
}, [model.isGenerating, setGlobalGenerating]);
+ useEffect(() => {
+ if (model.error) setError(String(model.error));
+ }, [model.error]);
+
const handleCameraPress = async (isCamera: boolean) => {
const image = await getImage(isCamera);
const uri = image?.uri;
@@ -46,21 +54,24 @@ export default function ClassificationScreen() {
.map(([label, score]) => ({ label, score: score as number }));
setResults(top10);
} catch (e) {
- console.error(e);
+ setError(e instanceof Error ? e.message : String(e));
}
}
};
- if (!model.isReady) {
+ if (!model.isReady && !model.error) {
return (
);
}
+
return (
+ setError(null)} />
+
[] = [
{ label: 'RF-DeTR Nano', value: RF_DETR_NANO },
{ label: 'SSDLite MobileNet', value: SSDLITE_320_MOBILENET_V3_LARGE },
];
+import ErrorBanner from '../../components/ErrorBanner';
export default function ObjectDetectionScreen() {
const [imageUri, setImageUri] = useState('');
const [results, setResults] = useState([]);
+ const [error, setError] = useState(null);
const [imageDimensions, setImageDimensions] = useState<{
width: number;
height: number;
@@ -38,6 +40,10 @@ export default function ObjectDetectionScreen() {
setGlobalGenerating(model.isGenerating);
}, [model.isGenerating, setGlobalGenerating]);
+ useEffect(() => {
+ if (model.error) setError(String(model.error));
+ }, [model.error]);
+
const handleCameraPress = async (isCamera: boolean) => {
const image = await getImage(isCamera);
const uri = image?.uri;
@@ -60,7 +66,7 @@ export default function ObjectDetectionScreen() {
setInferenceTime(Date.now() - start);
setResults(output);
} catch (e) {
- console.error(e);
+ setError(e instanceof Error ? e.message : String(e));
}
}
};
@@ -76,6 +82,7 @@ export default function ObjectDetectionScreen() {
return (
+ setError(null)} />
{imageUri && imageDimensions?.width && imageDimensions?.height ? (
diff --git a/apps/computer-vision/app/ocr/index.tsx b/apps/computer-vision/app/ocr/index.tsx
index 7033061a3..e46828798 100644
--- a/apps/computer-vision/app/ocr/index.tsx
+++ b/apps/computer-vision/app/ocr/index.tsx
@@ -31,10 +31,12 @@ const MODELS: ModelOption[] = [
{ label: 'Japanese', value: OCR_JAPANESE },
{ label: 'Korean', value: OCR_KOREAN },
];
+import ErrorBanner from '../../components/ErrorBanner';
export default function OCRScreen() {
const [imageUri, setImageUri] = useState('');
const [results, setResults] = useState([]);
+ const [error, setError] = useState(null);
const [imageDimensions, setImageDimensions] = useState<{
width: number;
height: number;
@@ -51,6 +53,10 @@ export default function OCRScreen() {
setGlobalGenerating(model.isGenerating);
}, [model.isGenerating, setGlobalGenerating]);
+ useEffect(() => {
+ if (model.error) setError(String(model.error));
+ }, [model.error]);
+
const handleCameraPress = async (isCamera: boolean) => {
const image = await getImage(isCamera);
const width = image?.width;
@@ -71,14 +77,14 @@ export default function OCRScreen() {
setInferenceTime(Date.now() - start);
setResults(output);
} catch (e) {
- console.error(e);
+ setError(e instanceof Error ? e.message : String(e));
}
};
- if (!model.isReady) {
+ if (!model.isReady && !model.error) {
return (
);
@@ -86,6 +92,7 @@ export default function OCRScreen() {
return (
+ setError(null)} />
{imageUri && imageDimensions?.width && imageDimensions?.height ? (
diff --git a/apps/computer-vision/app/ocr_vertical/index.tsx b/apps/computer-vision/app/ocr_vertical/index.tsx
index b42a9055f..90d052d8b 100644
--- a/apps/computer-vision/app/ocr_vertical/index.tsx
+++ b/apps/computer-vision/app/ocr_vertical/index.tsx
@@ -8,8 +8,9 @@ import React, { useContext, useEffect, useState } from 'react';
import { GeneratingContext } from '../../context';
import ScreenWrapper from '../../ScreenWrapper';
import { StatsBar } from '../../components/StatsBar';
+import ErrorBanner from '../../components/ErrorBanner';
-export default function VerticalOCRScree() {
+export default function VerticalOCRScreen() {
const [imageUri, setImageUri] = useState('');
const [results, setResults] = useState([]);
const [imageDimensions, setImageDimensions] = useState<{
@@ -17,15 +18,24 @@ export default function VerticalOCRScree() {
height: number;
}>();
const [inferenceTime, setInferenceTime] = useState(null);
+
+ const [error, setError] = useState(null);
+
const model = useVerticalOCR({
model: OCR_ENGLISH,
independentCharacters: true,
});
+
const { setGlobalGenerating } = useContext(GeneratingContext);
+
useEffect(() => {
setGlobalGenerating(model.isGenerating);
}, [model.isGenerating, setGlobalGenerating]);
+ useEffect(() => {
+ if (model.error) setError(String(model.error));
+ }, [model.error]);
+
const handleCameraPress = async (isCamera: boolean) => {
const image = await getImage(isCamera);
const width = image?.width;
@@ -46,14 +56,14 @@ export default function VerticalOCRScree() {
setInferenceTime(Date.now() - start);
setResults(output);
} catch (e) {
- console.error(e);
+ setError(e instanceof Error ? e.message : String(e));
}
};
- if (!model.isReady) {
+ if (!model.isReady && !model.error) {
return (
);
@@ -62,6 +72,8 @@ export default function VerticalOCRScree() {
return (
+ setError(null)} />
+
{imageUri && imageDimensions?.width && imageDimensions?.height ? (
(
{text}
- {score.toFixed(3)}
+ {score?.toFixed(3)}
))}
diff --git a/apps/computer-vision/app/semantic_segmentation/index.tsx b/apps/computer-vision/app/semantic_segmentation/index.tsx
index e8061b059..2e743174f 100644
--- a/apps/computer-vision/app/semantic_segmentation/index.tsx
+++ b/apps/computer-vision/app/semantic_segmentation/index.tsx
@@ -25,6 +25,7 @@ import React, { useContext, useEffect, useState } from 'react';
import { GeneratingContext } from '../../context';
import ScreenWrapper from '../../ScreenWrapper';
import { StatsBar } from '../../components/StatsBar';
+import ErrorBanner from '../../components/ErrorBanner';
const numberToColor: number[][] = [
[255, 87, 51], // 0 Red
@@ -69,7 +70,7 @@ export default function SemanticSegmentationScreen() {
DEEPLAB_V3_MOBILENET_V3_LARGE_QUANTIZED
);
- const { isReady, isGenerating, downloadProgress, forward } =
+ const { isReady, isGenerating, downloadProgress, forward, error: modelError } =
useSemanticSegmentation({ model: selectedModel });
const [imageUri, setImageUri] = useState('');
@@ -77,11 +78,16 @@ export default function SemanticSegmentationScreen() {
const [segImage, setSegImage] = useState(null);
const [canvasSize, setCanvasSize] = useState({ width: 0, height: 0 });
const [inferenceTime, setInferenceTime] = useState(null);
+ const [error, setError] = useState(null);
useEffect(() => {
setGlobalGenerating(isGenerating);
}, [isGenerating, setGlobalGenerating]);
+ useEffect(() => {
+ if (modelError) setError(String(modelError));
+ }, [modelError]);
+
const handleCameraPress = async (isCamera: boolean) => {
const image = await getImage(isCamera);
if (!image?.uri) return;
@@ -125,14 +131,14 @@ export default function SemanticSegmentationScreen() {
setSegImage(img);
setInferenceTime(Date.now() - start);
} catch (e) {
- console.error(e);
+ setError(e instanceof Error ? e.message : String(e));
}
};
- if (!isReady) {
+ if (!isReady && !modelError) {
return (
);
@@ -140,6 +146,7 @@ export default function SemanticSegmentationScreen() {
return (
+ setError(null)} />
[] = [
{ label: 'Rain Princess', value: STYLE_TRANSFER_RAIN_PRINCESS_QUANTIZED },
{ label: 'Udnie', value: STYLE_TRANSFER_UDNIE_QUANTIZED },
];
+import ErrorBanner from '../../components/ErrorBanner';
export default function StyleTransferScreen() {
const [selectedModel, setSelectedModel] = useState(
@@ -41,9 +42,14 @@ export default function StyleTransferScreen() {
setGlobalGenerating(model.isGenerating);
}, [model.isGenerating, setGlobalGenerating]);
+ useEffect(() => {
+ if (model.error) setError(String(model.error));
+ }, [model.error]);
+
const [imageUri, setImageUri] = useState('');
const [styledUri, setStyledUri] = useState('');
const [inferenceTime, setInferenceTime] = useState(null);
+ const [error, setError] = useState(null);
const handleCameraPress = async (isCamera: boolean) => {
const image = await getImage(isCamera);
@@ -63,15 +69,15 @@ export default function StyleTransferScreen() {
setInferenceTime(Date.now() - start);
setStyledUri(uri);
} catch (e) {
- console.error(e);
+ setError(e instanceof Error ? e.message : String(e));
}
}
};
- if (!model.isReady) {
+ if (!model.isReady && !model.error) {
return (
);
@@ -79,6 +85,7 @@ export default function StyleTransferScreen() {
return (
+ setError(null)} />
(0);
const [image, setImage] = useState(null);
const [steps, setSteps] = useState(40);
+
const [input, setInput] = useState('');
const [selectedModel, setSelectedModel] = useState(
BK_SDM_TINY_VPRED_256
);
const [generationTime, setGenerationTime] = useState(null);
+ const [keyboardVisible, setKeyboardVisible] = useState(false);
+ const [error, setError] = useState(null);
+ const [imageTitle, setImageTitle] = useState(null);
+
const imageSize = 224;
const model = useTextToImage({
model: selectedModel,
@@ -54,8 +60,28 @@ export default function TextToImageScreen() {
setGlobalGenerating(model.isGenerating);
}, [model.isGenerating, setGlobalGenerating]);
+ useEffect(() => {
+ if (model.error) setError(String(model.error));
+ }, [model.error]);
+
+ useEffect(() => {
+ const showSub = Keyboard.addListener('keyboardDidShow', () => {
+ setKeyboardVisible(true);
+ });
+ const hideSub = Keyboard.addListener('keyboardDidHide', () => {
+ setKeyboardVisible(false);
+ });
+ return () => {
+ showSub.remove();
+ hideSub.remove();
+ };
+ }, []);
+
const runForward = async () => {
if (!input.trim()) return;
+
+ setImageTitle(input);
+
try {
const start = Date.now();
const output = await model.generate(input, imageSize, steps);
@@ -65,27 +91,40 @@ export default function TextToImageScreen() {
setGenerationTime(Date.now() - start);
}
} catch (e) {
- console.error(e);
+ setError(e instanceof Error ? e.message : String(e));
+ setImageTitle(null);
} finally {
setInferenceStepIdx(0);
}
};
- if (!model.isReady) {
+ if (!model.isReady && !model.error) {
return (
);
}
return (
-
+ {
+ Keyboard.dismiss();
+ }}
+ >
+ {keyboardVisible && }
+
+ setError(null)} />
+
+
+ {imageTitle && {imageTitle}}
+
+
{model.isGenerating ? (
@@ -171,11 +210,28 @@ const styles = StyleSheet.create({
flex: 1,
width: '100%',
},
+ overlay: {
+ ...StyleSheet.absoluteFillObject,
+ backgroundColor: 'rgba(0,0,0,0.1)',
+ zIndex: 1,
+ },
+ titleContainer: {
+ paddingHorizontal: 16,
+ paddingTop: 8,
+ alignItems: 'center',
+ },
+ titleText: {
+ fontSize: 18,
+ fontWeight: '600',
+ color: '#333',
+ textAlign: 'center',
+ },
imageContainer: {
flex: 1,
alignItems: 'center',
justifyContent: 'center',
padding: 16,
+ zIndex: 0,
},
image: {
width: 256,
diff --git a/apps/computer-vision/app/vision_camera/index.tsx b/apps/computer-vision/app/vision_camera/index.tsx
index dbd969ad0..99d3fe1d5 100644
--- a/apps/computer-vision/app/vision_camera/index.tsx
+++ b/apps/computer-vision/app/vision_camera/index.tsx
@@ -34,6 +34,8 @@ import SegmentationTask from '../../components/vision_camera/tasks/SegmentationT
import InstanceSegmentationTask from '../../components/vision_camera/tasks/InstanceSegmentationTask';
import OCRTask from '../../components/vision_camera/tasks/OCRTask';
import StyleTransferTask from '../../components/vision_camera/tasks/StyleTransferTask';
+// Dismissible banner used to surface task/model errors on this screen
+import ErrorBanner from '../../components/ErrorBanner';
type TaskId =
| 'classification'
@@ -112,8 +114,6 @@ const TASKS: Task[] = [
},
];
-// Module-level consts so worklets in task components can always reference the same stable objects.
-// Never replaced — only mutated via setBlocking to avoid closure staleness.
const frameKillSwitch = createSynchronizable(false);
const cameraPositionSync = createSynchronizable<'front' | 'back'>('back');
@@ -132,6 +132,9 @@ export default function VisionCameraScreen() {
const [frameOutput, setFrameOutput] = useState | null>(null);
+
+ const [error, setError] = useState(null);
+
const { setGlobalGenerating } = useContext(GeneratingContext);
const isFocused = useIsFocused();
@@ -150,6 +153,7 @@ export default function VisionCameraScreen() {
useEffect(() => {
frameKillSwitch.setBlocking(true);
+ setError(null);
const id = setTimeout(() => {
frameKillSwitch.setBlocking(false);
}, 300);
@@ -172,6 +176,10 @@ export default function VisionCameraScreen() {
[setGlobalGenerating]
);
+ const handleErrorChange = useCallback((errorMessage: string | null) => {
+ setError(errorMessage);
+ }, []);
+
if (!cameraPermission.hasPermission) {
return (
@@ -209,12 +217,17 @@ export default function VisionCameraScreen() {
onProgressChange: setDownloadProgress,
onGeneratingChange: handleGeneratingChange,
onFpsChange: handleFpsChange,
+ onErrorChange: handleErrorChange,
};
return (
+
+ setError(null)} />
+
+
- {/* Layout sentinel — measures the full-screen area for bbox/canvas sizing */}
)}
- {!isReady && (
+ {!isReady && !error && (
void;
+}
+
+export default function ErrorBanner({ message, onDismiss }: ErrorBannerProps) {
+ if (!message) return null;
+
+ return (
+
+
+ {message}
+
+
+ ✕
+
+
+ );
+}
+
+const styles = StyleSheet.create({
+ container: {
+ backgroundColor: '#FEE2E2',
+ borderLeftWidth: 4,
+ borderLeftColor: '#EF4444',
+ borderRadius: 8,
+ marginHorizontal: 16,
+ marginVertical: 8,
+ paddingVertical: 10,
+ paddingLeft: 12,
+ paddingRight: 8,
+ flexDirection: 'row',
+ alignItems: 'center',
+ },
+ message: {
+ flex: 1,
+ color: '#991B1B',
+ fontSize: 14,
+ lineHeight: 20,
+ },
+ closeButton: {
+ padding: 4,
+ marginLeft: 8,
+ },
+ closeText: {
+ color: '#991B1B',
+ fontSize: 16,
+ fontWeight: '600',
+ },
+});
diff --git a/apps/computer-vision/components/vision_camera/tasks/ClassificationTask.tsx b/apps/computer-vision/components/vision_camera/tasks/ClassificationTask.tsx
index 5ea1cb86d..c7d858833 100644
--- a/apps/computer-vision/components/vision_camera/tasks/ClassificationTask.tsx
+++ b/apps/computer-vision/components/vision_camera/tasks/ClassificationTask.tsx
@@ -17,11 +17,16 @@ export default function ClassificationTask({
onProgressChange,
onGeneratingChange,
onFpsChange,
+ onErrorChange,
}: Props) {
const model = useClassification({ model: EFFICIENTNET_V2_S });
const [classResult, setClassResult] = useState({ label: '', score: 0 });
const lastFrameTimeRef = useRef(Date.now());
+ useEffect(() => {
+ onErrorChange(model.error ? String(model.error) : null);
+ }, [model.error, onErrorChange]);
+
useEffect(() => {
onReadyChange(model.isReady);
}, [model.isReady, onReadyChange]);
diff --git a/apps/computer-vision/components/vision_camera/tasks/InstanceSegmentationTask.tsx b/apps/computer-vision/components/vision_camera/tasks/InstanceSegmentationTask.tsx
index bf3c630fe..829dfb2ab 100644
--- a/apps/computer-vision/components/vision_camera/tasks/InstanceSegmentationTask.tsx
+++ b/apps/computer-vision/components/vision_camera/tasks/InstanceSegmentationTask.tsx
@@ -34,6 +34,7 @@ export default function InstanceSegmentationTask({
onProgressChange,
onGeneratingChange,
onFpsChange,
+ onErrorChange,
}: Props) {
const yolo26n = useInstanceSegmentation({
model: YOLO26N_SEG,
@@ -51,6 +52,10 @@ export default function InstanceSegmentationTask({
const [imageSize, setImageSize] = useState({ width: 1, height: 1 });
const lastFrameTimeRef = useRef(Date.now());
+ useEffect(() => {
+ onErrorChange(active.error ? String(active.error) : null);
+ }, [active.error, onErrorChange]);
+
useEffect(() => {
onReadyChange(active.isReady);
}, [active.isReady, onReadyChange]);
diff --git a/apps/computer-vision/components/vision_camera/tasks/OCRTask.tsx b/apps/computer-vision/components/vision_camera/tasks/OCRTask.tsx
index 1fd1d8088..e2b0e061b 100644
--- a/apps/computer-vision/components/vision_camera/tasks/OCRTask.tsx
+++ b/apps/computer-vision/components/vision_camera/tasks/OCRTask.tsx
@@ -17,12 +17,17 @@ export default function OCRTask({
onProgressChange,
onGeneratingChange,
onFpsChange,
+ onErrorChange,
}: Props) {
const model = useOCR({ model: OCR_ENGLISH });
const [detections, setDetections] = useState([]);
const [imageSize, setImageSize] = useState({ width: 1, height: 1 });
const lastFrameTimeRef = useRef(Date.now());
+ useEffect(() => {
+ onErrorChange(model.error ? String(model.error) : null);
+ }, [model.error, onErrorChange]);
+
useEffect(() => {
onReadyChange(model.isReady);
}, [model.isReady, onReadyChange]);
diff --git a/apps/computer-vision/components/vision_camera/tasks/ObjectDetectionTask.tsx b/apps/computer-vision/components/vision_camera/tasks/ObjectDetectionTask.tsx
index 0155be7e4..0ac5ef4af 100644
--- a/apps/computer-vision/components/vision_camera/tasks/ObjectDetectionTask.tsx
+++ b/apps/computer-vision/components/vision_camera/tasks/ObjectDetectionTask.tsx
@@ -25,6 +25,7 @@ export default function ObjectDetectionTask({
onProgressChange,
onGeneratingChange,
onFpsChange,
+ onErrorChange,
}: Props) {
const ssdlite = useObjectDetection({
model: SSDLITE_320_MOBILENET_V3_LARGE,
@@ -41,6 +42,10 @@ export default function ObjectDetectionTask({
const [imageSize, setImageSize] = useState({ width: 1, height: 1 });
const lastFrameTimeRef = useRef(Date.now());
+ useEffect(() => {
+ onErrorChange(active.error ? String(active.error) : null);
+ }, [active.error, onErrorChange]);
+
useEffect(() => {
onReadyChange(active.isReady);
}, [active.isReady, onReadyChange]);
diff --git a/apps/computer-vision/components/vision_camera/tasks/SegmentationTask.tsx b/apps/computer-vision/components/vision_camera/tasks/SegmentationTask.tsx
index db67f0e04..5bdd33b8f 100644
--- a/apps/computer-vision/components/vision_camera/tasks/SegmentationTask.tsx
+++ b/apps/computer-vision/components/vision_camera/tasks/SegmentationTask.tsx
@@ -44,6 +44,7 @@ export default function SegmentationTask({
onProgressChange,
onGeneratingChange,
onFpsChange,
+ onErrorChange,
}: Props) {
const segDeeplabResnet50 = useSemanticSegmentation({
model: DEEPLAB_V3_RESNET50_QUANTIZED,
@@ -87,6 +88,10 @@ export default function SegmentationTask({
const [maskImage, setMaskImage] = useState(null);
const lastFrameTimeRef = useRef(Date.now());
+ useEffect(() => {
+ onErrorChange(active.error ? String(active.error) : null);
+ }, [active.error, onErrorChange]);
+
useEffect(() => {
onReadyChange(active.isReady);
}, [active.isReady, onReadyChange]);
diff --git a/apps/computer-vision/components/vision_camera/tasks/StyleTransferTask.tsx b/apps/computer-vision/components/vision_camera/tasks/StyleTransferTask.tsx
index b37f67b44..df34e6702 100644
--- a/apps/computer-vision/components/vision_camera/tasks/StyleTransferTask.tsx
+++ b/apps/computer-vision/components/vision_camera/tasks/StyleTransferTask.tsx
@@ -31,6 +31,7 @@ export default function StyleTransferTask({
onProgressChange,
onGeneratingChange,
onFpsChange,
+ onErrorChange,
}: Props) {
const candy = useStyleTransfer({
model: STYLE_TRANSFER_CANDY,
@@ -46,6 +47,10 @@ export default function StyleTransferTask({
const [styledImage, setStyledImage] = useState(null);
const lastFrameTimeRef = useRef(Date.now());
+ useEffect(() => {
+ onErrorChange(active.error ? String(active.error) : null);
+ }, [active.error, onErrorChange]);
+
useEffect(() => {
onReadyChange(active.isReady);
}, [active.isReady, onReadyChange]);
diff --git a/apps/computer-vision/components/vision_camera/tasks/types.ts b/apps/computer-vision/components/vision_camera/tasks/types.ts
index 837c746f5..5f6b9725c 100644
--- a/apps/computer-vision/components/vision_camera/tasks/types.ts
+++ b/apps/computer-vision/components/vision_camera/tasks/types.ts
@@ -11,4 +11,5 @@ export type TaskProps = {
onProgressChange: (progress: number) => void;
onGeneratingChange: (isGenerating: boolean) => void;
onFpsChange: (fps: number, frameMs: number) => void;
+ onErrorChange: (error: string | null) => void;
};
diff --git a/apps/llm/app/llm/index.tsx b/apps/llm/app/llm/index.tsx
index 9ef743cd4..090af814a 100644
--- a/apps/llm/app/llm/index.tsx
+++ b/apps/llm/app/llm/index.tsx
@@ -30,6 +30,7 @@ const SUGGESTED_PROMPTS = [
];
import { useLLMStats } from '../../hooks/useLLMStats';
import { StatsBar } from '../../components/StatsBar';
+import ErrorBanner from '../../components/ErrorBanner';
export default function LLMScreenWrapper() {
const isFocused = useIsFocused();
@@ -52,11 +53,10 @@ function LLMScreen() {
llm.isGenerating,
tokenCount
);
+ const [error, setError] = useState(null);
useEffect(() => {
- if (llm.error) {
- console.error('LLM error:', llm.error);
- }
+ if (llm.error) setError(String(llm.error));
}, [llm.error]);
useEffect(() => {
@@ -70,13 +70,13 @@ function LLMScreen() {
try {
await llm.sendMessage(userInput);
} catch (e) {
- console.error(e);
+ setError(e instanceof Error ? e.message : String(e));
}
};
- return !llm.isReady ? (
+ return !llm.isReady && !llm.error ? (
) : (
@@ -90,6 +90,7 @@ function LLMScreen() {
keyboardVerticalOffset={Platform.OS === 'ios' ? 120 : 40}
>
+ setError(null)} />
{llm.messageHistory.length ? (
(null);
useEffect(() => {
setGlobalGenerating(llm.isGenerating);
@@ -138,9 +140,7 @@ function LLMScreen() {
}, [llm.messageHistory, llm.isGenerating]);
useEffect(() => {
- if (llm.error) {
- console.error('LLM error:', llm.error);
- }
+ if (llm.error) setError(String(llm.error));
}, [llm.error]);
const sendMessage = async () => {
@@ -150,13 +150,13 @@ function LLMScreen() {
try {
await llm.sendMessage(userInput);
} catch (e) {
- console.error(e);
+ setError(e instanceof Error ? e.message : String(e));
}
};
- return !llm.isReady ? (
+ return !llm.isReady && !llm.error ? (
) : (
@@ -170,6 +170,7 @@ function LLMScreen() {
keyboardVerticalOffset={Platform.OS === 'ios' ? 120 : 40}
>
+ setError(null)} />
{llm.messageHistory.length ? (
(null);
useEffect(() => {
setGlobalGenerating(llm.isGenerating);
@@ -86,9 +88,7 @@ function LLMToolCallingScreen() {
}, [configure]);
useEffect(() => {
- if (llm.error) {
- console.error('LLM error:', llm.error);
- }
+ if (llm.error) setError(String(llm.error));
}, [llm.error]);
const requestCalendarPermission = async () => {
@@ -172,13 +172,13 @@ function LLMToolCallingScreen() {
try {
await llm.sendMessage(userInput);
} catch (e) {
- console.error(e);
+ setError(e instanceof Error ? e.message : String(e));
}
};
- return !llm.isReady ? (
+ return !llm.isReady && !llm.error ? (
) : (
@@ -193,6 +193,7 @@ function LLMToolCallingScreen() {
+ setError(null)} />
{llm.messageHistory.length ? (
(null);
const { setGlobalGenerating } = useContext(GeneratingContext);
+ // Error message shown in the ErrorBanner; null hides the banner
+ const [error, setError] = useState(null);
+
const vlm = useLLM({
model: LFM2_VL_1_6B_QUANTIZED,
});
@@ -57,15 +61,20 @@ function MultimodalLLMScreen() {
setGlobalGenerating(vlm.isGenerating);
}, [vlm.isGenerating, setGlobalGenerating]);
+ // Mirror hook-reported errors into local banner state
useEffect(() => {
- if (vlm.error) console.error('MultimodalLLM error:', vlm.error);
+ if (vlm.error) setError(String(vlm.error));
}, [vlm.error]);
const pickImage = async () => {
- const result = await launchImageLibrary({ mediaType: 'photo' });
- if (result.assets && result.assets.length > 0) {
- const uri = result.assets[0]?.uri;
- if (uri) setImageUri(uri);
+ try {
+ const result = await launchImageLibrary({ mediaType: 'photo' });
+ if (result.assets && result.assets.length > 0) {
+ const uri = result.assets[0]?.uri;
+ if (uri) setImageUri(uri);
+ }
+ } catch (e) {
+ setError(e instanceof Error ? e.message : String(e));
}
};
@@ -84,19 +93,17 @@ function MultimodalLLMScreen() {
currentImageUri ? { imagePath: currentImageUri } : undefined
);
} catch (e) {
- console.error('Generation error:', e);
+ // Surface generation failures in the UI banner
+ setError(e instanceof Error ? e.message : String(e));
}
};
- if (!vlm.isReady) {
+ // Show the loading spinner only while loading is still in progress; if the
+ // model failed to load, fall through so the ErrorBanner is visible
+ if (!vlm.isReady && !vlm.error) {
return (
);
}
@@ -110,6 +117,9 @@ function MultimodalLLMScreen() {
keyboardVerticalOffset={Platform.OS === 'ios' ? 120 : 40}
>
+ {/* Dismissible error banner */}
+ setError(null)} />
+
{vlm.messageHistory.length ? (
(QWEN3_0_6B_QUANTIZED);
const [selectedSTT, setSelectedSTT] =
useState(WHISPER_TINY_EN);
+ const [error, setError] = useState(null);
const [recorder] = useState(
() =>
@@ -116,7 +118,7 @@ function VoiceChatScreen() {
finalResult = text;
}
} catch (e) {
- console.error('Streaming error:', e);
+ setError(e instanceof Error ? e.message : String(e));
} finally {
if (finalResult.trim().length > 0) {
await llm.sendMessage(finalResult);
@@ -127,20 +129,18 @@ function VoiceChatScreen() {
};
useEffect(() => {
- if (llm.error) {
- console.error('LLM error:', llm.error);
- }
+ if (llm.error) setError(String(llm.error));
}, [llm.error]);
useEffect(() => {
- if (speechToText.error) {
- console.error('speechToText error:', speechToText.error);
- }
+ if (speechToText.error) setError(String(speechToText.error));
}, [speechToText.error]);
- return !llm.isReady || !speechToText.isReady ? (
+ return (!llm.isReady || !speechToText.isReady) &&
+ !llm.error &&
+ !speechToText.error ? (
) : (
@@ -154,7 +154,7 @@ function VoiceChatScreen() {
Qwen 3 x Whisper
-
+ setError(null)} />
{llm.messageHistory.length > 0 || liveTranscription.length > 0 ? (
void;
+}
+
+export default function ErrorBanner({ message, onDismiss }: ErrorBannerProps) {
+ if (!message) return null;
+
+ return (
+
+
+ {message}
+
+
+ ✕
+
+
+ );
+}
+
+const styles = StyleSheet.create({
+ container: {
+ backgroundColor: '#FEE2E2',
+ borderLeftWidth: 4,
+ borderLeftColor: '#EF4444',
+ borderRadius: 8,
+ marginHorizontal: 16,
+ marginVertical: 8,
+ paddingVertical: 10,
+ paddingLeft: 12,
+ paddingRight: 8,
+ flexDirection: 'row',
+ alignItems: 'center',
+ },
+ message: {
+ flex: 1,
+ color: '#991B1B',
+ fontSize: 14,
+ lineHeight: 20,
+ },
+ closeButton: {
+ padding: 4,
+ marginLeft: 8,
+ },
+ closeText: {
+ color: '#991B1B',
+ fontSize: 16,
+ fontWeight: '600',
+ },
+});
diff --git a/apps/speech/components/ErrorBanner.tsx b/apps/speech/components/ErrorBanner.tsx
new file mode 100644
index 000000000..a5bebc504
--- /dev/null
+++ b/apps/speech/components/ErrorBanner.tsx
@@ -0,0 +1,53 @@
+import React from 'react';
+import { View, Text, TouchableOpacity, StyleSheet } from 'react-native';
+
+interface ErrorBannerProps {
+ message: string | null;
+ onDismiss: () => void;
+}
+
+export default function ErrorBanner({ message, onDismiss }: ErrorBannerProps) {
+ if (!message) return null;
+
+ return (
+
+
+ {message}
+
+
+ ✕
+
+
+ );
+}
+
+const styles = StyleSheet.create({
+ container: {
+ backgroundColor: '#FEE2E2',
+ borderLeftWidth: 4,
+ borderLeftColor: '#EF4444',
+ borderRadius: 8,
+ marginHorizontal: 16,
+ marginVertical: 8,
+ paddingVertical: 10,
+ paddingLeft: 12,
+ paddingRight: 8,
+ flexDirection: 'row',
+ alignItems: 'center',
+ },
+ message: {
+ flex: 1,
+ color: '#991B1B',
+ fontSize: 14,
+ lineHeight: 20,
+ },
+ closeButton: {
+ padding: 4,
+ marginLeft: 8,
+ },
+ closeText: {
+ color: '#991B1B',
+ fontSize: 16,
+ fontWeight: '600',
+ },
+});
diff --git a/apps/speech/screens/SpeechToTextScreen.tsx b/apps/speech/screens/SpeechToTextScreen.tsx
index 7fdf7f392..dfd39c15b 100644
--- a/apps/speech/screens/SpeechToTextScreen.tsx
+++ b/apps/speech/screens/SpeechToTextScreen.tsx
@@ -41,6 +41,7 @@ import SWMIcon from '../assets/swm_icon.svg';
import DeviceInfo from 'react-native-device-info';
import { VerboseTranscription } from '../components/VerboseTranscription';
+import ErrorBanner from '../components/ErrorBanner';
const isSimulator = DeviceInfo.isEmulatorSync();
@@ -64,7 +65,9 @@ export const SpeechToTextScreen = ({ onBack }: { onBack: () => void }) => {
} | null>(null);
const [enableTimestamps, setEnableTimestamps] = useState(false);
+ const [error, setError] = useState(null);
const [audioURL, setAudioURL] = useState('');
+ const [hasMicPermission, setHasMicPermission] = useState(false);
const isRecordingRef = useRef(false);
const [liveTranscribing, setLiveTranscribing] = useState(false);
@@ -79,8 +82,8 @@ export const SpeechToTextScreen = ({ onBack }: { onBack: () => void }) => {
iosOptions: ['allowBluetoothHFP', 'defaultToSpeaker'],
});
const checkPerms = async () => {
- const granted = await AudioManager.requestRecordingPermissions();
- if (!granted) console.warn('Microphone permission denied!');
+ const status = await AudioManager.requestRecordingPermissions();
+ setHasMicPermission(status === 'Granted');
};
checkPerms();
}, []);
@@ -122,13 +125,18 @@ export const SpeechToTextScreen = ({ onBack }: { onBack: () => void }) => {
});
setTranscriptionTime(Date.now() - start);
setTranscription(result);
- } catch (error) {
- console.error('Error decoding audio data', error);
+ } catch (e) {
+ setError(e instanceof Error ? e.message : String(e));
return;
}
};
const handleStartTranscribeFromMicrophone = async () => {
+ if (!hasMicPermission) {
+ setError('Microphone permission denied. Please enable it in Settings.');
+ return;
+ }
+
isRecordingRef.current = true;
setLiveTranscribing(true);
@@ -151,14 +159,14 @@ export const SpeechToTextScreen = ({ onBack }: { onBack: () => void }) => {
try {
const success = await AudioManager.setAudioSessionActivity(true);
if (!success) {
- console.warn('Cannot start audio session correctly');
+ setError('Cannot start audio session correctly');
}
const result = recorder.current.start();
if (result.status === 'error') {
- console.warn('Recording problems: ', result.message);
+ setError(`Recording problems: ${result.message}`);
}
} catch (e) {
- console.error('Failed to start recorder', e);
+ setError(e instanceof Error ? e.message : String(e));
isRecordingRef.current = false;
setLiveTranscribing(false);
return;
@@ -189,8 +197,8 @@ export const SpeechToTextScreen = ({ onBack }: { onBack: () => void }) => {
setLiveResult(currentDisplay);
}
- } catch (error) {
- console.error('Error during live transcription:', error);
+ } catch (e) {
+ setError(e instanceof Error ? e.message : String(e));
} finally {
setLiveTranscribing(false);
}
@@ -216,12 +224,15 @@ export const SpeechToTextScreen = ({ onBack }: { onBack: () => void }) => {
};
const getModelStatus = () => {
- if (model.error) return `${model.error}`;
if (model.isGenerating) return 'Transcribing...';
if (model.isReady) return 'Ready to transcribe';
return `Loading model: ${(100 * model.downloadProgress).toFixed(2)}%`;
};
+ useEffect(() => {
+ if (model.error) setError(String(model.error));
+ }, [model.error]);
+
const readyToTranscribe = !model.isGenerating && model.isReady;
const recordingButtonDisabled = isSimulator || !readyToTranscribe;
@@ -263,6 +274,7 @@ export const SpeechToTextScreen = ({ onBack }: { onBack: () => void }) => {
)}
+ &lt;ErrorBanner message={error} onDismiss={() =&gt; setError(null)} /&gt;
void }) => {
const [inputText, setInputText] = useState('');
const [isPlaying, setIsPlaying] = useState(false);
const [readyToGenerate, setReadyToGenerate] = useState(false);
+ const [error, setError] = useState&lt;string | null&gt;(null);
const audioContextRef = useRef(null);
const sourceRef = useRef(null);
@@ -158,19 +160,22 @@ export const TextToSpeechScreen = ({ onBack }: { onBack: () => void }) => {
onNext,
onEnd,
});
- } catch (error) {
- console.error('Error generating or playing audio:', error);
+ } catch (e) {
+ setError(e instanceof Error ? e.message : String(e));
setIsPlaying(false);
}
};
const getModelStatus = () => {
- if (model.error) return `${model.error}`;
if (model.isGenerating) return 'Generating audio...';
if (model.isReady) return 'Ready to synthesize';
return `Loading model: ${(100 * model.downloadProgress).toFixed(2)}%`;
};
+ useEffect(() => {
+ if (model.error) setError(String(model.error));
+ }, [model.error]);
+
return (
@@ -190,6 +195,7 @@ export const TextToSpeechScreen = ({ onBack }: { onBack: () => void }) => {
Status: {getModelStatus()}
+ &lt;ErrorBanner message={error} onDismiss={() =&gt; setError(null)} /&gt;
(null);
const [inputSentence, setInputSentence] = useState('');
const [sentencesWithEmbeddings, setSentencesWithEmbeddings] = useState<
@@ -52,8 +54,8 @@ function TextEmbeddingsScreen() {
}
setSentencesWithEmbeddings(embeddings);
- } catch (error) {
- console.error('Error generating embeddings:', error);
+ } catch (e) {
+ setError(e instanceof Error ? e.message : String(e));
}
};
@@ -78,8 +80,8 @@ function TextEmbeddingsScreen() {
);
matches.sort((a, b) => b.similarity - a.similarity);
setTopMatches(matches.slice(0, 3));
- } catch (error) {
- console.error('Error generating embedding:', error);
+ } catch (e) {
+ setError(e instanceof Error ? e.message : String(e));
}
};
@@ -94,8 +96,8 @@ function TextEmbeddingsScreen() {
...prev,
{ sentence: inputSentence, embedding },
]);
- } catch (error) {
- console.error('Error generating embedding:', error);
+ } catch (e) {
+ setError(e instanceof Error ? e.message : String(e));
}
setInputSentence('');
@@ -106,8 +108,8 @@ function TextEmbeddingsScreen() {
if (!model.isReady) return;
try {
setSentencesWithEmbeddings([]);
- } catch (error) {
- console.error('Error clearing the list:', error);
+ } catch (e) {
+ setError(e instanceof Error ? e.message : String(e));
}
};
@@ -130,6 +132,7 @@ function TextEmbeddingsScreen() {
Text Embeddings Playground
{getModelStatusText()}
+ &lt;ErrorBanner message={error} onDismiss={() =&gt; setError(null)} /&gt;
List of Existing Sentences
diff --git a/apps/text-embeddings/components/ErrorBanner.tsx b/apps/text-embeddings/components/ErrorBanner.tsx
new file mode 100644
index 000000000..a5bebc504
--- /dev/null
+++ b/apps/text-embeddings/components/ErrorBanner.tsx
@@ -0,0 +1,53 @@
+import React from 'react';
+import { View, Text, TouchableOpacity, StyleSheet } from 'react-native';
+
+interface ErrorBannerProps {
+ message: string | null;
+ onDismiss: () => void;
+}
+
+export default function ErrorBanner({ message, onDismiss }: ErrorBannerProps) {
+ if (!message) return null;
+
+ return (
+    &lt;View style={styles.container}&gt;
+      &lt;Text style={styles.message}&gt;
+        {message}
+      &lt;/Text&gt;
+      &lt;TouchableOpacity style={styles.closeButton} onPress={onDismiss}&gt;
+        &lt;Text style={styles.closeText}&gt;✕&lt;/Text&gt;
+      &lt;/TouchableOpacity&gt;
+    &lt;/View&gt;
+ );
+}
+
+const styles = StyleSheet.create({
+ container: {
+ backgroundColor: '#FEE2E2',
+ borderLeftWidth: 4,
+ borderLeftColor: '#EF4444',
+ borderRadius: 8,
+ marginHorizontal: 16,
+ marginVertical: 8,
+ paddingVertical: 10,
+ paddingLeft: 12,
+ paddingRight: 8,
+ flexDirection: 'row',
+ alignItems: 'center',
+ },
+ message: {
+ flex: 1,
+ color: '#991B1B',
+ fontSize: 14,
+ lineHeight: 20,
+ },
+ closeButton: {
+ padding: 4,
+ marginLeft: 8,
+ },
+ closeText: {
+ color: '#991B1B',
+ fontSize: 16,
+ fontWeight: '600',
+ },
+});