from nomadicml import NomadicML
import os

# Initialize the SDK client with your API key.
# The key is read from the environment rather than hard-coded for security.
client = NomadicML(
    api_key=os.environ.get("NOMADICML_API_KEY")
)
To get your API key, log in to the web platform, go to Profile > API Key, and generate a new key.
We recommend storing your API key in an environment variable for security.
The standard workflow involves uploading your videos first, then running analysis on them.
Uploads accept local paths or remote URLs that end with a common video extension (.mp4, .mov, .avi, .webm):
Copy
from nomadicml.video import AnalysisType, CustomCategory

# Upload a video from a remote URL (local paths also work).
response = client.upload('https://storage.googleapis.com/videolm-bc319.firebasestorage.app/example-videos/Mayhem-on-Road-Compilation.mp4')

# Extract video ID
video_id = response["video_id"]
# Add scope="org" when uploading to shared organization folders.

# Then analyze it
analysis = client.analyze(
    video_id,
    analysis_type=AnalysisType.ASK,
    custom_event="Find outlier events",
    custom_category=CustomCategory.DRIVING,
)
print(analysis)
You can also pass a list of paths/URLs to upload and a list of ids to analyze for batch operations.
Copy
# Batch operations: upload() accepts a list of paths/URLs and analyze() a list of IDs.
paths = [
    'https://storage.googleapis.com/videolm-bc319.firebasestorage.app/example-videos/Driving-a-bus-in-Switzerland-on-Snowy-Roads.mp4',
    'https://storage.googleapis.com/videolm-bc319.firebasestorage.app/example-videos/LIDAR-RBG-Waymo-YouTube-Public-Sample.mp4',
    'https://storage.googleapis.com/videolm-bc319.firebasestorage.app/example-videos/Mayhem-on-Road-Compilation.mp4',
    'https://storage.googleapis.com/videolm-bc319.firebasestorage.app/example-videos/Oakland-to-SF-on-Bridge.mp4',
    'https://storage.googleapis.com/videolm-bc319.firebasestorage.app/example-videos/Zoox_San%20Francisco-Bike-To-Wherever-Day.mp4',
]

response = client.upload(paths)
video_ids = [v['video_id'] for v in response]

batch = client.analyze(
    video_ids,
    analysis_type=AnalysisType.ASK,
    custom_event="Find outlier events",
    custom_category=CustomCategory.DRIVING,
)

print(batch["batch_metadata"])  # Contains batch_id, batch_viewer_url, batch_type
for result in batch["results"]:
    print(result["video_id"], result["analysis_id"], len(result.get("events", [])))
Search queries are open-ended natural language; Nomadic will reason about what fits best. The search response includes a chain-of-thought summary plus the reasoning behind each matched video. Supply
the natural language query, the folder name, and the scope ("user", "org",
or "sample"), and the call returns the complete set of results in one
payload.
Copy
# Natural-language search over previously analyzed videos in a folder.
results = client.search(
    query="Find near-misses with pedestrians on crosswalks",
    folder_name="fleet_uploads_march",
    scope="org",  # optional, defaults to "user"
)

print(results["summary"])   # overall overview
print(results["thoughts"])  # list of reasoning steps
for match in results["matches"]:
    print(match["video_id"], match["reason"], match["similarity"])

# Advanced: reuse the session ID if you want to reference the same results later
print(results["session_id"])  # Unique identifier for this search session
Nomadic supports different analysis types, each optimized for different use cases. All analysis types can be run on a single video or a batch of videos.
Runs the same expert agents available in the dashboard. Pick one of AnalysisType.GENERAL_AGENT, LANE_CHANGE, TURN, RELATIVE_MOTION, or DRIVING_VIOLATIONS.
For larger projects, you can organize videos into folders and run batch analysis on entire folders at once. This is especially useful for processing datasets or running systematic reviews.
# Create a new personal folder
folder = client.create_folder("marketing", description="Q1 campaign")
print(folder["id"], folder["created_at"])

# Lookup by name (personal scope by default)
existing = client.get_folder("marketing")
print(existing["id"], existing["video_count"])

# Organization-scoped folders
org_folder = client.create_folder("fleet_uploads", scope="org")
org_existing = client.get_folder("fleet_uploads", scope="org")
This example demonstrates a common workflow: first, run a broad analysis to cast a wide net, then use search across analysis results to hone in on specific events, and finally, run a detailed analysis on the resulting subset of videos.
Copy
from nomadicml.video import AnalysisType

paths = [
    'https://storage.googleapis.com/videolm-bc319.firebasestorage.app/example-videos/Driving-a-bus-in-Switzerland-on-Snowy-Roads.mp4',
    'https://storage.googleapis.com/videolm-bc319.firebasestorage.app/example-videos/LIDAR-RBG-Waymo-YouTube-Public-Sample.mp4',
    'https://storage.googleapis.com/videolm-bc319.firebasestorage.app/example-videos/Mayhem-on-Road-Compilation.mp4',
    'https://storage.googleapis.com/videolm-bc319.firebasestorage.app/example-videos/Oakland-to-SF-on-Bridge.mp4',
    'https://storage.googleapis.com/videolm-bc319.firebasestorage.app/example-videos/Zoox_San%20Francisco-Bike-To-Wherever-Day.mp4',
]

# Define a folder for the project
folder_name = "agent-analysis-videos"
project_folder = client.create_or_get_folder(folder_name, scope="org")
print(f"Using folder '{project_folder['name']}' scoped to {project_folder['scope']} (id={project_folder['id']})")

print("📁 Step 1: Uploading videos to project folder...")
response = client.upload(paths, folder=folder_name, scope="org")
print(f"✅ Successfully uploaded {len(response)} videos to '{folder_name}' folder")

print("\n🔍 Step 2: Running agent analysis on all videos...")
analyses = client.analyze(
    folder=folder_name,
    analysis_type=AnalysisType.GENERAL_AGENT,
)
print(f"✅ Completed agent analysis on {len(analyses)} videos")

print("\n🔎 Step 3: Searching for pedestrian-related incidents...")
# You can ask any text-based question about the analysis results
search_results = client.search(
    query="Find risky incidents involving pedestrians",
    folder_name=folder_name,
    scope="org",
)
# De-duplicate: one video may produce several matching events.
matching_video_ids = list(set([match['video_id'] for match in search_results['matches']]))
print(f"✅ Found {len(matching_video_ids)} videos with pedestrian incidents")

print(f"\n🎯 Step 4: Re-analyzing {len(matching_video_ids)} videos for pedestrian fault analysis...")
analyses = client.analyze(
    matching_video_ids,
    analysis_type=AnalysisType.ASK,
    custom_event="Mark incidents involving pedestrians where pedestrians are at fault",
    custom_category="driving",
)
print(f"✅ Completed detailed analysis on {len(analyses)} videos")

for analysis in analyses:
    if analysis['events']:
        print(f"\n🎬 Events found in video {analysis['video_id']}:")
        for e in analysis['events']:
            print(f" • {e}")
        print("-" * 80)

print("\n🧹 Step 5: Cleaning up - deleting project videos...")
for response in client.my_videos(folder_name):
    result = client.delete_video(response['video_id'])
You don’t need to re-upload videos to run new analyses. You can efficiently query already uploaded videos using either their specific video_ids or by organizing them into folders.
This is the most direct way to re-run analysis on a few specific videos. After you upload a video, the API returns a video_id. Store this ID to reference the video in future calls.
Copy
# Assume you have previously uploaded two videos and saved their IDs
video_id_1 = "5be1bd918d0f44adb8346fae231523a2"
video_id_2 = "81fbee264993496593db308ad4ccda02"

# Now, you can run a new analysis on just these two videos
pedestrian_analysis = client.analyze(
    [video_id_1, video_id_2],
    analysis_type=AnalysisType.ASK,
    custom_event="pedestrians close to vehicle",
    custom_category="driving",
)
print(f"Found {len(pedestrian_analysis)} videos with pedestrian interactions.")
For larger-scale projects, organizing videos into folders is the best practice. This allows you to run analysis on an entire dataset with a single command.
Copy
from nomadicml.video import AnalysisType, CustomCategory

folder_name = "2024_urban_driving_set"

# Step 1: Upload and organize your videos into a folder (only needs to be done once)
client.upload(
    ['/path/to/city_drive_1.mp4', '/path/to/city_drive_2.mp4'],
    folder=folder_name,
)

# Step 2: Run an initial analysis to find all road signs and their MUTCD codes
print(f"\nRunning initial analysis for 'road signs & MUTCD codes' in folder '{folder_name}'...")
# NOTE: renamed from the copy-pasted `pedestrian_analysis` — this analysis is about road signs.
sign_analysis = client.analyze(
    folder=folder_name,
    analysis_type=AnalysisType.ASK,
    custom_event="Find all road signs and note their corresponding MUTCD codes?",
    custom_category=CustomCategory.ENVIRONMENT,
)
print(f"Found {len(sign_analysis)} videos with road signs.")

# Step 3: Later, run a different analysis on the same set of videos
print(f"\nRunning second analysis for 'potholes' in folder '{folder_name}'...")
pothole_analysis = client.analyze(
    folder=folder_name,
    analysis_type=AnalysisType.ASK,
    custom_event="potholes or major road cracks",
    custom_category=CustomCategory.DRIVING,
)
print(f"Found {len(pothole_analysis)} videos with potholes.")
Nomadic can generate annotated thumbnails with segmentations for detected events. This is useful for visual inspection and validation of detected events.
If you have an existing analysis without thumbnails, or need to retrieve thumbnails later, use the get_visuals() and get_visual() methods:
Copy
# Get all thumbnail URLs for an analysis
video_id = "1b9dac2525f34696a7ca03b0bdf775c2"
analysis_id = "auc1QR27QdjluPH0qDoE"

# Get all thumbnails
thumbnails = client.get_visuals(video_id, analysis_id)

# Get a specific thumbnail by event index
first_thumbnail = client.get_visual(video_id, analysis_id, 0)
Generated thumbnails are stored in Google Cloud Storage and include:
The original video frame at the event timestamp
Bounding box annotations highlighting the detected object
Object labels for easy identification
The URLs are publicly accessible and can be embedded in reports, dashboards, or shared with stakeholders.
If thumbnails don’t exist for an analysis, get_visuals() will automatically generate them. This is useful for older analyses that were created without the is_thumbnail flag.
Nomadic can extract telemetry data from on-screen overlays in videos. This is useful for videos with embedded metadata like timestamps, GPS coordinates, speed, altitude, or custom telemetry values.
Important: Metadata describing overlay fields must be provided at upload time. During analysis, you can only enable/disable extraction of these pre-uploaded fields using boolean flags.
You can provide metadata files that describe the overlay fields in your videos. Metadata must be a properly formatted JSON file according to the Metadata Ingestion Spec, and the .json file must share the same base filename as the video (for example, drone_footage.mp4 pairs with drone_footage.json).
Copy
# Single video with metadata file (names must match)
result = client.upload(("dashcam_video.mp4", "dashcam_video.json"))

# Multiple videos with mixed metadata
uploads = client.upload([
    ("video1.mp4", "video1.json"),  # Video with metadata
    "video2.mp4",                   # Video without metadata
    ("video3.mp4", "video3.json"),  # Another video with metadata
])
print(f"Uploaded {len(uploads)} videos")
for upload in uploads:
    print(f"Video ID: {upload['video_id']}, Status: {upload['status']}")
When analyzing videos that were uploaded with metadata, you can enable extraction of specific telemetry types. The overlay_mode parameter selects which type of fields to extract - the metadata must have been provided at upload time. You can only select one mode at a time.
Copy
from nomadicml import OverlayMode

# Extract timestamp overlays
# Note: This video must have been uploaded with metadata
analysis = client.analyze(
    video_id,
    analysis_type=AnalysisType.ASK,
    custom_event="vehicle speeding events",
    overlay_mode=OverlayMode.TIMESTAMPS,
)

# Or extract GPS coordinates
analysis = client.analyze(
    video_id,
    analysis_type=AnalysisType.ASK,
    custom_event="route violations",
    overlay_mode=OverlayMode.GPS,
)

# Or extract custom fields from metadata
analysis = client.analyze(
    video_id,
    analysis_type=AnalysisType.ASK,
    custom_event="speed limit violations",
    overlay_mode=OverlayMode.CUSTOM,  # Will extract custom fields like speed, altitude, etc.
)

# Access extracted overlay data in events
for event in analysis["events"]:
    print(f"Event: {event['label']} at {event['t_start']}-{event['t_end']}")
    overlay_values = event.get("overlay", {})
    for field, values in overlay_values.items():
        start = values.get("start")
        end = values.get("end")
        print(f" {field}: {start} -> {end}")
from nomadicml import OverlayMode

# Step 1: Upload batch of videos with metadata
videos_with_metadata = [
    ("fleet_cam_001.mp4", "fleet_cam_001.json"),
    ("fleet_cam_002.mp4", "fleet_cam_002.json"),
    ("fleet_cam_003.mp4", "fleet_cam_003.json"),
]
upload_results = client.upload(videos_with_metadata, folder="fleet_telemetry")
video_ids = [r['video_id'] for r in upload_results]

# Step 2: Analyze with overlay extraction enabled
# The overlay_mode selects which type of fields to extract from the metadata uploaded in Step 1
batch_analysis = client.analyze(
    video_ids,
    analysis_type=AnalysisType.ASK,
    custom_event="harsh braking events where speed drops rapidly",
    overlay_mode=OverlayMode.CUSTOM,  # Will extract custom fields (e.g., speed) from uploaded metadata
)

# Process results with overlay data
for result in batch_analysis["results"]:
    video_id = result["video_id"]
    for event in result["events"]:
        # Speed (and other custom telemetry) is exposed through the overlay map
        speed_overlay = event.get("overlay", {}).get("frame_speed")
        if speed_overlay:
            print(
                f"Video {video_id}: Speed changed from "
                f"{speed_overlay.get('start')} to {speed_overlay.get('end')}"
            )
The metadata JSON file should describe the fields that appear as overlays in your video. For the complete metadata ingestion specification and detailed schema documentation, see the Metadata Ingestion Spec. Example metadata file:
Metadata files must have the same base filename as their corresponding video file. For example, dashcam_recording.mp4 should have metadata named dashcam_recording.json.
from pymongo import MongoClient

# Assume 'analysis_results' is the list of dicts from a client.analyze() call
results_to_store = []
for analysis in analysis_results:
    # ... (processing logic from previous examples) ...
    results_to_store.append(processed_event)

# Connect to MongoDB and insert the documents.
# Using the client as a context manager ensures the connection is closed
# even if the insert fails (the original example never closed it).
try:
    with MongoClient('mongodb://localhost:27017/') as db_client:
        db = db_client['nomadicml_results']
        collection = db['driving_events']
        if results_to_store:
            collection.insert_many(results_to_store)
            print("Successfully saved results to MongoDB.")
except Exception as e:
    # Broad catch is acceptable at this top-level example boundary; the
    # error is surfaced to the user rather than silently swallowed.
    print(f"An error occurred with MongoDB: {e}")