Listing Models
View and manage your registered AI models
View & Manage Your Models
List all your registered models, view their details, and track their testing history and performance metrics.
Quick Model Listing
List All Your Models
Get an overview of all registered models with their current status and testing history.
from modelred import ModelRed

async with ModelRed() as client:
    # Get all registered models
    models = await client.list_models()

    print(f"Found {len(models)} registered models:")
    for model in models:
        print(f"  • {model['modelId']} ({model['provider']})")
        print(f"    Status: {'Active' if model.get('isActive', True) else 'Inactive'}")
        print(f"    Last Tested: {model.get('lastTested', 'Never')}")
        print()
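The snippets on this page use top-level await for brevity, which works in notebooks and inside an existing event loop. To run one as a standalone script, wrap it in asyncio.run; a minimal sketch:

import asyncio
from modelred import ModelRed

async def main():
    # Fetch and count all registered models
    async with ModelRed() as client:
        models = await client.list_models()
        print(f"Found {len(models)} registered models")

asyncio.run(main())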
Model Information
🔍 Model List Response
Each model in the list contains comprehensive information about its configuration and status.
# Example model list response
models = await client.list_models()

for model in models:
    print(f"Model ID: {model['modelId']}")
    print(f"Provider: {model['provider']}")
    print(f"Display Name: {model.get('displayName', 'N/A')}")
    print(f"Model Name: {model.get('modelName', 'N/A')}")
    print(f"Description: {model.get('description', 'N/A')}")
    print(f"Active: {model.get('isActive', True)}")
    print(f"Created: {model['createdAt']}")
    print(f"Updated: {model['updatedAt']}")
    print(f"Last Tested: {model.get('lastTested', 'Never')}")
    print(f"Test Count: {model.get('testCount', 0)}")
    print(f"Completed Assessments: {model.get('completedAssessments', 0)}")

    # Metadata
    if model.get('metadata'):
        print(f"Metadata: {model['metadata']}")

    # Creator info
    if model.get('createdByUser'):
        creator = model['createdByUser']
        print(f"Created by: {creator.get('name', 'Unknown')} ({creator.get('email', 'N/A')})")

    # Recent assessments
    if model.get('recentAssessments'):
        print("Recent Assessments:")
        for assessment in model['recentAssessments']:
            print(f"  - Score: {assessment.get('overallScore', 'N/A')}/10")
            print(f"    Risk: {assessment.get('riskLevel', 'N/A')}")
            print(f"    Completed: {assessment.get('completedAt', 'N/A')}")

    print("-" * 50)
Get Individual Model Details
Detailed Model Information
Get comprehensive details about a specific model, including its full configuration and assessment history.
async with ModelRed() as client:
    # Get detailed information about a specific model
    model_details = await client.get_model("my-gpt-model")

    print(f"Model: {model_details['displayName']}")
    print(f"Provider: {model_details['provider']}")
    print(f"Status: {'Active' if model_details['isActive'] else 'Inactive'}")
    print(f"Last Tested: {model_details.get('lastTested', 'Never')}")
    print(f"Total Tests: {model_details.get('testCount', 0)}")

    # Assessment history
    if model_details.get('recentAssessments'):
        print("\nRecent Assessment History:")
        for assessment in model_details['recentAssessments']:
            score = assessment.get('overallScore', 'N/A')
            risk = assessment.get('riskLevel', 'N/A')
            date = assessment.get('completedAt', 'N/A')
            tests = assessment.get('testTypes', [])
            print(f"  Assessment: {score}/10 ({risk} risk)")
            print(f"  Date: {date}")
            print(f"  Test Suites: {', '.join(tests)}")
            print()
Filtering and Organization
🔍 Filter Models by Criteria
Organize and filter your models by provider, metadata tags (environment, team), and testing status.
async def filter_models_by_criteria():
    async with ModelRed() as client:
        models = await client.list_models()

        # Filter by provider
        openai_models = [m for m in models if m['provider'] == 'OPENAI']
        anthropic_models = [m for m in models if m['provider'] == 'ANTHROPIC']

        print(f"OpenAI Models: {len(openai_models)}")
        print(f"Anthropic Models: {len(anthropic_models)}")

        # Filter by environment (using metadata)
        prod_models = [
            m for m in models
            if m.get('metadata', {}).get('environment') == 'production'
        ]

        # Filter by team (using metadata)
        ai_team_models = [
            m for m in models
            if m.get('metadata', {}).get('team') == 'ai-platform'
        ]

        # Filter by testing status
        tested_models = [
            m for m in models
            if m.get('lastTested') and m.get('testCount', 0) > 0
        ]
        untested_models = [
            m for m in models
            if not m.get('lastTested') or m.get('testCount', 0) == 0
        ]

        print(f"Production Models: {len(prod_models)}")
        print(f"AI Team Models: {len(ai_team_models)}")
        print(f"Tested Models: {len(tested_models)}")
        print(f"Untested Models: {len(untested_models)}")

        # Models needing attention
        high_risk_models = []
        for model in models:
            recent_assessments = model.get('recentAssessments', [])
            if recent_assessments:
                latest = recent_assessments[0]
                if latest.get('riskLevel') in ['HIGH', 'CRITICAL']:
                    high_risk_models.append(model)

        if high_risk_models:
            print(f"\n⚠️ {len(high_risk_models)} models need attention:")
            for model in high_risk_models:
                print(f"  • {model['modelId']} - {model['recentAssessments'][0]['riskLevel']} risk")
📊 Model Statistics Summary
Generate summary statistics about your model inventory: provider and environment distribution, testing coverage, and average security scores.
async def generate_model_summary():
    async with ModelRed() as client:
        models = await client.list_models()

        # Provider distribution
        provider_count = {}
        for model in models:
            provider = model['provider']
            provider_count[provider] = provider_count.get(provider, 0) + 1

        # Environment distribution
        env_count = {}
        for model in models:
            env = model.get('metadata', {}).get('environment', 'unknown')
            env_count[env] = env_count.get(env, 0) + 1

        # Testing statistics
        total_models = len(models)
        tested_models = len([m for m in models if m.get('testCount', 0) > 0])
        active_models = len([m for m in models if m.get('isActive', True)])

        # Assessment statistics: average the most recent score per model
        total_assessments = sum(m.get('completedAssessments', 0) for m in models)
        avg_score = 0
        scored_models = 0
        for model in models:
            recent = model.get('recentAssessments', [])
            if recent and recent[0].get('overallScore'):
                avg_score += recent[0]['overallScore']
                scored_models += 1
        if scored_models > 0:
            avg_score = avg_score / scored_models

        # Print summary
        print("📊 Model Inventory Summary")
        print("=" * 30)
        print(f"Total Models: {total_models}")
        print(f"Active Models: {active_models}")
        # Guard against an empty inventory when computing coverage
        if total_models > 0:
            print(f"Tested Models: {tested_models} ({tested_models / total_models * 100:.1f}%)")
        print(f"Total Assessments: {total_assessments}")
        if scored_models > 0:
            print(f"Average Security Score: {avg_score:.1f}/10")

        print("\nProvider Distribution:")
        for provider, count in provider_count.items():
            print(f"  {provider}: {count}")

        print("\nEnvironment Distribution:")
        for env, count in env_count.items():
            print(f"  {env}: {count}")
Model Status Monitoring
📈 Monitor Model Health
Keep track of model health and identify models that need attention.
async def monitor_model_health():
    async with ModelRed() as client:
        models = await client.list_models()

        # Health metrics
        health_report = {
            'healthy': [],
            'needs_testing': [],
            'high_risk': [],
            'inactive': []
        }

        for model in models:
            model_id = model['modelId']

            # Check if model is active
            if not model.get('isActive', True):
                health_report['inactive'].append(model_id)
                continue

            # Check testing status
            last_tested = model.get('lastTested')
            test_count = model.get('testCount', 0)
            if not last_tested or test_count == 0:
                health_report['needs_testing'].append(model_id)
                continue

            # Check recent assessment results
            recent_assessments = model.get('recentAssessments', [])
            if recent_assessments:
                latest = recent_assessments[0]
                risk_level = latest.get('riskLevel', 'UNKNOWN')
                if risk_level in ['HIGH', 'CRITICAL']:
                    health_report['high_risk'].append({
                        'model_id': model_id,
                        'risk_level': risk_level,
                        'score': latest.get('overallScore', 'N/A')
                    })
                else:
                    health_report['healthy'].append(model_id)
            else:
                health_report['needs_testing'].append(model_id)

        # Print health report
        print("🏥 Model Health Report")
        print("=" * 25)

        print(f"✅ Healthy Models: {len(health_report['healthy'])}")
        for model_id in health_report['healthy']:
            print(f"  • {model_id}")

        print(f"\n🧪 Need Testing: {len(health_report['needs_testing'])}")
        for model_id in health_report['needs_testing']:
            print(f"  • {model_id}")

        print(f"\n⚠️ High Risk Models: {len(health_report['high_risk'])}")
        for model_info in health_report['high_risk']:
            print(f"  • {model_info['model_id']} - {model_info['risk_level']} ({model_info['score']}/10)")

        print(f"\n❌ Inactive Models: {len(health_report['inactive'])}")
        for model_id in health_report['inactive']:
            print(f"  • {model_id}")

        # Recommendations
        print("\n💡 Recommendations:")
        if health_report['needs_testing']:
            print("  • Run initial security assessments on untested models")
        if health_report['high_risk']:
            print("  • Address high-risk models immediately")
        if health_report['inactive']:
            print("  • Review inactive models and delete if no longer needed")
Export Model Inventory
📄 Export Model Data
Export your model inventory to JSON or CSV for reporting or analysis.
import json
import csv
from datetime import datetime

async def export_model_inventory():
    async with ModelRed() as client:
        models = await client.list_models()

        # Prepare export data
        export_data = []
        for model in models:
            # Get latest assessment info
            latest_assessment = None
            if model.get('recentAssessments'):
                latest_assessment = model['recentAssessments'][0]

            model_export = {
                'model_id': model['modelId'],
                'display_name': model.get('displayName', ''),
                'provider': model['provider'],
                'model_name': model.get('modelName', ''),
                'description': model.get('description', ''),
                'is_active': model.get('isActive', True),
                'created_at': model['createdAt'],
                'updated_at': model['updatedAt'],
                'last_tested': model.get('lastTested', ''),
                'test_count': model.get('testCount', 0),
                'completed_assessments': model.get('completedAssessments', 0),
                'latest_score': latest_assessment.get('overallScore') if latest_assessment else None,
                'latest_risk_level': latest_assessment.get('riskLevel') if latest_assessment else None,
                'environment': model.get('metadata', {}).get('environment', ''),
                'team': model.get('metadata', {}).get('team', ''),
                'owner': model.get('metadata', {}).get('owner', ''),
                'created_by': model.get('createdByUser', {}).get('email', '')
            }
            export_data.append(model_export)

        # Export to JSON
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        json_filename = f"model_inventory_{timestamp}.json"
        with open(json_filename, 'w') as f:
            json.dump(export_data, f, indent=2, default=str)
        print(f"✅ Exported {len(export_data)} models to {json_filename}")

        # Export to CSV
        csv_filename = f"model_inventory_{timestamp}.csv"
        if export_data:
            with open(csv_filename, 'w', newline='') as f:
                writer = csv.DictWriter(f, fieldnames=export_data[0].keys())
                writer.writeheader()
                writer.writerows(export_data)
            print(f"✅ Exported {len(export_data)} models to {csv_filename}")

        return export_data

# Usage
inventory_data = await export_model_inventory()
Common Listing Patterns
🔄 Regular Model Audits
from datetime import datetime, timedelta, timezone

async def weekly_model_audit():
    """Run a weekly audit of all models"""
    async with ModelRed() as client:
        models = await client.list_models()

        print(f"🔍 Weekly Model Audit - {datetime.now().strftime('%Y-%m-%d')}")
        print("=" * 50)

        # Models by status
        active_count = len([m for m in models if m.get('isActive', True)])
        inactive_count = len(models) - active_count

        # Testing coverage
        tested_count = len([m for m in models if m.get('testCount', 0) > 0])
        untested_count = len(models) - tested_count

        # Recent activity (lastTested is an ISO-8601 UTC timestamp,
        # so compare against a timezone-aware cutoff)
        week_ago = datetime.now(timezone.utc) - timedelta(days=7)
        recently_tested = []
        for model in models:
            if model.get('lastTested'):
                try:
                    last_test = datetime.fromisoformat(model['lastTested'].replace('Z', '+00:00'))
                    if last_test > week_ago:
                        recently_tested.append(model['modelId'])
                except ValueError:
                    pass  # Skip unparseable timestamps

        print("📊 Summary:")
        print(f"  Total Models: {len(models)}")
        print(f"  Active: {active_count}, Inactive: {inactive_count}")
        print(f"  Tested: {tested_count}, Untested: {untested_count}")
        print(f"  Tested This Week: {len(recently_tested)}")

        # Action items
        print("\n📋 Action Items:")
        if untested_count > 0:
            print(f"  • Schedule testing for {untested_count} untested models")
        if inactive_count > 0:
            print(f"  • Review {inactive_count} inactive models for cleanup")

        return {
            'total': len(models),
            'active': active_count,
            'tested': tested_count,
            'recently_tested': len(recently_tested)
        }
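If you schedule this audit (for example from cron), a thin synchronous wrapper can run it and act on the returned summary; a sketch using the keys the function returns:

import asyncio

# Run the audit and act on its summary counts
summary = asyncio.run(weekly_model_audit())
if summary['tested'] < summary['total']:
    print(f"⚠️ {summary['total'] - summary['tested']} models still need a first assessment")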
Best Practices
💡 Model Listing Best Practices
Organization
Use consistent naming conventions
Tag models by environment/team
Include descriptive metadata (environment, team, owner)
Run regular inventory audits (see the tag-check sketch after these lists)
Monitoring
Track testing coverage
Monitor security scores
Identify high-risk models
Clean up inactive models
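As a concrete sketch of the tagging practice above, the helper below flags models missing the environment or team metadata used for filtering elsewhere on this page (check_model_tags and REQUIRED_TAGS are illustrative names, not part of the SDK):

import asyncio
from modelred import ModelRed

REQUIRED_TAGS = ("environment", "team")  # tags this page's filters rely on

async def check_model_tags():
    """Flag models whose metadata is missing required tags."""
    async with ModelRed() as client:
        models = await client.list_models()
        for model in models:
            metadata = model.get('metadata') or {}
            missing = [tag for tag in REQUIRED_TAGS if not metadata.get(tag)]
            if missing:
                print(f"• {model['modelId']} missing tags: {', '.join(missing)}")

# Usage
asyncio.run(check_model_tags())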