Monitoring Progress

Track the progress of running security assessments

📊 Real-time Assessment Monitoring

Track your security assessments in real time with progress callbacks, status monitoring, and comprehensive logging for complete visibility.

Progress Callbacks

Real-time Updates

Get instant progress updates with a custom callback function that receives the progress percentage and a status message.

PYTHON
import asyncio

from modelred import ModelRed

def progress_callback(progress, status):
    """Custom progress callback with detailed logging"""
    print(f"📊 Progress: {progress}% - {status}")

    # Custom logic based on progress
    if progress == 25:
        print("🚀 Quarter way through assessment...")
    elif progress == 50:
        print("🔄 Halfway through assessment...")
    elif progress == 75:
        print("🏁 Almost finished...")
    elif progress == 100:
        print("🎉 Assessment completed!")

async def main():
    async with ModelRed() as client:
        result = await client.run_assessment(
            model_id="my-model",
            test_suites=["basic_security"],
            progress_callback=progress_callback,
            wait_for_completion=True,
        )

        print(f"Final Score: {result.overall_score}/10")

asyncio.run(main())

Manual Status Polling

🔄 Manual Status Checking

For background assessments, manually poll the status to track progress and detect completion.

PYTHON
import asyncio
import time
from modelred import ModelRed

async def monitor_assessment(client, assessment_id, poll_interval=10, timeout=1800):
    """Monitor assessment with custom polling logic and an overall timeout"""
    start_time = time.time()

    while True:
        try:
            status_info = await client.get_assessment_status(assessment_id)
            status = status_info["status"]
            progress = status_info.get("progress", 0)

            elapsed = time.time() - start_time

            print(f"📊 Status: {status}")
            print(f"🔄 Progress: {progress}%")
            print(f"⏱️  Elapsed: {elapsed:.1f}s")

            if status == 'COMPLETED':
                print("✅ Assessment completed successfully!")
                return True
            elif status == 'FAILED':
                error_msg = status_info.get('error_message', 'Unknown error')
                print(f"❌ Assessment failed: {error_msg}")
                return False

        except Exception as e:
            print(f"⚠️ Error checking status: {e}")

        # Give up once the overall timeout is exceeded
        if time.time() - start_time > timeout:
            print("⏰ Timed out waiting for the assessment to finish")
            return False

        await asyncio.sleep(poll_interval)

# Usage example
async def run_monitored_assessment():
    async with ModelRed() as client:
        # Start assessment without waiting
        result = await client.run_assessment(
            model_id="my-model",
            test_suites=["basic_security"],
            wait_for_completion=False
        )

        assessment_id = result.assessment_id
        print(f"🚀 Assessment started: {assessment_id}")

        # Monitor progress
        success = await monitor_assessment(client, assessment_id)

        if success:
            # Get final results
            final_result = await client.get_assessment_results(assessment_id)
            print(f"📋 Final Score: {final_result.overall_score}/10")

# Run the example
asyncio.run(run_monitored_assessment())

Assessment Status States

Status Values & Meanings

Understand what each assessment status means and how to handle them.

QUEUED (initial state): Assessment is waiting in the queue to start processing.

RUNNING (active state): Assessment is currently executing security tests.

COMPLETED (final state): Assessment finished successfully, with results available.

FAILED (error state): Assessment encountered an error and could not complete.
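
As a quick reference, here is a minimal sketch of branching on each state while polling. It reuses get_assessment_status and get_assessment_results from the examples above; the exact return shapes are assumptions based on those examples.

PYTHON
# Minimal sketch: dispatch on the four status values above.
async def handle_status(client, assessment_id):
    status_info = await client.get_assessment_status(assessment_id)
    status = status_info["status"]

    if status == "QUEUED":
        print("⏳ Waiting for the assessment to start...")
    elif status == "RUNNING":
        print(f"🔄 Running: {status_info.get('progress', 0)}% complete")
    elif status == "COMPLETED":
        # Final state: results are available
        return await client.get_assessment_results(assessment_id)
    elif status == "FAILED":
        # Error state: surface the failure to the caller
        raise RuntimeError(status_info.get("error_message", "Unknown error"))

    return None  # Not finished yet; poll again later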

Advanced Monitoring Patterns

🔧 Custom Monitor Class

Create a reusable monitoring class with advanced features like ETA calculation and logging.

PYTHON
import asyncio
import logging
import time
from datetime import datetime, timedelta

from modelred import ModelRed

class AssessmentMonitor:
    """Advanced assessment monitoring with logging and ETA calculation"""

    def __init__(self, log_level=logging.INFO):
        self.start_time = None
        self.last_update = None
        self.progress_history = []

        # Setup logging
        self.logger = logging.getLogger(f"ModelRed.Monitor.{datetime.now().strftime('%H%M%S')}")
        self.logger.setLevel(log_level)

        if not self.logger.handlers:
            handler = logging.StreamHandler()
            formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
            handler.setFormatter(formatter)
            self.logger.addHandler(handler)

    def on_progress(self, progress, status):
        """Progress callback with advanced tracking"""
        current_time = time.time()

        if self.start_time is None:
            self.start_time = current_time
            self.logger.info(f"🚀 Assessment started - Status: {status}")

        # Track progress history
        self.progress_history.append({
            'timestamp': current_time,
            'progress': progress,
            'status': status
        })

        elapsed = current_time - self.start_time

        # Calculate ETA if we have progress
        if 0 < progress < 100:
            estimated_total = elapsed / (progress / 100)
            remaining = estimated_total - elapsed
            eta = datetime.now() + timedelta(seconds=remaining)

            self.logger.info(f"📊 Progress: {progress}% | Status: {status}")
            self.logger.info(f"⏱️  Elapsed: {elapsed:.1f}s | ETA: {eta.strftime('%H:%M:%S')}")

            # Progress milestones
            if progress in [25, 50, 75]:
                self.logger.info(f"🎯 Milestone reached: {progress}% complete")

        elif progress == 100:
            self.logger.info(f"🎉 Assessment completed in {elapsed:.1f}s")
            self._log_summary()

        self.last_update = current_time

    def _log_summary(self):
        """Log assessment summary"""
        if len(self.progress_history) > 1:
            total_time = self.progress_history[-1]['timestamp'] - self.progress_history[0]['timestamp']
            avg_progress_rate = 100 / total_time if total_time > 0 else 0

            self.logger.info("📋 Assessment Summary:")
            self.logger.info(f"   Duration: {total_time:.1f}s")
            self.logger.info(f"   Avg Rate: {avg_progress_rate:.1f}%/s")
            self.logger.info(f"   Updates: {len(self.progress_history)}")

    def get_stats(self):
        """Get monitoring statistics"""
        if not self.progress_history:
            return None

        return {
            'start_time': self.start_time,
            'total_updates': len(self.progress_history),
            'current_progress': self.progress_history[-1]['progress'],
            'current_status': self.progress_history[-1]['status'],
            'elapsed_time': time.time() - self.start_time if self.start_time else 0
        }

# Usage example
async def run_monitored_assessment():
    monitor = AssessmentMonitor()

    async with ModelRed() as client:
        result = await client.run_assessment(
            model_id="production-model",
            test_suites=["basic_security", "prompt_injection"],
            progress_callback=monitor.on_progress,
            wait_for_completion=True,
            timeout_minutes=30
        )

        # Get monitoring stats
        stats = monitor.get_stats()
        if stats:
            print(f"\n📊 Monitoring Stats:")
            print(f"   Total Updates: {stats['total_updates']}")
            print(f"   Final Progress: {stats['current_progress']}%")
            print(f"   Total Time: {stats['elapsed_time']:.1f}s")

        return result

# Run with monitoring
result = asyncio.run(run_monitored_assessment())

Multi-Assessment Monitoring

📊 Batch Assessment Monitoring

Monitor multiple assessments simultaneously with consolidated progress tracking.

PYTHON
import asyncio
import time
from collections import defaultdict

from modelred import ModelRed

class BatchMonitor:
    """Monitor multiple assessments simultaneously"""

    def __init__(self):
        self.assessments = {}
        self.progress_data = defaultdict(list)

    def add_assessment(self, assessment_id, model_id):
        """Add an assessment to monitor"""
        self.assessments[assessment_id] = {
            'model_id': model_id,
            'status': 'QUEUED',
            'progress': 0,
            'start_time': time.time()
        }

    async def monitor_all(self, client, update_interval=15):
        """Monitor all registered assessments"""
        print(f"🔄 Monitoring {len(self.assessments)} assessments...")

        while True:
            completed = 0
            failed = 0

            for assessment_id, info in self.assessments.items():
                # Skip assessments that already reached a final state
                if info['status'] == 'COMPLETED':
                    completed += 1
                    continue
                elif info['status'] == 'FAILED':
                    failed += 1
                    continue

                try:
                    status_info = await client.get_assessment_status(assessment_id)

                    # Update status
                    old_progress = info['progress']
                    info['status'] = status_info['status']
                    info['progress'] = status_info.get('progress', 0)

                    # Log significant progress changes
                    if info['progress'] - old_progress >= 10:
                        elapsed = time.time() - info['start_time']
                        print(f"📊 {info['model_id']}: {info['progress']}% ({elapsed:.1f}s)")

                    # Count assessments that just reached a final state
                    if info['status'] == 'COMPLETED':
                        completed += 1
                    elif info['status'] == 'FAILED':
                        failed += 1

                except Exception as e:
                    print(f"⚠️ Error monitoring {assessment_id}: {e}")

            # Print summary
            total = len(self.assessments)
            running = total - completed - failed
            print(f"\n📋 Status: {completed} completed, {failed} failed, {running} running")

            # Exit if all done
            if completed + failed == total:
                print("🎉 All assessments finished!")
                break

            await asyncio.sleep(update_interval)

    def get_summary(self):
        """Get monitoring summary"""
        summary = {
            'total': len(self.assessments),
            'completed': 0,
            'failed': 0,
            'running': 0,
            'queued': 0
        }

        for info in self.assessments.values():
            status = info['status'].lower()
            if status == 'completed':
                summary['completed'] += 1
            elif status == 'failed':
                summary['failed'] += 1
            elif status == 'running':
                summary['running'] += 1
            else:
                summary['queued'] += 1

        return summary

# Usage example
async def batch_monitoring_example():
    monitor = BatchMonitor()

    async with ModelRed() as client:
        # Start multiple assessments
        models_to_test = [
            {"model_id": "prod-gpt-4", "suites": ["basic_security"]},
            {"model_id": "dev-claude", "suites": ["prompt_injection"]},
            {"model_id": "test-model", "suites": ["content_safety"]}
        ]

        # Start all assessments
        for model_config in models_to_test:
            result = await client.run_assessment(
                model_id=model_config["model_id"],
                test_suites=model_config["suites"],
                wait_for_completion=False
            )

            monitor.add_assessment(result.assessment_id, model_config["model_id"])
            print(f"🚀 Started assessment for {model_config['model_id']}")

        # Monitor all assessments
        await monitor.monitor_all(client, update_interval=10)

        # Print final summary
        summary = monitor.get_summary()
        print(f"\n📊 Final Summary:")
        print(f"   Total: {summary['total']}")
        print(f"   Completed: {summary['completed']}")
        print(f"   Failed: {summary['failed']}")

# Run batch monitoring
asyncio.run(batch_monitoring_example())

Best Practices

💡 Monitoring Best Practices

Progress Tracking

Use reasonable polling intervals (10-30s)
Log progress milestones (25%, 50%, 75%)
Calculate and display an ETA (see the sketch below)
Handle network interruptions gracefully
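
The ETA arithmetic used by AssessmentMonitor above condenses to a few lines. A minimal sketch, assuming progress grows roughly linearly over time:

PYTHON
# Minimal ETA estimate: assumes roughly linear progress over time.
def estimate_remaining_seconds(elapsed, progress):
    if progress <= 0:
        return None  # No progress signal yet
    return elapsed * (100 - progress) / progress

# e.g. 40% done after 120s -> about 180s remaining
print(estimate_remaining_seconds(120, 40))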

Error Handling

Implement retry logic for status checks (see the sketch below)
Log all errors and status changes
Set reasonable timeout limits
Provide clear failure notifications
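
A minimal sketch of retry with exponential backoff around the get_assessment_status call from the examples above; the retry count and backoff factors are illustrative, not prescribed values.

PYTHON
import asyncio

# Illustrative retry wrapper: exponential backoff between attempts.
async def get_status_with_retry(client, assessment_id, retries=3, base_delay=2):
    for attempt in range(retries):
        try:
            return await client.get_assessment_status(assessment_id)
        except Exception as e:
            if attempt == retries - 1:
                raise  # Out of retries: surface the failure clearly
            delay = base_delay * 2 ** attempt  # 2s, 4s, 8s, ...
            print(f"⚠️ Status check failed ({e}); retrying in {delay}s")
            await asyncio.sleep(delay)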
