Skip to main content

SDK Examples

Practical examples demonstrating how to integrate Visent SDKs into real applications, from basic monitoring dashboards to complex automation systems. These examples cover common use cases and integration patterns. The completed examples include error handling and follow production-oriented coding practices; sections marked "coming soon" are still being written.

Getting Started Examples

Coming soon - basic setup and first API calls across different SDKs.

Monitoring Dashboard

Python Flask Dashboard

from flask import Flask, render_template, jsonify
from visent import VisentClient
import os

app = Flask(__name__)

# The API key is read from the environment so credentials never live in
# source. NOTE: if VISENT_API_KEY is unset this passes None and the first
# API call will fail — export the variable before starting the server.
client = VisentClient(api_key=os.getenv('VISENT_API_KEY'))

# Nodes polled by the metrics endpoint. Hoisted to module level so the list
# is easy to find and change; in a real deployment this would come from
# configuration rather than a literal.
MONITORED_NODES = ['gpu-worker-1', 'gpu-worker-2', 'gpu-worker-3']

@app.route('/')
def dashboard():
    """Serve the static dashboard page."""
    return render_template('dashboard.html')

@app.route('/api/metrics')
def get_metrics():
    """Return current per-node GPU metrics as JSON.

    Responds with {'metrics': [...]} on success, where each entry carries
    the node name plus its utilization, memory_used, and temperature.
    On any failure, responds with {'error': <message>} and HTTP 500.
    """
    try:
        metrics = []
        for node in MONITORED_NODES:
            node_metrics = client.telemetry.get_metrics(node=node)
            metrics.append({
                'node': node,
                'utilization': node_metrics.utilization,
                'memory_used': node_metrics.memory_used,
                'temperature': node_metrics.temperature
            })

        return jsonify({'metrics': metrics})
    except Exception as e:
        # Surface the failure to the dashboard rather than an opaque 500 page.
        return jsonify({'error': str(e)}), 500

if __name__ == '__main__':
    # debug=True is for local development only — serve behind a production
    # WSGI server (e.g. gunicorn) in deployment.
    app.run(debug=True)

Node.js Express API

Coming soon - Express.js API server with real-time GPU monitoring.

Cost Optimization Tool

Python Cost Analyzer

import asyncio
from visent import AsyncVisentClient
from datetime import datetime, timedelta

class CostOptimizer:
    """Compares GPU pricing across providers/regions/GPU types and fetches
    cost-optimization recommendations from the Visent Atlas API.
    """

    # Hours assumed per month for the cost estimate (24 h x 30 days).
    HOURS_PER_MONTH = 24 * 30

    def __init__(self, api_key):
        self.client = AsyncVisentClient(api_key=api_key)

    async def analyze_workload_costs(self, workload_config):
        """Analyze costs across different providers and regions.

        Args:
            workload_config: Reserved for future filtering of the search
                space; currently unused by this implementation.

        Returns:
            A list of dicts (provider, region, gpu, hourly_cost,
            monthly_cost, availability) sorted by ascending monthly cost.
            Combinations the API cannot price are logged and skipped.
        """
        from itertools import product

        providers = ['aws', 'gcp', 'azure']
        regions = ['us-east-1', 'us-west-2', 'eu-west-1']
        gpu_types = ['h100', 'a100', 'v100']

        results = []

        # product() flattens the provider x region x GPU search space,
        # replacing three levels of nested loops.
        for provider, region, gpu in product(providers, regions, gpu_types):
            try:
                pricing = await self.client.atlas.get_current_pricing(
                    gpu=gpu,
                    provider=provider,
                    region=region
                )

                # Calculate monthly cost estimate
                monthly_cost = pricing.price_per_hour * self.HOURS_PER_MONTH

                results.append({
                    'provider': provider,
                    'region': region,
                    'gpu': gpu,
                    'hourly_cost': pricing.price_per_hour,
                    'monthly_cost': monthly_cost,
                    'availability': pricing.availability
                })

            except Exception as e:
                # Best-effort scan: skip unpriceable combinations rather
                # than aborting the whole analysis.
                print(f"Error getting pricing for {provider}/{region}/{gpu}: {e}")

        # Sort by cost
        results.sort(key=lambda x: x['monthly_cost'])
        return results

    async def get_optimization_recommendations(self, budget, requirements,
                                               workload='ml-training'):
        """Get cost optimization recommendations.

        Args:
            budget: Budget ceiling passed through to the Atlas API.
            requirements: Dict of workload requirements (e.g. min_gpus).
            workload: Workload profile name; defaults to 'ml-training'
                for backward compatibility with existing callers.
        """
        return await self.client.atlas.get_optimization_recommendations(
            budget=budget,
            workload=workload,
            requirements=requirements
        )

async def main():
    """Demo entry point: rank pricing options, then print recommendations."""
    tool = CostOptimizer(api_key='your_api_key')

    # Rank every provider/region/GPU combination by monthly cost.
    ranked = await tool.analyze_workload_costs({})

    print("Top 5 most cost-effective options:")
    for option in ranked[:5]:
        line = f"{option['provider']} {option['region']} {option['gpu']}: ${option['monthly_cost']:.2f}/month"
        print(line)

    # Ask the API for suggestions that fit the given budget/requirements.
    recommendations = await tool.get_optimization_recommendations(
        budget=5000,
        requirements={'min_gpus': 4, 'memory_per_gpu': '40GB'}
    )

    print(f"\nOptimization recommendations: {recommendations}")

if __name__ == '__main__':
    asyncio.run(main())

Automated Benchmarking

Go Benchmark Automation

package main

import (
    "context"
    "fmt"
    "log"
    "os"
    "sort"
    "strings"
    "time"

    "github.com/visent/go-sdk"
)

// BenchmarkSuite drives GPU benchmark comparisons through the Visent SDK.
type BenchmarkSuite struct {
    client *visent.Client // authenticated Visent API client
}

// NewBenchmarkSuite returns a BenchmarkSuite backed by a Visent client
// authenticated with the given API key.
func NewBenchmarkSuite(apiKey string) *BenchmarkSuite {
    cfg := visent.Config{APIKey: apiKey}
    return &BenchmarkSuite{client: visent.NewClient(cfg)}
}

// RunComparisonBenchmark runs an ml-training benchmark for every GPU/model
// pair and prints a comparison report. Each benchmark is started via the
// Forge API and polled until completion; a failure for one pair is logged
// and skipped so the rest of the sweep continues. Always returns nil.
func (bs *BenchmarkSuite) RunComparisonBenchmark(ctx context.Context) error {
    gpus := []string{"h100", "a100", "v100"}
    models := []string{"resnet50", "bert-large", "gpt-3"}
    
    // results[gpu][model] -> completed benchmark result for that pair.
    results := make(map[string]map[string]*visent.BenchmarkResult)
    
    for _, gpu := range gpus {
        results[gpu] = make(map[string]*visent.BenchmarkResult)
        
        for _, model := range models {
            fmt.Printf("Running benchmark: %s on %s\n", model, gpu)
            
            // Start benchmark
            benchmark, err := bs.client.Forge.StartBenchmark(ctx, &visent.BenchmarkRequest{
                Type: "ml-training",
                GPU:  gpu,
                Config: map[string]interface{}{
                    "model":      model,
                    "batch_size": 32,
                    "iterations": 100,
                },
            })
            if err != nil {
                log.Printf("Failed to start benchmark %s/%s: %v", gpu, model, err)
                continue
            }
            
            // Wait for completion, polling every 30s with a 1h cap per job.
            result, err := bs.client.Forge.WaitForCompletion(ctx, benchmark.JobID, visent.WaitOptions{
                PollInterval: 30 * time.Second,
                Timeout:      time.Hour,
            })
            if err != nil {
                log.Printf("Benchmark %s/%s failed: %v", gpu, model, err)
                continue
            }
            
            results[gpu][model] = result
            fmt.Printf("Completed: %s/%s - Throughput: %.1f ops/sec\n", 
                gpu, model, result.Metrics.Throughput)
        }
    }
    
    // Generate comparison report
    bs.generateReport(results)
    return nil
}

// generateReport prints a fixed-width comparison table of benchmark results.
// GPU and model names are sorted so the report is deterministic — Go map
// iteration order is randomized, so ranging over the maps directly would
// print rows in a different order on every run.
func (bs *BenchmarkSuite) generateReport(results map[string]map[string]*visent.BenchmarkResult) {
    fmt.Println("\n=== Benchmark Comparison Report ===")
    fmt.Printf("%-10s %-15s %-15s %-10s\n", "GPU", "Model", "Throughput", "Latency")
    fmt.Println(strings.Repeat("-", 55))

    gpus := make([]string, 0, len(results))
    for gpu := range results {
        gpus = append(gpus, gpu)
    }
    sort.Strings(gpus)

    for _, gpu := range gpus {
        modelResults := results[gpu]
        models := make([]string, 0, len(modelResults))
        for model := range modelResults {
            models = append(models, model)
        }
        sort.Strings(models)

        for _, model := range models {
            if result := modelResults[model]; result != nil {
                fmt.Printf("%-10s %-15s %-15.1f %-10.1f\n",
                    gpu, model, result.Metrics.Throughput, result.Metrics.Latency)
            }
        }
    }
}

// main wires the suite to the VISENT_API_KEY environment variable and runs
// the full comparison sweep, exiting non-zero on error.
func main() {
    apiKey := os.Getenv("VISENT_API_KEY")
    suite := NewBenchmarkSuite(apiKey)

    if err := suite.RunComparisonBenchmark(context.Background()); err != nil {
        log.Fatal(err)
    }
}

Real-time Alerting

Coming soon - webhook handling and real-time alert processing examples.

CI/CD Integration

Coming soon - integrating Visent benchmarks into CI/CD pipelines.

Multi-cloud Monitoring

Coming soon - monitoring GPU infrastructure across multiple cloud providers.

Data Analytics

Coming soon - analyzing historical metrics and generating insights.

Custom Integrations

Coming soon - building custom tools and integrations with Visent APIs.

Next Steps