package sandbox import ( "context" "crypto/rand" "crypto/rsa" "crypto/x509" "encoding/json" "encoding/pem" "fmt" "net/http" "net/http/httptest" "net/url" "strings" "sync" "sync/atomic" "testing" "time" "golang.org/x/crypto/ssh" ) // ============================================================================= // Mock Proxmox HTTP Server (enhanced with error injection + call tracking) // ============================================================================= type mockProxmoxHandler struct { mu sync.Mutex containers map[int]ContainerStatus nextID int tasks map[string]string // taskID → exitstatus // Error injection: endpoint → number of calls before it starts succeeding. // Key is a pattern like "clone", "start", "config", "interfaces", "nextid", // "status/current", "delete", "execute", "task". failUntil map[string]int // Call counters per endpoint pattern. callCounts map[string]int // If set, task polling will return "running" this many times before "stopped". taskPollingRounds int // If > 0, interfaces endpoint returns no IP for this many calls before returning one. interfaceEmptyRounds int // If set, the task exit status to return (default "OK"). taskExitStatus string // Track all received requests for verification. requests []requestRecord } type requestRecord struct { Method string Path string Form url.Values } func newMockProxmoxHandler() *mockProxmoxHandler { return &mockProxmoxHandler{ containers: make(map[int]ContainerStatus), nextID: 200, tasks: make(map[string]string), failUntil: make(map[string]int), callCounts: make(map[string]int), taskExitStatus: "OK", } } func (m *mockProxmoxHandler) shouldFail(endpoint string, statusCode int, w http.ResponseWriter) bool { m.callCounts[endpoint]++ failCount, ok := m.failUntil[endpoint] if ok && m.callCounts[endpoint] <= failCount { http.Error(w, fmt.Sprintf("injected error for %s (call %d/%d)", endpoint, m.callCounts[endpoint], failCount), statusCode) return true } return false } func (m *mockProxmoxHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { m.mu.Lock() defer m.mu.Unlock() path := r.URL.Path // Verify auth header. auth := r.Header.Get("Authorization") if !strings.HasPrefix(auth, "PVEAPIToken=") { http.Error(w, "unauthorized", http.StatusUnauthorized) return } // Parse form for POST/PUT. r.ParseForm() m.requests = append(m.requests, requestRecord{ Method: r.Method, Path: path, Form: r.Form, }) w.Header().Set("Content-Type", "application/json") switch { case path == "/api2/json/cluster/nextid" && r.Method == http.MethodGet: if m.shouldFail("nextid", http.StatusInternalServerError, w) { return } m.handleNextID(w) case strings.HasSuffix(path, "/clone") && r.Method == http.MethodPost: if m.shouldFail("clone", http.StatusInternalServerError, w) { return } m.handleClone(w, r) case strings.HasSuffix(path, "/config") && r.Method == http.MethodPut: if m.shouldFail("config", http.StatusInternalServerError, w) { return } m.handleConfig(w) case strings.HasSuffix(path, "/status/start") && r.Method == http.MethodPost: if m.shouldFail("start", http.StatusInternalServerError, w) { return } m.handleStart(w, r) case strings.HasSuffix(path, "/status/stop") && r.Method == http.MethodPost: if m.shouldFail("stop", http.StatusInternalServerError, w) { return } m.handleStop(w, r) case strings.HasSuffix(path, "/status/current") && r.Method == http.MethodGet: if m.shouldFail("status/current", http.StatusInternalServerError, w) { return } m.handleStatusCurrent(w, r) case strings.HasSuffix(path, "/interfaces") && r.Method == http.MethodGet: if m.shouldFail("interfaces", http.StatusInternalServerError, w) { return } m.handleInterfaces(w) case strings.Contains(path, "/tasks/") && strings.HasSuffix(path, "/status"): if m.shouldFail("task", http.StatusInternalServerError, w) { return } m.handleTaskStatus(w) case r.Method == http.MethodDelete && strings.Contains(path, "/lxc/"): if m.shouldFail("delete", http.StatusInternalServerError, w) { return } m.handleDelete(w, r) case strings.HasSuffix(path, "/execute") && r.Method == http.MethodPost: if m.shouldFail("execute", http.StatusInternalServerError, w) { return } m.handleExecute(w) default: http.Error(w, fmt.Sprintf("unhandled: %s %s", r.Method, path), http.StatusNotFound) } } func (m *mockProxmoxHandler) handleNextID(w http.ResponseWriter) { id := m.nextID m.nextID++ json.NewEncoder(w).Encode(map[string]any{"data": id}) } func (m *mockProxmoxHandler) handleClone(w http.ResponseWriter, r *http.Request) { taskID := "UPID:pve:clone-task" m.tasks[taskID] = m.taskExitStatus json.NewEncoder(w).Encode(map[string]any{"data": taskID}) } func (m *mockProxmoxHandler) handleConfig(w http.ResponseWriter) { json.NewEncoder(w).Encode(map[string]any{"data": nil}) } func (m *mockProxmoxHandler) handleStart(w http.ResponseWriter, r *http.Request) { parts := strings.Split(r.URL.Path, "/") for i, p := range parts { if p == "lxc" && i+1 < len(parts) { var vmid int fmt.Sscanf(parts[i+1], "%d", &vmid) m.containers[vmid] = ContainerStatus{ Status: "running", CPU: 0.15, Mem: 256 * 1024 * 1024, MaxMem: 1024 * 1024 * 1024, Disk: 1024 * 1024 * 1024, MaxDisk: 8 * 1024 * 1024 * 1024, NetIn: 1024 * 1024, NetOut: 512 * 1024, Uptime: 120, } break } } taskID := "UPID:pve:start-task" m.tasks[taskID] = m.taskExitStatus json.NewEncoder(w).Encode(map[string]any{"data": taskID}) } func (m *mockProxmoxHandler) handleStop(w http.ResponseWriter, r *http.Request) { parts := strings.Split(r.URL.Path, "/") for i, p := range parts { if p == "lxc" && i+1 < len(parts) { var vmid int fmt.Sscanf(parts[i+1], "%d", &vmid) m.containers[vmid] = ContainerStatus{Status: "stopped"} break } } taskID := "UPID:pve:stop-task" m.tasks[taskID] = m.taskExitStatus json.NewEncoder(w).Encode(map[string]any{"data": taskID}) } func (m *mockProxmoxHandler) handleStatusCurrent(w http.ResponseWriter, r *http.Request) { parts := strings.Split(r.URL.Path, "/") for i, p := range parts { if p == "lxc" && i+1 < len(parts) { var vmid int fmt.Sscanf(parts[i+1], "%d", &vmid) status, ok := m.containers[vmid] if !ok { status = ContainerStatus{Status: "stopped"} } json.NewEncoder(w).Encode(map[string]any{"data": status}) return } } http.Error(w, "not found", http.StatusNotFound) } func (m *mockProxmoxHandler) handleInterfaces(w http.ResponseWriter) { m.callCounts["interfaces_data"]++ if m.interfaceEmptyRounds > 0 && m.callCounts["interfaces_data"] <= m.interfaceEmptyRounds { // Return interfaces with no IP yet. ifaces := []map[string]string{ {"name": "lo", "inet": "127.0.0.1/8"}, {"name": "eth0", "inet": "", "hwaddr": "AA:BB:CC:DD:EE:FF"}, } json.NewEncoder(w).Encode(map[string]any{"data": ifaces}) return } ifaces := []map[string]string{ {"name": "lo", "inet": "127.0.0.1/8"}, {"name": "eth0", "inet": "10.99.1.5/16", "hwaddr": "AA:BB:CC:DD:EE:FF"}, } json.NewEncoder(w).Encode(map[string]any{"data": ifaces}) } func (m *mockProxmoxHandler) handleTaskStatus(w http.ResponseWriter) { m.callCounts["task_poll"]++ if m.taskPollingRounds > 0 && m.callCounts["task_poll"] <= m.taskPollingRounds { json.NewEncoder(w).Encode(map[string]any{ "data": map[string]any{ "status": "running", "exitstatus": "", }, }) return } json.NewEncoder(w).Encode(map[string]any{ "data": map[string]any{ "status": "stopped", "exitstatus": m.taskExitStatus, }, }) } func (m *mockProxmoxHandler) handleDelete(w http.ResponseWriter, r *http.Request) { parts := strings.Split(r.URL.Path, "/") for i, p := range parts { if p == "lxc" && i+1 < len(parts) { var vmid int fmt.Sscanf(parts[i+1], "%d", &vmid) delete(m.containers, vmid) break } } taskID := "UPID:pve:delete-task" m.tasks[taskID] = m.taskExitStatus json.NewEncoder(w).Encode(map[string]any{"data": taskID}) } func (m *mockProxmoxHandler) handleExecute(w http.ResponseWriter) { json.NewEncoder(w).Encode(map[string]any{"data": ""}) } // ============================================================================= // Test helpers // ============================================================================= func newTestProxmoxClient(t *testing.T, handler *mockProxmoxHandler) (*ProxmoxClient, *httptest.Server) { t.Helper() server := httptest.NewTLSServer(handler) client := NewProxmoxClient(ProxmoxConfig{ BaseURL: server.URL, TokenID: "test@pve!test-token", Secret: "test-secret", Node: "pve", TemplateID: 9000, Pool: "sandbox-pool", Bridge: "vmbr1", InsecureSkipVerify: true, }) client.http = server.Client() return client, server } func newTestProxmoxClientNoPool(t *testing.T, handler *mockProxmoxHandler) (*ProxmoxClient, *httptest.Server) { t.Helper() server := httptest.NewTLSServer(handler) client := NewProxmoxClient(ProxmoxConfig{ BaseURL: server.URL, TokenID: "test@pve!test-token", Secret: "test-secret", Node: "pve", TemplateID: 9000, Pool: "", Bridge: "vmbr1", InsecureSkipVerify: true, }) client.http = server.Client() return client, server } func generateTestSigner(t *testing.T) ssh.Signer { t.Helper() key, err := rsa.GenerateKey(rand.Reader, 2048) if err != nil { t.Fatalf("generate RSA key: %v", err) } signer, err := ssh.NewSignerFromKey(key) if err != nil { t.Fatalf("create signer: %v", err) } return signer } func newTestSandbox(t *testing.T, handler *mockProxmoxHandler) (*Sandbox, *httptest.Server) { t.Helper() client, server := newTestProxmoxClient(t, handler) signer := generateTestSigner(t) sshExec := NewSSHExecutor("10.99.1.5", SSHConfig{Signer: signer}) // Start the container in the mock so it has a "running" status. ctx := context.Background() if err := client.StartContainer(ctx, 200); err != nil { t.Fatalf("start container: %v", err) } return &Sandbox{ ID: 200, IP: "10.99.1.5", proxmox: client, ssh: sshExec, }, server } // ============================================================================= // Proxmox Client Tests // ============================================================================= func TestProxmoxNextAvailableID(t *testing.T) { handler := newMockProxmoxHandler() client, server := newTestProxmoxClient(t, handler) defer server.Close() id, err := client.NextAvailableID(context.Background()) if err != nil { t.Fatalf("NextAvailableID: %v", err) } if id != 200 { t.Errorf("expected VMID 200, got %d", id) } // Second call should return 201. id2, err := client.NextAvailableID(context.Background()) if err != nil { t.Fatalf("NextAvailableID (2nd): %v", err) } if id2 != 201 { t.Errorf("expected VMID 201, got %d", id2) } } func TestProxmoxNextAvailableID_ServerError(t *testing.T) { handler := newMockProxmoxHandler() handler.failUntil["nextid"] = 999 // Always fail. client, server := newTestProxmoxClient(t, handler) defer server.Close() _, err := client.NextAvailableID(context.Background()) if err == nil { t.Fatal("expected error, got nil") } if !strings.Contains(err.Error(), "500") { t.Errorf("expected 500 error, got: %v", err) } } func TestProxmoxCloneTemplate(t *testing.T) { handler := newMockProxmoxHandler() client, server := newTestProxmoxClient(t, handler) defer server.Close() err := client.CloneTemplate(context.Background(), 200, ContainerConfig{ Hostname: "test-sandbox", }) if err != nil { t.Fatalf("CloneTemplate: %v", err) } // Verify clone request had the right params. handler.mu.Lock() defer handler.mu.Unlock() found := false for _, req := range handler.requests { if strings.HasSuffix(req.Path, "/clone") { found = true if req.Form.Get("hostname") != "test-sandbox" { t.Errorf("expected hostname=test-sandbox, got %q", req.Form.Get("hostname")) } if req.Form.Get("newid") != "200" { t.Errorf("expected newid=200, got %q", req.Form.Get("newid")) } if req.Form.Get("full") != "1" { t.Errorf("expected full=1, got %q", req.Form.Get("full")) } if req.Form.Get("pool") != "sandbox-pool" { t.Errorf("expected pool=sandbox-pool, got %q", req.Form.Get("pool")) } break } } if !found { t.Error("clone request not found in recorded requests") } } func TestProxmoxCloneTemplate_DefaultHostname(t *testing.T) { handler := newMockProxmoxHandler() client, server := newTestProxmoxClient(t, handler) defer server.Close() err := client.CloneTemplate(context.Background(), 201, ContainerConfig{}) if err != nil { t.Fatalf("CloneTemplate: %v", err) } handler.mu.Lock() defer handler.mu.Unlock() for _, req := range handler.requests { if strings.HasSuffix(req.Path, "/clone") { if req.Form.Get("hostname") != "sandbox-201" { t.Errorf("expected default hostname=sandbox-201, got %q", req.Form.Get("hostname")) } break } } } func TestProxmoxCloneTemplate_NoPool(t *testing.T) { handler := newMockProxmoxHandler() client, server := newTestProxmoxClientNoPool(t, handler) defer server.Close() err := client.CloneTemplate(context.Background(), 200, ContainerConfig{Hostname: "test"}) if err != nil { t.Fatalf("CloneTemplate: %v", err) } handler.mu.Lock() defer handler.mu.Unlock() for _, req := range handler.requests { if strings.HasSuffix(req.Path, "/clone") { if pool := req.Form.Get("pool"); pool != "" { t.Errorf("expected no pool param, got %q", pool) } break } } } func TestProxmoxCloneTemplate_ServerError(t *testing.T) { handler := newMockProxmoxHandler() handler.failUntil["clone"] = 999 client, server := newTestProxmoxClient(t, handler) defer server.Close() err := client.CloneTemplate(context.Background(), 200, ContainerConfig{Hostname: "test"}) if err == nil { t.Fatal("expected error, got nil") } } func TestProxmoxContainerLifecycle(t *testing.T) { handler := newMockProxmoxHandler() client, server := newTestProxmoxClient(t, handler) defer server.Close() ctx := context.Background() // Start. if err := client.StartContainer(ctx, 200); err != nil { t.Fatalf("StartContainer: %v", err) } status, err := client.GetContainerStatus(ctx, 200) if err != nil { t.Fatalf("GetContainerStatus: %v", err) } if status.Status != "running" { t.Errorf("expected status 'running', got %q", status.Status) } // Stop. if err := client.StopContainer(ctx, 200); err != nil { t.Fatalf("StopContainer: %v", err) } status, err = client.GetContainerStatus(ctx, 200) if err != nil { t.Fatalf("GetContainerStatus: %v", err) } if status.Status != "stopped" { t.Errorf("expected status 'stopped', got %q", status.Status) } } func TestProxmoxContainerStatus_FullFields(t *testing.T) { handler := newMockProxmoxHandler() client, server := newTestProxmoxClient(t, handler) defer server.Close() ctx := context.Background() if err := client.StartContainer(ctx, 200); err != nil { t.Fatalf("StartContainer: %v", err) } status, err := client.GetContainerStatus(ctx, 200) if err != nil { t.Fatalf("GetContainerStatus: %v", err) } if status.CPU != 0.15 { t.Errorf("expected CPU 0.15, got %f", status.CPU) } if status.Mem != 256*1024*1024 { t.Errorf("expected Mem %d, got %d", 256*1024*1024, status.Mem) } if status.MaxMem != 1024*1024*1024 { t.Errorf("expected MaxMem %d, got %d", 1024*1024*1024, status.MaxMem) } if status.Disk != 1024*1024*1024 { t.Errorf("expected Disk %d, got %d", 1024*1024*1024, status.Disk) } if status.MaxDisk != 8*1024*1024*1024 { t.Errorf("expected MaxDisk %d, got %d", 8*1024*1024*1024, status.MaxDisk) } if status.NetIn != 1024*1024 { t.Errorf("expected NetIn %d, got %d", 1024*1024, status.NetIn) } if status.NetOut != 512*1024 { t.Errorf("expected NetOut %d, got %d", 512*1024, status.NetOut) } if status.Uptime != 120 { t.Errorf("expected Uptime 120, got %d", status.Uptime) } } func TestProxmoxContainerStatus_UnknownContainer(t *testing.T) { handler := newMockProxmoxHandler() client, server := newTestProxmoxClient(t, handler) defer server.Close() // Container 999 doesn't exist in the mock — returns default "stopped". status, err := client.GetContainerStatus(context.Background(), 999) if err != nil { t.Fatalf("GetContainerStatus: %v", err) } if status.Status != "stopped" { t.Errorf("expected 'stopped' for unknown container, got %q", status.Status) } } func TestProxmoxGetContainerIP(t *testing.T) { handler := newMockProxmoxHandler() client, server := newTestProxmoxClient(t, handler) defer server.Close() ip, err := client.GetContainerIP(context.Background(), 200) if err != nil { t.Fatalf("GetContainerIP: %v", err) } if ip != "10.99.1.5" { t.Errorf("expected IP 10.99.1.5, got %q", ip) } } func TestProxmoxGetContainerIP_Polling(t *testing.T) { handler := newMockProxmoxHandler() handler.interfaceEmptyRounds = 2 // First 2 calls return no IP. client, server := newTestProxmoxClient(t, handler) defer server.Close() ip, err := client.GetContainerIP(context.Background(), 200) if err != nil { t.Fatalf("GetContainerIP: %v", err) } if ip != "10.99.1.5" { t.Errorf("expected IP 10.99.1.5, got %q", ip) } // Should have needed at least 3 calls. handler.mu.Lock() count := handler.callCounts["interfaces_data"] handler.mu.Unlock() if count < 3 { t.Errorf("expected at least 3 interface calls (polling), got %d", count) } } func TestProxmoxGetContainerIP_Timeout(t *testing.T) { handler := newMockProxmoxHandler() handler.interfaceEmptyRounds = 9999 // Never return an IP. client, server := newTestProxmoxClient(t, handler) defer server.Close() ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) defer cancel() _, err := client.GetContainerIP(ctx, 200) if err == nil { t.Fatal("expected timeout error, got nil") } if !strings.Contains(err.Error(), "deadline exceeded") && !strings.Contains(err.Error(), "context") { t.Errorf("expected context error, got: %v", err) } } func TestProxmoxGetContainerIP_SkipsLoopback(t *testing.T) { // The default mock returns lo (127.0.0.1) and eth0 (10.99.1.5). // Verify it skips lo and returns eth0. handler := newMockProxmoxHandler() client, server := newTestProxmoxClient(t, handler) defer server.Close() ip, err := client.GetContainerIP(context.Background(), 200) if err != nil { t.Fatalf("GetContainerIP: %v", err) } if ip == "127.0.0.1" { t.Error("should not return loopback address") } if ip != "10.99.1.5" { t.Errorf("expected 10.99.1.5, got %q", ip) } } func TestProxmoxDestroyContainer(t *testing.T) { handler := newMockProxmoxHandler() client, server := newTestProxmoxClient(t, handler) defer server.Close() ctx := context.Background() if err := client.StartContainer(ctx, 200); err != nil { t.Fatalf("StartContainer: %v", err) } if err := client.DestroyContainer(ctx, 200); err != nil { t.Fatalf("DestroyContainer: %v", err) } handler.mu.Lock() _, exists := handler.containers[200] handler.mu.Unlock() if exists { t.Error("container 200 should have been deleted") } } func TestProxmoxDestroyContainer_AlreadyStopped(t *testing.T) { handler := newMockProxmoxHandler() client, server := newTestProxmoxClient(t, handler) defer server.Close() // Container 200 doesn't exist → status defaults to "stopped". // Destroy should skip the stop step and go straight to delete. err := client.DestroyContainer(context.Background(), 200) if err != nil { t.Fatalf("DestroyContainer on stopped container: %v", err) } } func TestProxmoxConfigureContainer(t *testing.T) { handler := newMockProxmoxHandler() client, server := newTestProxmoxClient(t, handler) defer server.Close() err := client.ConfigureContainer(context.Background(), 200, ContainerConfig{ CPUs: 2, MemoryMB: 2048, }) if err != nil { t.Fatalf("ConfigureContainer: %v", err) } // Verify the request params. handler.mu.Lock() defer handler.mu.Unlock() for _, req := range handler.requests { if strings.HasSuffix(req.Path, "/config") { if req.Form.Get("cores") != "2" { t.Errorf("expected cores=2, got %q", req.Form.Get("cores")) } if req.Form.Get("memory") != "2048" { t.Errorf("expected memory=2048, got %q", req.Form.Get("memory")) } if req.Form.Get("swap") != "0" { t.Errorf("expected swap=0, got %q", req.Form.Get("swap")) } if !strings.Contains(req.Form.Get("net0"), "bridge=vmbr1") { t.Errorf("expected net0 to contain bridge=vmbr1, got %q", req.Form.Get("net0")) } break } } } func TestProxmoxConfigureContainer_ZeroValueDefaults(t *testing.T) { handler := newMockProxmoxHandler() client, server := newTestProxmoxClient(t, handler) defer server.Close() // CPUs=0 and MemoryMB=0 should default to 1 and 1024. err := client.ConfigureContainer(context.Background(), 200, ContainerConfig{}) if err != nil { t.Fatalf("ConfigureContainer: %v", err) } handler.mu.Lock() defer handler.mu.Unlock() for _, req := range handler.requests { if strings.HasSuffix(req.Path, "/config") { if req.Form.Get("cores") != "1" { t.Errorf("expected default cores=1, got %q", req.Form.Get("cores")) } if req.Form.Get("memory") != "1024" { t.Errorf("expected default memory=1024, got %q", req.Form.Get("memory")) } break } } } func TestProxmoxConfigureContainer_ServerError(t *testing.T) { handler := newMockProxmoxHandler() handler.failUntil["config"] = 999 client, server := newTestProxmoxClient(t, handler) defer server.Close() err := client.ConfigureContainer(context.Background(), 200, ContainerConfig{CPUs: 1, MemoryMB: 1024}) if err == nil { t.Fatal("expected error, got nil") } } func TestProxmoxEnableDisableInternet(t *testing.T) { handler := newMockProxmoxHandler() client, server := newTestProxmoxClient(t, handler) defer server.Close() ctx := context.Background() if err := client.EnableInternet(ctx, "10.99.1.5"); err != nil { t.Fatalf("EnableInternet: %v", err) } if err := client.DisableInternet(ctx, "10.99.1.5"); err != nil { t.Fatalf("DisableInternet: %v", err) } // Verify execute commands were sent. handler.mu.Lock() defer handler.mu.Unlock() execCount := 0 for _, req := range handler.requests { if strings.HasSuffix(req.Path, "/execute") { execCount++ } } if execCount != 2 { t.Errorf("expected 2 execute requests (enable + disable), got %d", execCount) } } func TestProxmoxEnableInternet_ServerError(t *testing.T) { handler := newMockProxmoxHandler() handler.failUntil["execute"] = 999 client, server := newTestProxmoxClient(t, handler) defer server.Close() err := client.EnableInternet(context.Background(), "10.99.1.5") if err == nil { t.Fatal("expected error, got nil") } } func TestProxmoxAuthRequired(t *testing.T) { handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { auth := r.Header.Get("Authorization") if auth != "PVEAPIToken=valid@pve!tok=secret123" { http.Error(w, "unauthorized", http.StatusUnauthorized) return } w.Header().Set("Content-Type", "application/json") json.NewEncoder(w).Encode(map[string]any{"data": 200}) }) server := httptest.NewTLSServer(handler) defer server.Close() // Wrong credentials. client := NewProxmoxClient(ProxmoxConfig{ BaseURL: server.URL, TokenID: "wrong@pve!tok", Secret: "wrong", Node: "pve", InsecureSkipVerify: true, }) client.http = server.Client() _, err := client.NextAvailableID(context.Background()) if err == nil { t.Fatal("expected error with wrong auth, got nil") } if !strings.Contains(err.Error(), "401") { t.Errorf("expected 401 error, got: %v", err) } // Correct credentials. client2 := NewProxmoxClient(ProxmoxConfig{ BaseURL: server.URL, TokenID: "valid@pve!tok", Secret: "secret123", Node: "pve", InsecureSkipVerify: true, }) client2.http = server.Client() id, err := client2.NextAvailableID(context.Background()) if err != nil { t.Fatalf("expected success with correct auth, got: %v", err) } if id != 200 { t.Errorf("expected VMID 200, got %d", id) } } func TestProxmoxContextCancellation(t *testing.T) { handler := newMockProxmoxHandler() client, server := newTestProxmoxClient(t, handler) defer server.Close() ctx, cancel := context.WithCancel(context.Background()) cancel() // Cancel immediately. _, err := client.NextAvailableID(ctx) if err == nil { t.Fatal("expected error with cancelled context, got nil") } } func TestProxmoxContextCancellation_Start(t *testing.T) { handler := newMockProxmoxHandler() client, server := newTestProxmoxClient(t, handler) defer server.Close() ctx, cancel := context.WithCancel(context.Background()) cancel() err := client.StartContainer(ctx, 200) if err == nil { t.Fatal("expected error with cancelled context, got nil") } } func TestProxmoxContextCancellation_Clone(t *testing.T) { handler := newMockProxmoxHandler() client, server := newTestProxmoxClient(t, handler) defer server.Close() ctx, cancel := context.WithCancel(context.Background()) cancel() err := client.CloneTemplate(ctx, 200, ContainerConfig{Hostname: "test"}) if err == nil { t.Fatal("expected error with cancelled context, got nil") } } func TestProxmoxHTTPErrors(t *testing.T) { tests := []struct { name string statusCode int body string }{ {"400 Bad Request", 400, "invalid parameter"}, {"403 Forbidden", 403, "permission denied"}, {"404 Not Found", 404, "resource not found"}, {"500 Internal Error", 500, "internal server error"}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { http.Error(w, tt.body, tt.statusCode) })) defer server.Close() client := NewProxmoxClient(ProxmoxConfig{ BaseURL: server.URL, TokenID: "test@pve!tok", Secret: "secret", Node: "pve", InsecureSkipVerify: true, }) client.http = server.Client() _, err := client.NextAvailableID(context.Background()) if err == nil { t.Fatalf("expected error for HTTP %d, got nil", tt.statusCode) } if !strings.Contains(err.Error(), fmt.Sprintf("%d", tt.statusCode)) { t.Errorf("expected error to contain '%d', got: %v", tt.statusCode, err) } if !strings.Contains(err.Error(), tt.body) { t.Errorf("expected error to contain %q, got: %v", tt.body, err) } }) } } func TestProxmoxInvalidJSON(t *testing.T) { server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") w.WriteHeader(200) w.Write([]byte("not json at all")) })) defer server.Close() client := NewProxmoxClient(ProxmoxConfig{ BaseURL: server.URL, TokenID: "test@pve!tok", Secret: "secret", Node: "pve", InsecureSkipVerify: true, }) client.http = server.Client() _, err := client.NextAvailableID(context.Background()) if err == nil { t.Fatal("expected error for invalid JSON, got nil") } if !strings.Contains(err.Error(), "decode") { t.Errorf("expected decode error, got: %v", err) } } func TestProxmoxTaskPolling(t *testing.T) { handler := newMockProxmoxHandler() handler.taskPollingRounds = 3 // Task returns "running" for 3 polls. client, server := newTestProxmoxClient(t, handler) defer server.Close() // StartContainer triggers waitForTask which should poll until done. err := client.StartContainer(context.Background(), 200) if err != nil { t.Fatalf("StartContainer (with polling): %v", err) } handler.mu.Lock() pollCount := handler.callCounts["task_poll"] handler.mu.Unlock() if pollCount < 4 { t.Errorf("expected at least 4 task polls (3 running + 1 stopped), got %d", pollCount) } } func TestProxmoxTaskFailed(t *testing.T) { handler := newMockProxmoxHandler() handler.taskExitStatus = "TASK ERROR: storage full" client, server := newTestProxmoxClient(t, handler) defer server.Close() err := client.StartContainer(context.Background(), 200) if err == nil { t.Fatal("expected error for failed task, got nil") } if !strings.Contains(err.Error(), "storage full") { t.Errorf("expected 'storage full' in error, got: %v", err) } } func TestProxmoxTaskPolling_ContextTimeout(t *testing.T) { handler := newMockProxmoxHandler() handler.taskPollingRounds = 9999 // Never completes. client, server := newTestProxmoxClient(t, handler) defer server.Close() ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond) defer cancel() err := client.StartContainer(ctx, 200) if err == nil { t.Fatal("expected timeout error, got nil") } } func TestProxmoxWaitForTask_EmptyTaskID(t *testing.T) { handler := newMockProxmoxHandler() client, server := newTestProxmoxClient(t, handler) defer server.Close() // waitForTask with empty taskID should be a no-op. err := client.waitForTask(context.Background(), "") if err != nil { t.Fatalf("waitForTask with empty ID: %v", err) } } func TestProxmoxStartContainer_ServerError(t *testing.T) { handler := newMockProxmoxHandler() handler.failUntil["start"] = 999 client, server := newTestProxmoxClient(t, handler) defer server.Close() err := client.StartContainer(context.Background(), 200) if err == nil { t.Fatal("expected error, got nil") } } func TestProxmoxStopContainer_ServerError(t *testing.T) { handler := newMockProxmoxHandler() handler.failUntil["stop"] = 999 client, server := newTestProxmoxClient(t, handler) defer server.Close() // Start first (so the mock has the container running). if err := client.StartContainer(context.Background(), 200); err != nil { t.Fatalf("start: %v", err) } handler.failUntil["stop"] = 999 handler.callCounts["stop"] = 0 err := client.StopContainer(context.Background(), 200) if err == nil { t.Fatal("expected error, got nil") } } func TestProxmoxDestroyContainer_DeleteError(t *testing.T) { handler := newMockProxmoxHandler() handler.failUntil["delete"] = 999 client, server := newTestProxmoxClient(t, handler) defer server.Close() // Container defaults to "stopped" so destroy goes straight to delete. err := client.DestroyContainer(context.Background(), 200) if err == nil { t.Fatal("expected error from delete, got nil") } } func TestProxmoxGetContainerStatus_ServerError(t *testing.T) { handler := newMockProxmoxHandler() handler.failUntil["status/current"] = 999 client, server := newTestProxmoxClient(t, handler) defer server.Close() _, err := client.GetContainerStatus(context.Background(), 200) if err == nil { t.Fatal("expected error, got nil") } } // ============================================================================= // SSH Executor Tests // ============================================================================= func TestSSHExecutorDefaults(t *testing.T) { signer := generateTestSigner(t) exec := NewSSHExecutor("10.99.1.5", SSHConfig{Signer: signer}) if exec.config.User != "sandbox" { t.Errorf("expected default user 'sandbox', got %q", exec.config.User) } if exec.config.ConnectTimeout != 10*time.Second { t.Errorf("expected default connect timeout 10s, got %v", exec.config.ConnectTimeout) } if exec.config.CommandTimeout != 60*time.Second { t.Errorf("expected default command timeout 60s, got %v", exec.config.CommandTimeout) } } func TestSSHExecutorCustomConfig(t *testing.T) { signer := generateTestSigner(t) exec := NewSSHExecutor("10.99.1.5", SSHConfig{ Signer: signer, User: "admin", ConnectTimeout: 30 * time.Second, CommandTimeout: 120 * time.Second, }) if exec.config.User != "admin" { t.Errorf("expected user 'admin', got %q", exec.config.User) } if exec.config.ConnectTimeout != 30*time.Second { t.Errorf("expected connect timeout 30s, got %v", exec.config.ConnectTimeout) } if exec.config.CommandTimeout != 120*time.Second { t.Errorf("expected command timeout 120s, got %v", exec.config.CommandTimeout) } } func TestSSHExecutorNotConnected(t *testing.T) { signer := generateTestSigner(t) exec := NewSSHExecutor("10.99.1.5", SSHConfig{Signer: signer}) _, err := exec.Exec(context.Background(), "echo hello") if err == nil { t.Fatal("expected error when not connected, got nil") } if !strings.Contains(err.Error(), "not connected") { t.Errorf("expected 'not connected' error, got: %v", err) } } func TestSSHExecutorUploadNotConnected(t *testing.T) { signer := generateTestSigner(t) exec := NewSSHExecutor("10.99.1.5", SSHConfig{Signer: signer}) err := exec.Upload(context.Background(), strings.NewReader("test"), "/tmp/test", 0644) if err == nil { t.Fatal("expected error when not connected, got nil") } if !strings.Contains(err.Error(), "sftp not connected") { t.Errorf("expected 'sftp not connected' error, got: %v", err) } } func TestSSHExecutorDownloadNotConnected(t *testing.T) { signer := generateTestSigner(t) exec := NewSSHExecutor("10.99.1.5", SSHConfig{Signer: signer}) _, err := exec.Download(context.Background(), "/tmp/test") if err == nil { t.Fatal("expected error when not connected, got nil") } if !strings.Contains(err.Error(), "sftp not connected") { t.Errorf("expected 'sftp not connected' error, got: %v", err) } } func TestSSHExecutorIsConnected(t *testing.T) { signer := generateTestSigner(t) exec := NewSSHExecutor("10.99.1.5", SSHConfig{Signer: signer}) if exec.IsConnected() { t.Error("should not be connected initially") } } func TestSSHExecutorCloseIdempotent(t *testing.T) { signer := generateTestSigner(t) exec := NewSSHExecutor("10.99.1.5", SSHConfig{Signer: signer}) // Close without connecting should not error. if err := exec.Close(); err != nil { t.Errorf("Close on unconnected executor: %v", err) } // Closing twice should also be fine. if err := exec.Close(); err != nil { t.Errorf("second Close: %v", err) } } func TestSSHExecutorConnectTimeout(t *testing.T) { signer := generateTestSigner(t) exec := NewSSHExecutor("10.99.1.5", SSHConfig{ Signer: signer, ConnectTimeout: 100 * time.Millisecond, }) ctx, cancel := context.WithTimeout(context.Background(), 300*time.Millisecond) defer cancel() err := exec.Connect(ctx) if err == nil { t.Fatal("expected connect to fail (no SSH server), got nil") } } func TestSSHExecutorConnectCancelled(t *testing.T) { signer := generateTestSigner(t) exec := NewSSHExecutor("10.99.1.5", SSHConfig{ Signer: signer, ConnectTimeout: 50 * time.Millisecond, }) ctx, cancel := context.WithCancel(context.Background()) cancel() // Cancel immediately. err := exec.Connect(ctx) if err == nil { t.Fatal("expected error with cancelled context, got nil") } if !strings.Contains(err.Error(), "context") { t.Errorf("expected context error, got: %v", err) } } func TestSSHExecutorConcurrentNotConnected(t *testing.T) { signer := generateTestSigner(t) exec := NewSSHExecutor("10.99.1.5", SSHConfig{Signer: signer}) var wg sync.WaitGroup var errCount int32 for i := 0; i < 10; i++ { wg.Add(1) go func() { defer wg.Done() _, err := exec.Exec(context.Background(), "echo test") if err != nil { atomic.AddInt32(&errCount, 1) } }() } wg.Wait() if errCount != 10 { t.Errorf("expected 10 errors (all not connected), got %d", errCount) } } // ============================================================================= // LoadSSHKey / ParseSSHKey Tests // ============================================================================= func TestLoadSSHKeyNotFound(t *testing.T) { _, err := LoadSSHKey("/nonexistent/path/to/key") if err == nil { t.Fatal("expected error for nonexistent key, got nil") } if !strings.Contains(err.Error(), "read SSH key") { t.Errorf("expected 'read SSH key' error, got: %v", err) } } func TestParseSSHKeyInvalid(t *testing.T) { _, err := ParseSSHKey([]byte("not a valid PEM key")) if err == nil { t.Fatal("expected error for invalid key, got nil") } if !strings.Contains(err.Error(), "parse SSH key") { t.Errorf("expected 'parse SSH key' error, got: %v", err) } } func TestParseSSHKeyEmpty(t *testing.T) { _, err := ParseSSHKey([]byte("")) if err == nil { t.Fatal("expected error for empty key, got nil") } } func TestParseSSHKeyValidRSA(t *testing.T) { key, err := rsa.GenerateKey(rand.Reader, 2048) if err != nil { t.Fatalf("generate key: %v", err) } // MarshalPrivateKey returns PEM block. pemBytes := pem.EncodeToMemory(&pem.Block{ Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key), }) signer, err := ParseSSHKey(pemBytes) if err != nil { t.Fatalf("ParseSSHKey: %v", err) } if signer == nil { t.Fatal("expected non-nil signer") } } // ============================================================================= // Manager Tests // ============================================================================= func TestManagerRequiresSigner(t *testing.T) { _, err := NewManager(Config{}) if err == nil { t.Fatal("expected error when no SSH signer provided") } if !strings.Contains(err.Error(), "SSH signer") { t.Errorf("expected SSH signer error, got: %v", err) } } func TestManagerCreation(t *testing.T) { signer := generateTestSigner(t) mgr, err := NewManager(Config{ Proxmox: ProxmoxConfig{ BaseURL: "https://localhost:8006", TokenID: "test@pve!tok", Secret: "secret", Node: "pve", TemplateID: 9000, }, SSH: SSHConfig{ Signer: signer, User: "admin", ConnectTimeout: 5 * time.Second, CommandTimeout: 30 * time.Second, }, Defaults: ContainerConfig{ CPUs: 2, MemoryMB: 2048, DiskGB: 16, }, }) if err != nil { t.Fatalf("NewManager: %v", err) } if mgr.defaults.CPUs != 2 { t.Errorf("expected defaults.CPUs=2, got %d", mgr.defaults.CPUs) } if mgr.defaults.MemoryMB != 2048 { t.Errorf("expected defaults.MemoryMB=2048, got %d", mgr.defaults.MemoryMB) } if mgr.defaults.DiskGB != 16 { t.Errorf("expected defaults.DiskGB=16, got %d", mgr.defaults.DiskGB) } if mgr.sshCfg.User != "admin" { t.Errorf("expected SSH user=admin, got %q", mgr.sshCfg.User) } } func TestManagerDestroyByID(t *testing.T) { handler := newMockProxmoxHandler() client, server := newTestProxmoxClient(t, handler) defer server.Close() signer := generateTestSigner(t) mgr := &Manager{ proxmox: client, sshKey: signer, sshCfg: SSHConfig{Signer: signer}, } ctx := context.Background() // Container 200 defaults to "stopped", DestroyByID should succeed. if err := mgr.DestroyByID(ctx, 200); err != nil { t.Fatalf("DestroyByID: %v", err) } } // ============================================================================= // Sandbox Tests // ============================================================================= func TestSandboxDestroyClosesConnections(t *testing.T) { handler := newMockProxmoxHandler() sb, server := newTestSandbox(t, handler) defer server.Close() if err := sb.Destroy(context.Background()); err != nil { t.Fatalf("Destroy: %v", err) } } func TestSandboxDestroyWithInternet(t *testing.T) { handler := newMockProxmoxHandler() sb, server := newTestSandbox(t, handler) defer server.Close() sb.Internet = true if err := sb.Destroy(context.Background()); err != nil { t.Fatalf("Destroy with internet: %v", err) } // Verify execute was called to disable internet. handler.mu.Lock() defer handler.mu.Unlock() found := false for _, req := range handler.requests { if strings.HasSuffix(req.Path, "/execute") { found = true break } } if !found { t.Error("expected execute request to disable internet during destroy") } } func TestSandboxDestroyWithoutInternet(t *testing.T) { handler := newMockProxmoxHandler() sb, server := newTestSandbox(t, handler) defer server.Close() sb.Internet = false if err := sb.Destroy(context.Background()); err != nil { t.Fatalf("Destroy without internet: %v", err) } // Verify execute was NOT called (no internet to disable). handler.mu.Lock() defer handler.mu.Unlock() for _, req := range handler.requests { if strings.HasSuffix(req.Path, "/execute") { t.Error("should not call execute when internet is not enabled") break } } } func TestSandboxDestroyIdempotent(t *testing.T) { handler := newMockProxmoxHandler() sb, server := newTestSandbox(t, handler) defer server.Close() if err := sb.Destroy(context.Background()); err != nil { t.Fatalf("first Destroy: %v", err) } // Second destroy should still work (container already gone from mock). // It may or may not error depending on whether the mock returns something // for the status check. The important thing is it doesn't panic. _ = sb.Destroy(context.Background()) } func TestSandboxSetInternet(t *testing.T) { handler := newMockProxmoxHandler() sb, server := newTestSandbox(t, handler) defer server.Close() ctx := context.Background() // Enable. if err := sb.SetInternet(ctx, true); err != nil { t.Fatalf("SetInternet(true): %v", err) } if !sb.Internet { t.Error("expected Internet=true after enable") } // Disable. if err := sb.SetInternet(ctx, false); err != nil { t.Fatalf("SetInternet(false): %v", err) } if sb.Internet { t.Error("expected Internet=false after disable") } // Toggle: enable again. if err := sb.SetInternet(ctx, true); err != nil { t.Fatalf("SetInternet(true) again: %v", err) } if !sb.Internet { t.Error("expected Internet=true after re-enable") } } func TestSandboxSetInternet_Error(t *testing.T) { handler := newMockProxmoxHandler() handler.failUntil["execute"] = 999 sb, server := newTestSandbox(t, handler) defer server.Close() handler.mu.Lock() handler.callCounts["execute"] = 0 handler.mu.Unlock() err := sb.SetInternet(context.Background(), true) if err == nil { t.Fatal("expected error, got nil") } // Internet flag should NOT have been set. if sb.Internet { t.Error("Internet flag should not be set when enable fails") } } func TestSandboxStatus(t *testing.T) { handler := newMockProxmoxHandler() sb, server := newTestSandbox(t, handler) defer server.Close() status, err := sb.Status(context.Background()) if err != nil { t.Fatalf("Status: %v", err) } if status.Status != "running" { t.Errorf("expected status 'running', got %q", status.Status) } if status.CPU != 0.15 { t.Errorf("expected CPU 0.15, got %f", status.CPU) } } func TestSandboxIsConnected(t *testing.T) { handler := newMockProxmoxHandler() sb, server := newTestSandbox(t, handler) defer server.Close() // SSH was not actually connected (no real server), so should be false. if sb.IsConnected() { t.Error("expected IsConnected=false (no real SSH)") } } func TestSandboxWriteFileRequiresConnection(t *testing.T) { handler := newMockProxmoxHandler() sb, server := newTestSandbox(t, handler) defer server.Close() err := sb.WriteFile(context.Background(), "/tmp/test.txt", "hello") if err == nil { t.Fatal("expected error when SSH not connected") } } func TestSandboxReadFileRequiresConnection(t *testing.T) { handler := newMockProxmoxHandler() sb, server := newTestSandbox(t, handler) defer server.Close() _, err := sb.ReadFile(context.Background(), "/tmp/test.txt") if err == nil { t.Fatal("expected error when SSH not connected") } } func TestSandboxExecRequiresConnection(t *testing.T) { handler := newMockProxmoxHandler() sb, server := newTestSandbox(t, handler) defer server.Close() _, err := sb.Exec(context.Background(), "echo hello") if err == nil { t.Fatal("expected error when SSH not connected") } } func TestSandboxUploadRequiresConnection(t *testing.T) { handler := newMockProxmoxHandler() sb, server := newTestSandbox(t, handler) defer server.Close() err := sb.Upload(context.Background(), strings.NewReader("data"), "/tmp/file", 0644) if err == nil { t.Fatal("expected error when SSH not connected") } } func TestSandboxDownloadRequiresConnection(t *testing.T) { handler := newMockProxmoxHandler() sb, server := newTestSandbox(t, handler) defer server.Close() _, err := sb.Download(context.Background(), "/tmp/file") if err == nil { t.Fatal("expected error when SSH not connected") } } // ============================================================================= // Option Tests // ============================================================================= func TestContainerConfigDefaults(t *testing.T) { o := &createOpts{} if o.cpus != 0 { t.Errorf("expected zero cpus, got %d", o.cpus) } if o.memoryMB != 0 { t.Errorf("expected zero memoryMB, got %d", o.memoryMB) } if o.diskGB != 0 { t.Errorf("expected zero diskGB, got %d", o.diskGB) } if o.hostname != "" { t.Errorf("expected empty hostname, got %q", o.hostname) } if o.internet { t.Error("expected internet=false by default") } } func TestContainerConfigOptions(t *testing.T) { o := &createOpts{} WithCPUs(2)(o) WithMemoryMB(2048)(o) WithDiskGB(16)(o) WithHostname("test")(o) WithInternet(true)(o) if o.cpus != 2 { t.Errorf("expected cpus=2, got %d", o.cpus) } if o.memoryMB != 2048 { t.Errorf("expected memoryMB=2048, got %d", o.memoryMB) } if o.diskGB != 16 { t.Errorf("expected diskGB=16, got %d", o.diskGB) } if o.hostname != "test" { t.Errorf("expected hostname='test', got %q", o.hostname) } if !o.internet { t.Error("expected internet=true") } } func TestContainerConfigOptionsOverwrite(t *testing.T) { o := &createOpts{} // Set then overwrite. WithCPUs(2)(o) WithCPUs(4)(o) if o.cpus != 4 { t.Errorf("expected cpus=4 after overwrite, got %d", o.cpus) } WithInternet(true)(o) WithInternet(false)(o) if o.internet { t.Error("expected internet=false after overwrite") } } func TestExecResultFields(t *testing.T) { t.Run("success", func(t *testing.T) { r := ExecResult{Output: "hello\n", ExitCode: 0} if r.Output != "hello\n" { t.Errorf("unexpected output: %q", r.Output) } if r.ExitCode != 0 { t.Errorf("unexpected exit code: %d", r.ExitCode) } }) t.Run("failure", func(t *testing.T) { r := ExecResult{Output: "command not found\n", ExitCode: 127} if r.ExitCode != 127 { t.Errorf("expected exit code 127, got %d", r.ExitCode) } }) t.Run("empty output", func(t *testing.T) { r := ExecResult{Output: "", ExitCode: 0} if r.Output != "" { t.Errorf("expected empty output, got %q", r.Output) } }) } // ============================================================================= // ContainerStatus Tests // ============================================================================= func TestContainerStatusFields(t *testing.T) { s := ContainerStatus{ Status: "running", CPU: 0.85, Mem: 512 * 1024 * 1024, MaxMem: 1024 * 1024 * 1024, Disk: 2 * 1024 * 1024 * 1024, MaxDisk: 8 * 1024 * 1024 * 1024, NetIn: 100 * 1024 * 1024, NetOut: 50 * 1024 * 1024, Uptime: 3600, } if s.Status != "running" { t.Errorf("expected 'running', got %q", s.Status) } if s.CPU != 0.85 { t.Errorf("expected CPU 0.85, got %f", s.CPU) } if s.Uptime != 3600 { t.Errorf("expected Uptime 3600, got %d", s.Uptime) } } func TestContainerStatusJSON(t *testing.T) { jsonData := `{ "status": "running", "cpu": 0.42, "mem": 268435456, "maxmem": 1073741824, "disk": 1073741824, "maxdisk": 8589934592, "netin": 1048576, "netout": 524288, "uptime": 7200 }` var s ContainerStatus if err := json.Unmarshal([]byte(jsonData), &s); err != nil { t.Fatalf("unmarshal: %v", err) } if s.Status != "running" { t.Errorf("expected 'running', got %q", s.Status) } if s.CPU != 0.42 { t.Errorf("expected CPU 0.42, got %f", s.CPU) } if s.Mem != 268435456 { t.Errorf("expected Mem 268435456, got %d", s.Mem) } if s.MaxMem != 1073741824 { t.Errorf("expected MaxMem 1073741824, got %d", s.MaxMem) } if s.Uptime != 7200 { t.Errorf("expected Uptime 7200, got %d", s.Uptime) } } // ============================================================================= // ProxmoxConfig / ProxmoxClient Tests // ============================================================================= func TestNewProxmoxClient(t *testing.T) { client := NewProxmoxClient(ProxmoxConfig{ BaseURL: "https://proxmox.local:8006", TokenID: "user@pve!token", Secret: "secret", Node: "pve", TemplateID: 9000, Pool: "pool", Bridge: "vmbr1", InsecureSkipVerify: true, }) if client == nil { t.Fatal("expected non-nil client") } if client.config.BaseURL != "https://proxmox.local:8006" { t.Errorf("unexpected BaseURL: %q", client.config.BaseURL) } if client.config.Node != "pve" { t.Errorf("unexpected Node: %q", client.config.Node) } if client.config.TemplateID != 9000 { t.Errorf("unexpected TemplateID: %d", client.config.TemplateID) } } func TestNewProxmoxClient_SecureByDefault(t *testing.T) { client := NewProxmoxClient(ProxmoxConfig{ BaseURL: "https://proxmox.local:8006", }) if client.config.InsecureSkipVerify { t.Error("InsecureSkipVerify should be false by default") } } // ============================================================================= // Concurrent Access Tests // ============================================================================= func TestProxmoxConcurrentRequests(t *testing.T) { handler := newMockProxmoxHandler() client, server := newTestProxmoxClient(t, handler) defer server.Close() var wg sync.WaitGroup var errCount int32 // Fire 20 concurrent NextAvailableID requests. for i := 0; i < 20; i++ { wg.Add(1) go func() { defer wg.Done() _, err := client.NextAvailableID(context.Background()) if err != nil { atomic.AddInt32(&errCount, 1) } }() } wg.Wait() if errCount > 0 { t.Errorf("expected 0 errors, got %d", errCount) } } func TestProxmoxConcurrentContainerOps(t *testing.T) { handler := newMockProxmoxHandler() client, server := newTestProxmoxClient(t, handler) defer server.Close() ctx := context.Background() var wg sync.WaitGroup var errCount int32 // Concurrent start + status on different VMIDs. for i := 200; i < 210; i++ { vmid := i wg.Add(1) go func() { defer wg.Done() if err := client.StartContainer(ctx, vmid); err != nil { atomic.AddInt32(&errCount, 1) return } if _, err := client.GetContainerStatus(ctx, vmid); err != nil { atomic.AddInt32(&errCount, 1) } }() } wg.Wait() if errCount > 0 { t.Errorf("expected 0 errors from concurrent ops, got %d", errCount) } } // ============================================================================= // Attach Tests (Manager.Attach) // ============================================================================= func TestManagerAttach_NotRunning(t *testing.T) { handler := newMockProxmoxHandler() client, server := newTestProxmoxClient(t, handler) defer server.Close() signer := generateTestSigner(t) mgr := &Manager{ proxmox: client, sshKey: signer, sshCfg: SSHConfig{Signer: signer}, } // Container 200 defaults to "stopped". _, err := mgr.Attach(context.Background(), 200) if err == nil { t.Fatal("expected error attaching to stopped container") } if !strings.Contains(err.Error(), "not running") { t.Errorf("expected 'not running' error, got: %v", err) } } func TestManagerAttach_StatusError(t *testing.T) { handler := newMockProxmoxHandler() handler.failUntil["status/current"] = 999 client, server := newTestProxmoxClient(t, handler) defer server.Close() signer := generateTestSigner(t) mgr := &Manager{ proxmox: client, sshKey: signer, sshCfg: SSHConfig{Signer: signer}, } _, err := mgr.Attach(context.Background(), 200) if err == nil { t.Fatal("expected error, got nil") } } // ============================================================================= // Request Tracking / Verification Tests // ============================================================================= func TestProxmoxCloneTemplate_RequestPath(t *testing.T) { handler := newMockProxmoxHandler() client, server := newTestProxmoxClient(t, handler) defer server.Close() if err := client.CloneTemplate(context.Background(), 201, ContainerConfig{Hostname: "mybox"}); err != nil { t.Fatalf("CloneTemplate: %v", err) } handler.mu.Lock() defer handler.mu.Unlock() found := false for _, req := range handler.requests { if strings.Contains(req.Path, "/lxc/9000/clone") { found = true break } } if !found { t.Error("expected clone request to use template ID 9000 in path") } } func TestProxmoxStartContainer_RequestPath(t *testing.T) { handler := newMockProxmoxHandler() client, server := newTestProxmoxClient(t, handler) defer server.Close() if err := client.StartContainer(context.Background(), 201); err != nil { t.Fatalf("StartContainer: %v", err) } handler.mu.Lock() defer handler.mu.Unlock() found := false for _, req := range handler.requests { if strings.Contains(req.Path, "/lxc/201/status/start") { found = true break } } if !found { t.Error("expected start request to use VMID 201 in path") } } func TestProxmoxDestroyContainer_RequestPath(t *testing.T) { handler := newMockProxmoxHandler() client, server := newTestProxmoxClient(t, handler) defer server.Close() // Destroy on VMID 202 (defaults to stopped, skip stop step). if err := client.DestroyContainer(context.Background(), 202); err != nil { t.Fatalf("DestroyContainer: %v", err) } handler.mu.Lock() defer handler.mu.Unlock() found := false for _, req := range handler.requests { if req.Method == http.MethodDelete && strings.Contains(req.Path, "/lxc/202") { found = true break } } if !found { t.Error("expected DELETE request for VMID 202") } } func TestProxmoxConfigureContainer_BridgeInRequest(t *testing.T) { handler := newMockProxmoxHandler() client, server := newTestProxmoxClient(t, handler) defer server.Close() if err := client.ConfigureContainer(context.Background(), 200, ContainerConfig{CPUs: 1}); err != nil { t.Fatalf("ConfigureContainer: %v", err) } handler.mu.Lock() defer handler.mu.Unlock() for _, req := range handler.requests { if strings.HasSuffix(req.Path, "/config") { net0 := req.Form.Get("net0") if !strings.Contains(net0, "vmbr1") { t.Errorf("expected net0 to contain bridge name 'vmbr1', got %q", net0) } if !strings.Contains(net0, "ip=dhcp") { t.Errorf("expected net0 to contain ip=dhcp, got %q", net0) } break } } }