diff --git a/architecture/object-metadata.md b/architecture/object-metadata.md new file mode 100644 index 000000000..2697a3328 --- /dev/null +++ b/architecture/object-metadata.md @@ -0,0 +1,417 @@ +# Object Metadata Convention + +## Overview + +OpenShell adopts a Kubernetes-style object metadata convention for all top-level domain objects. This standardizes how resources are identified, labeled, and queried across the platform. All resources that users interact with directly (Sandbox, Provider, SshSession, InferenceRoute) follow this convention. + +## Core Principles + +### 1. Uniform Metadata Structure + +All top-level objects embed a common `ObjectMeta` message containing: + +- **Stable ID**: Server-generated UUID that never changes +- **Human-readable name**: User-friendly identifier (unique per object type) +- **Creation timestamp**: Milliseconds since Unix epoch +- **Labels**: Key-value pairs for filtering and organization + +### 2. Trait-Based Access + +Rather than accessing metadata fields directly (e.g., `sandbox.metadata.as_ref().unwrap().id`), code uses trait methods from `openshell_core::metadata`: + +```rust +use openshell_core::{ObjectId, ObjectName, ObjectLabels}; + +let id = sandbox.object_id(); // Returns &str +let name = sandbox.object_name(); // Returns &str +let labels = sandbox.object_labels(); // Returns Option<HashMap<String, String>> +``` + +This provides: +- **Uniform API** across all object types +- **Graceful fallback** (returns empty string if metadata is None) +- **Reduced boilerplate** in code that works with multiple object types + +### 3. 
Labels for Organization and Filtering + +Labels are key-value metadata attached to objects for: + +- **Grouping** related resources (e.g., all dev environment sandboxes) +- **Filtering** in list operations (e.g., show only sandboxes with `team=backend`) +- **Automation** and selection in scripts + +## Implementation Pattern + +### Protobuf Definition + +Define `ObjectMeta` once in `proto/datamodel.proto`: + +```protobuf +message ObjectMeta { + string id = 1; + string name = 2; + int64 created_at_ms = 3; + map<string, string> labels = 4; +} +``` + +Embed it in top-level objects: + +```protobuf +message Sandbox { + ObjectMeta metadata = 1; + SandboxSpec spec = 2; + SandboxStatus status = 3; + int32 phase = 4; + int32 current_policy_version = 5; +} +``` + +**Migration**: When adding metadata to an existing object, shift field numbers to make room for `metadata = 1`. This maintains backward compatibility if done before release. + +### Trait Implementation + +Implement the three traits for each object in `crates/openshell-core/src/metadata.rs`: + +```rust +impl ObjectId for Sandbox { + fn object_id(&self) -> &str { + self.metadata.as_ref().map(|m| m.id.as_str()).unwrap_or("") + } +} + +impl ObjectName for Sandbox { + fn object_name(&self) -> &str { + self.metadata.as_ref().map(|m| m.name.as_str()).unwrap_or("") + } +} + +impl ObjectLabels for Sandbox { + fn object_labels(&self) -> Option<HashMap<String, String>> { + self.metadata.as_ref().map(|m| m.labels.clone()) + } +} +``` + +**Pattern**: Always return empty string for missing metadata rather than panicking. This makes code resilient to malformed data. 
+ +### Persistence Layer + +The `Store` trait in `crates/openshell-server/src/persistence/mod.rs` provides three methods for working with objects: + +```rust +// Store/retrieve by stable ID +async fn put_message( + &self, + message: &T, +) -> Result<(), String>; + +async fn get(&self, object_type: &str, id: &str) + -> Result, String>; + +// Retrieve by human-readable name +async fn get_message_by_name( + &self, + name: &str, +) -> Result, String>; +``` + +**Database schema pattern**: Each object type has: +- `id` column (TEXT PRIMARY KEY) — stable UUID +- `name` column (TEXT UNIQUE NOT NULL) — user-facing name +- `payload` column (BLOB) — serialized protobuf +- `created_at_ms` column (INTEGER) — denormalized from metadata for indexing +- `updated_at_ms` column (INTEGER) — last modification time + +### Label Filtering + +Label selectors follow Kubernetes conventions: + +**Format**: `key1=value1,key2=value2` (comma-separated, AND logic) + +**Implementation**: +1. Parse selector into key-value pairs +2. For each object, check that ALL selector labels match +3. Return only objects where every label in the selector exists with the exact value + +**SQL pattern** (PostgreSQL with JSONB): + +```sql +WHERE labels @> '{"env": "dev", "team": "backend"}'::jsonb +``` + +**SQL pattern** (SQLite): + +```sql +WHERE json_extract(labels, '$.env') = 'dev' + AND json_extract(labels, '$.team') = 'backend' +``` + +The `list_with_selector` method on `Store` handles this transparently. 
+### Validation Rules + +Labels must follow Kubernetes naming conventions (enforced in `crates/openshell-server/src/grpc/validation.rs`): + +**Label keys**: +- Optional prefix + `/` + name (e.g., `example.com/app` or `app`) +- Prefix: DNS subdomain (lowercase alphanumeric, `-`, `.`, max 253 chars) +- Name: alphanumeric + `-`, `_`, `.`, max 63 chars +- Cannot start or end with `-` or `.` + +**Label values**: +- Alphanumeric + `-`, `_`, `.` +- Max 63 characters +- Can be empty string + +**Validation functions**: +```rust +validate_label_key(key: &str) -> Result<(), Status> +validate_label_value(value: &str) -> Result<(), Status> +validate_labels(labels: &HashMap<String, String>) -> Result<(), Status> +``` + +**Validation timing**: Validate at API ingress (gRPC handlers) before persisting. Reject invalid labels immediately rather than storing and failing later. + +## CLI Integration + +### Creating Objects with Labels + +```bash +openshell sandbox create --label env=dev --label team=backend +openshell provider create openai --label project=research +``` + +**Pattern**: Repeatable `--label key=value` flags parsed into `HashMap<String, String>`. + +### Listing with Selectors + +```bash +openshell sandbox list --selector env=dev +openshell sandbox list --selector env=dev,team=backend +``` + +**Display**: Show labels in tabular output when present, or in detail views. 
+ +## Testing Requirements + +### Unit Tests + +Test validation logic for: +- Valid label keys (with and without prefix) +- Invalid keys (bad characters, too long, empty segments) +- Valid label values +- Invalid values (non-alphanumeric, too long) +- Selector parsing + +### Integration Tests + +Test persistence layer: +- Store object with labels +- Retrieve by name and verify labels present +- Filter with single-label selector +- Filter with multi-label selector (AND logic) +- Empty results for non-matching selector + +### E2E Tests + +Test full workflow through CLI: +- Create multiple objects with different labels +- List all objects +- Filter by single label +- Filter by multiple labels +- Verify labels persist across gateway restarts + +**Location**: `e2e/rust/tests/sandbox_labels.rs` (or equivalent for each object type) + +## Migration Checklist + +When adding object metadata to a new resource type: + +1. **Proto changes**: + - [ ] Add `ObjectMeta metadata = 1;` field + - [ ] Shift existing field numbers if needed + - [ ] Update any references to old id/name fields + +2. **Trait implementations**: + - [ ] Implement `ObjectId` trait + - [ ] Implement `ObjectName` trait + - [ ] Implement `ObjectLabels` trait + - [ ] Add to `crates/openshell-core/src/metadata.rs` + +3. **Persistence**: + - [ ] Add database migration (SQLite + PostgreSQL) + - [ ] Create `labels` column (JSON/JSONB type) + - [ ] Migrate existing `id`/`name` to `ObjectMeta` + - [ ] Update `ObjectType` implementation + - [ ] Update create/read operations to use new structure + +4. **Validation**: + - [ ] Add label validation in gRPC handlers + - [ ] Validate on create and update operations + - [ ] Test validation with unit tests + +5. **API updates**: + - [ ] Add `label_selector` parameter to List RPC + - [ ] Implement selector filtering in persistence layer + - [ ] Add `labels` field to Create/Update RPCs + +6. 
**CLI updates**: + - [ ] Add `--label` flag to create command + - [ ] Add `--selector` flag to list command + - [ ] Update completion for label keys (if applicable) + - [ ] Display labels in list and get output + +7. **Tests**: + - [ ] Unit tests for validation + - [ ] Integration tests for persistence + - [ ] E2E tests for CLI workflow + +8. **Documentation**: + - [ ] Update user-facing docs for new flags + - [ ] Add examples with labels to guides + +## Common Patterns + +### Creating Objects with Metadata + +```rust +use crate::persistence::current_time_ms; + +let now_ms = current_time_ms() + .map_err(|e| Status::internal(format!("get current time: {e}")))?; + +let sandbox = Sandbox { + metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { + id: uuid::Uuid::new_v4().to_string(), + name: user_provided_name, + created_at_ms: now_ms, + labels: request.labels, + }), + spec: Some(spec), + status: None, + phase: SandboxPhase::Provisioning as i32, + current_policy_version: 0, +}; + +// Validate before persisting +validate_object_metadata(sandbox.metadata.as_ref(), "sandbox")?; +store.put_message(&sandbox).await?; +``` + +### Filtering by Labels + +```rust +let sandboxes = if request.label_selector.is_empty() { + store.list(Sandbox::object_type(), limit, offset).await? +} else { + validate_label_selector(&request.label_selector)?; + store.list_with_selector( + Sandbox::object_type(), + &request.label_selector, + limit, + offset, + ).await? 
+}; +``` + +### Accessing Metadata Fields + +```rust +use openshell_core::{ObjectId, ObjectName}; + +// Good: trait-based access +let sandbox_id = sandbox.object_id(); +let sandbox_name = sandbox.object_name(); + +// Avoid: direct field access +let sandbox_id = sandbox.metadata.as_ref().unwrap().id.as_str(); // Don't do this +``` + +## Anti-Patterns to Avoid + +### ❌ Bypassing Validation + +```rust +// Bad: storing labels without validation +store.put_message(&sandbox).await?; +``` + +```rust +// Good: validate before storing +validate_labels(&sandbox.metadata.as_ref().unwrap().labels)?; +store.put_message(&sandbox).await?; +``` + +### ❌ Direct Field Access + +```rust +// Bad: fragile to missing metadata +let id = sandbox.metadata.as_ref().unwrap().id.clone(); +``` + +```rust +// Good: trait-based with fallback +let id = sandbox.object_id().to_string(); +``` + +### ❌ Inconsistent Object Construction + +```rust +// Bad: forgetting created_at_ms or labels +let sandbox = Sandbox { + metadata: Some(ObjectMeta { + id: uuid::Uuid::new_v4().to_string(), + name: "test".to_string(), + ..Default::default() // Silently sets created_at_ms=0, labels=empty + }), + ..Default::default() +}; +``` + +```rust +// Good: explicit fields +let sandbox = Sandbox { + metadata: Some(ObjectMeta { + id: uuid::Uuid::new_v4().to_string(), + name: "test".to_string(), + created_at_ms: current_time_ms()?, + labels: request.labels, + }), + ..Default::default() +}; +``` + +### ❌ Client-Side ID Generation + +```rust +// Bad: letting clients specify IDs +let sandbox = Sandbox { + metadata: Some(ObjectMeta { + id: request.id, // Never trust client-provided IDs + .. + }), + .. +}; +``` + +```rust +// Good: server generates stable IDs +let sandbox = Sandbox { + metadata: Some(ObjectMeta { + id: uuid::Uuid::new_v4().to_string(), + .. + }), + .. 
+}; +``` + +## References + +- **Kubernetes API Conventions**: https://kubernetes.io/docs/reference/using-api/api-concepts/ +- **Label Syntax**: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +- **Proto definition**: `proto/datamodel.proto` +- **Trait implementations**: `crates/openshell-core/src/metadata.rs` +- **Persistence layer**: `crates/openshell-server/src/persistence/mod.rs` +- **Validation logic**: `crates/openshell-server/src/grpc/validation.rs` +- **E2E tests**: `e2e/rust/tests/sandbox_labels.rs` diff --git a/crates/openshell-cli/src/completers.rs b/crates/openshell-cli/src/completers.rs index 3c2a8b336..257ceb9ff 100644 --- a/crates/openshell-cli/src/completers.rs +++ b/crates/openshell-cli/src/completers.rs @@ -7,6 +7,7 @@ use std::time::Duration; use clap_complete::engine::CompletionCandidate; use openshell_bootstrap::{list_gateways, load_active_gateway, load_gateway_metadata}; +use openshell_core::ObjectName; use openshell_core::proto::open_shell_client::OpenShellClient; use openshell_core::proto::{ListProvidersRequest, ListSandboxesRequest}; use tonic::transport::{Channel, Endpoint}; @@ -33,6 +34,7 @@ pub fn complete_sandbox_names(_prefix: &OsStr) -> Vec { .list_sandboxes(ListSandboxesRequest { limit: 200, offset: 0, + label_selector: String::new(), }) .await .ok()?; @@ -41,7 +43,7 @@ pub fn complete_sandbox_names(_prefix: &OsStr) -> Vec { .into_inner() .sandboxes .into_iter() - .map(|s| CompletionCandidate::new(s.name)) + .map(|s| CompletionCandidate::new(s.object_name())) .collect(), ) }) @@ -64,7 +66,7 @@ pub fn complete_provider_names(_prefix: &OsStr) -> Vec { .into_inner() .providers .into_iter() - .map(|p| CompletionCandidate::new(p.name)) + .map(|p| CompletionCandidate::new(p.object_name())) .collect(), ) }) diff --git a/crates/openshell-cli/src/main.rs b/crates/openshell-cli/src/main.rs index 6239978d7..5c9865043 100644 --- a/crates/openshell-cli/src/main.rs +++ b/crates/openshell-cli/src/main.rs @@ -1197,6 
+1197,10 @@ enum SandboxCommands { #[arg(long, overrides_with = "auto_providers")] no_auto_providers: bool, + /// Attach labels to the sandbox (key=value format, repeatable). + #[arg(long = "label")] + labels: Vec, + /// Command to run after "--" (defaults to an interactive shell). #[arg(trailing_var_arg = true)] command: Vec, @@ -1232,6 +1236,10 @@ enum SandboxCommands { /// Print only sandbox names (one per line). #[arg(long, conflicts_with = "ids")] names: bool, + + /// Filter sandboxes by label selector (key1=value1,key2=value2). + #[arg(long)] + selector: Option, }, /// Delete a sandbox by name. @@ -2293,6 +2301,7 @@ async fn main() -> Result<()> { no_bootstrap, auto_providers, no_auto_providers, + labels, command, } => { // Resolve --tty / --no-tty into an Option override. @@ -2323,6 +2332,19 @@ async fn main() -> Result<()> { None // prompt or auto-detect }; + // Parse --label flags into a HashMap. + let mut labels_map = std::collections::HashMap::new(); + for label_str in &labels { + let parts: Vec<&str> = label_str.splitn(2, '=').collect(); + if parts.len() != 2 { + return Err(miette::miette!( + "invalid label format '{}', expected key=value", + label_str + )); + } + labels_map.insert(parts[0].to_string(), parts[1].to_string()); + } + // Parse --upload spec into (local_path, sandbox_path, git_ignore). 
let upload_spec = upload.as_deref().map(|s| { let (local, remote) = parse_upload_spec(s); @@ -2373,6 +2395,7 @@ async fn main() -> Result<()> { tty_override, Some(false), auto_providers_override, + &labels_map, &tls, )) .await?; @@ -2474,8 +2497,18 @@ async fn main() -> Result<()> { offset, ids, names, + selector, } => { - run::sandbox_list(endpoint, limit, offset, ids, names, &tls).await?; + run::sandbox_list( + endpoint, + limit, + offset, + ids, + names, + selector.as_deref(), + &tls, + ) + .await?; } SandboxCommands::Delete { names, all } => { run::sandbox_delete(endpoint, &names, all, &tls, &ctx.name).await?; diff --git a/crates/openshell-cli/src/run.rs b/crates/openshell-cli/src/run.rs index ba25488b8..87489014a 100644 --- a/crates/openshell-cli/src/run.rs +++ b/crates/openshell-cli/src/run.rs @@ -34,6 +34,7 @@ use openshell_core::proto::{ UpdateProviderRequest, WatchSandboxRequest, exec_sandbox_event, setting_value, }; use openshell_core::settings::{self, SettingValueKind}; +use openshell_core::{ObjectId, ObjectName}; use openshell_providers::{ ProviderRegistry, detect_provider_from_command, normalize_provider_type, }; @@ -403,7 +404,7 @@ fn print_sandbox_header(sandbox: &Sandbox, display: Option<&ProvisioningDisplay> format!( "{} {}", "Created sandbox:".cyan().bold(), - sandbox.name.bold() + sandbox.object_name().bold() ), String::new(), ]; @@ -1963,6 +1964,7 @@ pub async fn sandbox_create_with_bootstrap( tty_override, Some(false), auto_providers_override, + &std::collections::HashMap::new(), &tls, ) .await @@ -2018,6 +2020,7 @@ pub async fn sandbox_create( tty_override: Option, bootstrap_override: Option, auto_providers_override: Option, + labels: &std::collections::HashMap, tls: &TlsOptions, ) -> Result<()> { if editor.is_some() && !command.is_empty() { @@ -2120,6 +2123,7 @@ pub async fn sandbox_create( ..SandboxSpec::default() }), name: name.unwrap_or_default().to_string(), + labels: labels.clone(), }; let response = match 
client.create_sandbox(request).await { @@ -2139,7 +2143,11 @@ pub async fn sandbox_create( let interactive = std::io::stdout().is_terminal(); let persist = sandbox_should_persist(keep, forward.as_ref()); - let sandbox_name = sandbox.name.clone(); + let sandbox_name = if sandbox.object_name().is_empty() { + "unknown".to_string() + } else { + sandbox.object_name().to_string() + }; // Record this sandbox as the last-used for the active gateway only when it // is expected to persist beyond the initial session. @@ -2174,9 +2182,14 @@ pub async fn sandbox_create( // a newly created sandbox. Instead we handle termination client-side: // we wait until we have observed at least one non-Ready phase followed // by Ready (a genuine Provisioning → Ready transition). + let sandbox_id = if sandbox.object_id().is_empty() { + "unknown".to_string() + } else { + sandbox.object_id().to_string() + }; let mut stream = client .watch_sandbox(WatchSandboxRequest { - id: sandbox.id.clone(), + id: sandbox_id.clone(), follow_status: true, follow_logs: true, follow_events: true, @@ -2757,10 +2770,14 @@ pub async fn sandbox_get( .sandbox .ok_or_else(|| miette::miette!("sandbox missing from response"))?; + let sandbox_id = if sandbox.object_id().is_empty() { + return Err(miette::miette!("sandbox missing metadata")); + } else { + sandbox.object_id().to_string() + }; + let config = client - .get_sandbox_config(GetSandboxConfigRequest { - sandbox_id: sandbox.id.clone(), - }) + .get_sandbox_config(GetSandboxConfigRequest { sandbox_id }) .await .into_diagnostic()? 
.into_inner(); @@ -2779,11 +2796,32 @@ pub async fn sandbox_get( println!("{}", "Sandbox:".cyan().bold()); println!(); - println!(" {} {}", "Id:".dimmed(), sandbox.id); - println!(" {} {}", "Name:".dimmed(), sandbox.name); - println!(" {} {}", "Namespace:".dimmed(), sandbox.namespace); + let id = if sandbox.object_id().is_empty() { + "unknown" + } else { + sandbox.object_id() + }; + let name = if sandbox.object_name().is_empty() { + "unknown" + } else { + sandbox.object_name() + }; + println!(" {} {}", "Id:".dimmed(), id); + println!(" {} {}", "Name:".dimmed(), name); println!(" {} {}", "Phase:".dimmed(), phase_name(sandbox.phase)); + // Display labels if present + if let Some(metadata) = &sandbox.metadata { + if !metadata.labels.is_empty() { + println!(" {} ", "Labels:".dimmed()); + let mut labels: Vec<_> = metadata.labels.iter().collect(); + labels.sort_by_key(|(k, _)| *k); + for (key, value) in labels { + println!(" {}: {}", key, value); + } + } + } + let policy_from_global = config.policy_source == PolicySource::Global as i32; println!( " {} {}", @@ -2889,7 +2927,7 @@ pub async fn sandbox_exec_grpc( // Make the streaming gRPC call. 
let mut stream = client .exec_sandbox(ExecSandboxRequest { - sandbox_id: sandbox.id, + sandbox_id: sandbox.object_id().to_string(), command: command.to_vec(), workdir: workdir.unwrap_or_default().to_string(), environment: HashMap::new(), @@ -2993,12 +3031,17 @@ pub async fn sandbox_list( offset: u32, ids_only: bool, names_only: bool, + label_selector: Option<&str>, tls: &TlsOptions, ) -> Result<()> { let mut client = grpc_client(server, tls).await?; let response = client - .list_sandboxes(ListSandboxesRequest { limit, offset }) + .list_sandboxes(ListSandboxesRequest { + limit, + offset, + label_selector: label_selector.unwrap_or("").to_string(), + }) .await .into_diagnostic()?; @@ -3012,14 +3055,14 @@ pub async fn sandbox_list( if ids_only { for sandbox in sandboxes { - println!("{}", sandbox.id); + println!("{}", sandbox.object_id()); } return Ok(()); } if names_only { for sandbox in sandboxes { - println!("{}", sandbox.name); + println!("{}", sandbox.object_name().to_string()); } return Ok(()); } @@ -3027,23 +3070,16 @@ pub async fn sandbox_list( // Calculate column widths let name_width = sandboxes .iter() - .map(|s| s.name.len()) + .map(|s| s.object_name().len()) .max() .unwrap_or(4) .max(4); - let ns_width = sandboxes - .iter() - .map(|s| s.namespace.len()) - .max() - .unwrap_or(9) - .max(9); let created_width = 19; // "YYYY-MM-DD HH:MM:SS" // Print header println!( - "{: phase.dimmed().to_string(), _ => phase.to_string(), }; - let created = format_epoch_ms(sandbox.created_at_ms); + let created = format_epoch_ms( + sandbox + .metadata + .as_ref() + .map(|m| m.created_at_ms) + .unwrap_or(0), + ); println!( - "{: Result< println!("{}", "Provider:".cyan().bold()); println!(); - println!(" {} {}", "Id:".dimmed(), provider.id); - println!(" {} {}", "Name:".dimmed(), provider.name); + println!(" {} {}", "Id:".dimmed(), provider.object_id().to_string()); + println!( + " {} {}", + "Name:".dimmed(), + provider.object_name().to_string() + ); println!(" {} {}", 
"Type:".dimmed(), provider.r#type); println!( " {} {}", @@ -3595,14 +3663,14 @@ pub async fn provider_list( if names_only { for provider in providers { - println!("{}", provider.name); + println!("{}", provider.object_name().to_string()); } return Ok(()); } let name_width = providers .iter() - .map(|provider| provider.name.len()) + .map(|provider| provider.object_name().len()) .max() .unwrap_or(4) .max(4); @@ -3624,7 +3692,7 @@ pub async fn provider_list( for provider in providers { println!( "{: &str; +} + +/// Provides access to the object's human-readable name. +pub trait ObjectName { + fn object_name(&self) -> &str; +} + +/// Provides access to the object's labels (key-value metadata). +pub trait ObjectLabels { + fn object_labels(&self) -> Option>; +} + +// Implementations for Sandbox +impl ObjectId for Sandbox { + fn object_id(&self) -> &str { + self.metadata.as_ref().map(|m| m.id.as_str()).unwrap_or("") + } +} + +impl ObjectName for Sandbox { + fn object_name(&self) -> &str { + self.metadata + .as_ref() + .map(|m| m.name.as_str()) + .unwrap_or("") + } +} + +impl ObjectLabels for Sandbox { + fn object_labels(&self) -> Option> { + self.metadata.as_ref().map(|m| m.labels.clone()) + } +} + +// Implementations for Provider +impl ObjectId for Provider { + fn object_id(&self) -> &str { + self.metadata.as_ref().map(|m| m.id.as_str()).unwrap_or("") + } +} + +impl ObjectName for Provider { + fn object_name(&self) -> &str { + self.metadata + .as_ref() + .map(|m| m.name.as_str()) + .unwrap_or("") + } +} + +impl ObjectLabels for Provider { + fn object_labels(&self) -> Option> { + self.metadata.as_ref().map(|m| m.labels.clone()) + } +} + +// Implementations for SshSession +impl ObjectId for SshSession { + fn object_id(&self) -> &str { + self.metadata.as_ref().map(|m| m.id.as_str()).unwrap_or("") + } +} + +impl ObjectName for SshSession { + fn object_name(&self) -> &str { + self.metadata + .as_ref() + .map(|m| m.name.as_str()) + .unwrap_or("") + } +} + +impl ObjectLabels 
for SshSession { + fn object_labels(&self) -> Option> { + self.metadata.as_ref().map(|m| m.labels.clone()) + } +} + +// Implementations for InferenceRoute +impl ObjectId for InferenceRoute { + fn object_id(&self) -> &str { + self.metadata.as_ref().map(|m| m.id.as_str()).unwrap_or("") + } +} + +impl ObjectName for InferenceRoute { + fn object_name(&self) -> &str { + self.metadata + .as_ref() + .map(|m| m.name.as_str()) + .unwrap_or("") + } +} + +impl ObjectLabels for InferenceRoute { + fn object_labels(&self) -> Option> { + self.metadata.as_ref().map(|m| m.labels.clone()) + } +} + +// Implementations for ObjectForTest (test-only proto type) +impl ObjectId for ObjectForTest { + fn object_id(&self) -> &str { + &self.id + } +} + +impl ObjectName for ObjectForTest { + fn object_name(&self) -> &str { + &self.name + } +} + +impl ObjectLabels for ObjectForTest { + fn object_labels(&self) -> Option> { + None + } +} diff --git a/crates/openshell-server/migrations/postgres/004_add_object_metadata.sql b/crates/openshell-server/migrations/postgres/004_add_object_metadata.sql new file mode 100644 index 000000000..1f222469b --- /dev/null +++ b/crates/openshell-server/migrations/postgres/004_add_object_metadata.sql @@ -0,0 +1,5 @@ +-- Add labels column to support Kubernetes-style object metadata +ALTER TABLE objects ADD COLUMN labels JSONB; + +-- Backfill existing rows with empty labels +UPDATE objects SET labels = '{}'::jsonb WHERE labels IS NULL; diff --git a/crates/openshell-server/migrations/sqlite/004_add_object_metadata.sql b/crates/openshell-server/migrations/sqlite/004_add_object_metadata.sql new file mode 100644 index 000000000..31536c153 --- /dev/null +++ b/crates/openshell-server/migrations/sqlite/004_add_object_metadata.sql @@ -0,0 +1,5 @@ +-- Add labels column to support Kubernetes-style object metadata +ALTER TABLE objects ADD COLUMN labels TEXT; + +-- Backfill existing rows with empty labels +UPDATE objects SET labels = '{}' WHERE labels IS NULL; diff --git 
a/crates/openshell-server/src/compute/mod.rs b/crates/openshell-server/src/compute/mod.rs index 35c72f80c..af3387e9a 100644 --- a/crates/openshell-server/src/compute/mod.rs +++ b/crates/openshell-server/src/compute/mod.rs @@ -292,13 +292,13 @@ impl ComputeRuntime { pub async fn create_sandbox(&self, sandbox: Sandbox) -> Result { let existing = self .store - .get_message_by_name::(&sandbox.name) + .get_message_by_name::(sandbox.object_name()) .await .map_err(|e| Status::internal(format!("fetch sandbox failed: {e}")))?; if existing.is_some() { return Err(Status::already_exists(format!( "sandbox '{}' already exists", - sandbox.name + sandbox.object_name() ))); } @@ -317,22 +317,31 @@ impl ComputeRuntime { .await { Ok(_) => { - self.sandbox_watch_bus.notify(&sandbox.id); + self.sandbox_watch_bus.notify(sandbox.object_id()); Ok(sandbox) } Err(status) if status.code() == Code::AlreadyExists => { - let _ = self.store.delete(Sandbox::object_type(), &sandbox.id).await; - self.sandbox_index.remove_sandbox(&sandbox.id); + let _ = self + .store + .delete(Sandbox::object_type(), sandbox.object_id()) + .await; + self.sandbox_index.remove_sandbox(sandbox.object_id()); Err(Status::already_exists("sandbox already exists")) } Err(status) if status.code() == Code::FailedPrecondition => { - let _ = self.store.delete(Sandbox::object_type(), &sandbox.id).await; - self.sandbox_index.remove_sandbox(&sandbox.id); + let _ = self + .store + .delete(Sandbox::object_type(), sandbox.object_id()) + .await; + self.sandbox_index.remove_sandbox(sandbox.object_id()); Err(Status::failed_precondition(status.message().to_string())) } Err(err) => { - let _ = self.store.delete(Sandbox::object_type(), &sandbox.id).await; - self.sandbox_index.remove_sandbox(&sandbox.id); + let _ = self + .store + .delete(Sandbox::object_type(), sandbox.object_id()) + .await; + self.sandbox_index.remove_sandbox(sandbox.object_id()); Err(Status::internal(format!( "create sandbox failed: {}", err.message() @@ -352,7 +361,7 @@ 
impl ComputeRuntime { return Err(Status::not_found("sandbox not found")); }; - let id = sandbox.id.clone(); + let id = sandbox.object_id().to_string(); sandbox.phase = SandboxPhase::Deleting as i32; self.store .put_message(&sandbox) @@ -367,11 +376,11 @@ impl ComputeRuntime { && session.sandbox_id == id && let Err(e) = self .store - .delete(SshSession::object_type(), &session.id) + .delete(SshSession::object_type(), session.object_id()) .await { warn!( - session_id = %session.id, + session_id = %session.object_id(), error = %e, "Failed to delete SSH session during sandbox cleanup" ); @@ -504,7 +513,7 @@ impl ComputeRuntime { } }; - if backend_ids.contains(&sandbox.id) { + if backend_ids.contains(sandbox.object_id()) { continue; } @@ -573,19 +582,26 @@ impl ComputeRuntime { let session_connected = self.supervisor_sessions.has_session(&incoming.id); let mut phase = derive_phase(incoming.status.as_ref()); - let mut sandbox = existing.unwrap_or_else(|| Sandbox { - id: incoming.id.clone(), - name: incoming.name.clone(), - namespace: incoming.namespace.clone(), - spec: None, - status: None, - phase: SandboxPhase::Unknown as i32, - ..Default::default() + let mut sandbox = existing.unwrap_or_else(|| { + use crate::persistence::current_time_ms; + let now_ms = current_time_ms().unwrap_or(0); + Sandbox { + metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { + id: incoming.id.clone(), + name: incoming.name.clone(), + created_at_ms: now_ms, + labels: std::collections::HashMap::new(), + }), + spec: None, + status: None, + phase: SandboxPhase::Unknown as i32, + current_policy_version: 0, + } }); if session_connected && matches!(phase, SandboxPhase::Provisioning | SandboxPhase::Unknown) { - ensure_supervisor_ready_status(&mut status, &sandbox.name); + ensure_supervisor_ready_status(&mut status, sandbox.object_name()); phase = SandboxPhase::Ready; } @@ -619,8 +635,11 @@ impl ComputeRuntime { } } - sandbox.name = incoming.name; - sandbox.namespace = incoming.namespace; 
+ // Update metadata fields + if let Some(metadata) = sandbox.metadata.as_mut() { + metadata.name = incoming.name; + } + // Note: namespace field removed from public Sandbox API - it remains internal to DriverSandbox sandbox.status = status; sandbox.phase = phase as i32; @@ -633,7 +652,7 @@ impl ComputeRuntime { .put_message(&sandbox) .await .map_err(|e| e.to_string())?; - self.sandbox_watch_bus.notify(&sandbox.id); + self.sandbox_watch_bus.notify(sandbox.object_id()); Ok(()) } @@ -667,11 +686,12 @@ impl ComputeRuntime { return Ok(()); } + let sandbox_name = sandbox.object_name().to_string(); if connected { - ensure_supervisor_ready_status(&mut sandbox.status, &sandbox.name); + ensure_supervisor_ready_status(&mut sandbox.status, &sandbox_name); sandbox.phase = SandboxPhase::Ready as i32; } else if current_phase == SandboxPhase::Ready { - ensure_supervisor_not_ready_status(&mut sandbox.status, &sandbox.name); + ensure_supervisor_not_ready_status(&mut sandbox.status, &sandbox_name); sandbox.phase = SandboxPhase::Provisioning as i32; } else { return Ok(()); @@ -765,19 +785,22 @@ impl ComputeRuntime { return Ok(()); } - if let Some(current) = self.get_driver_sandbox(&sandbox.id, &sandbox.name).await? { + if let Some(current) = self + .get_driver_sandbox(sandbox.object_id(), sandbox.object_name()) + .await? 
+ { return self .apply_sandbox_update_locked(current, Some(current_record)) .await; } info!( - sandbox_id = %sandbox.id, - sandbox_name = %sandbox.name, + sandbox_id = %sandbox.object_id(), + sandbox_name = %sandbox.object_name(), age_secs = age_ms / 1000, "Removing sandbox from store after it disappeared from the compute driver snapshot" ); - self.apply_deleted_locked(&sandbox.id).await + self.apply_deleted_locked(sandbox.object_id()).await } async fn get_driver_sandbox( @@ -802,9 +825,9 @@ impl ComputeRuntime { fn driver_sandbox_from_public(sandbox: &Sandbox) -> DriverSandbox { DriverSandbox { - id: sandbox.id.clone(), - name: sandbox.name.clone(), - namespace: sandbox.namespace.clone(), + id: sandbox.object_id().to_string(), + name: sandbox.object_name().to_string(), + namespace: String::new(), // Namespace is set by the driver based on its config spec: sandbox.spec.as_ref().map(driver_sandbox_spec_from_public), status: sandbox .status @@ -980,18 +1003,6 @@ impl ObjectType for Sandbox { } } -impl ObjectId for Sandbox { - fn object_id(&self) -> &str { - &self.id - } -} - -impl ObjectName for Sandbox { - fn object_name(&self) -> &str { - &self.name - } -} - fn compute_error_from_status(status: Status) -> ComputeError { match status.code() { Code::AlreadyExists => ComputeError::AlreadyExists, @@ -1294,9 +1305,12 @@ mod tests { fn sandbox_record(id: &str, name: &str, phase: SandboxPhase) -> Sandbox { Sandbox { - id: id.to_string(), - name: name.to_string(), - namespace: "default".to_string(), + metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { + id: id.to_string(), + name: name.to_string(), + created_at_ms: 1000000, + labels: std::collections::HashMap::new(), + }), phase: phase as i32, ..Default::default() } @@ -1837,7 +1851,7 @@ mod tests { runtime.store.put_message(&sandbox).await.unwrap(); runtime.sandbox_index.update_from_sandbox(&sandbox); - let mut watch_rx = runtime.sandbox_watch_bus.subscribe(&sandbox.id); + let mut watch_rx = 
runtime.sandbox_watch_bus.subscribe(sandbox.object_id()); runtime .reconcile_store_with_backend(Duration::ZERO) @@ -1847,7 +1861,7 @@ mod tests { assert!( runtime .store - .get_message::(&sandbox.id) + .get_message::(sandbox.object_id()) .await .unwrap() .is_none() @@ -1855,7 +1869,7 @@ mod tests { assert!( runtime .sandbox_index - .sandbox_id_for_sandbox_name(&sandbox.name) + .sandbox_id_for_sandbox_name(sandbox.object_name()) .is_none() ); let _ = watch_rx.try_recv(); diff --git a/crates/openshell-server/src/grpc/mod.rs b/crates/openshell-server/src/grpc/mod.rs index 9eab56d47..969204dad 100644 --- a/crates/openshell-server/src/grpc/mod.rs +++ b/crates/openshell-server/src/grpc/mod.rs @@ -115,6 +115,17 @@ fn current_time_ms() -> Result { Ok(i64::try_from(now.as_millis()).unwrap_or(i64::MAX)) } +/// Validate that object metadata is present and contains required fields. +/// +/// This is a crate-level helper that wraps the validation module's implementation. +/// Use this from modules outside of `grpc` that need to validate metadata. 
+pub(crate) fn validate_object_metadata( + metadata: Option<&openshell_core::proto::datamodel::v1::ObjectMeta>, + resource_type: &str, +) -> Result<(), Status> { + validation::validate_object_metadata(metadata, resource_type) +} + // --------------------------------------------------------------------------- // Service struct // --------------------------------------------------------------------------- diff --git a/crates/openshell-server/src/grpc/policy.rs b/crates/openshell-server/src/grpc/policy.rs index 8ef8cb5c7..b0cfc6f38 100644 --- a/crates/openshell-server/src/grpc/policy.rs +++ b/crates/openshell-server/src/grpc/policy.rs @@ -11,7 +11,7 @@ #![allow(clippy::items_after_statements)] // DB_PORTS const inside function use crate::ServerState; -use crate::persistence::{DraftChunkRecord, PolicyRecord, Store}; +use crate::persistence::{DraftChunkRecord, ObjectId, ObjectName, PolicyRecord, Store}; use openshell_core::proto::policy_merge_operation; use openshell_core::proto::setting_value; use openshell_core::proto::{ @@ -663,7 +663,7 @@ pub(super) async fn handle_update_config( .await .map_err(|e| Status::internal(format!("fetch sandbox failed: {e}")))? 
.ok_or_else(|| Status::not_found("sandbox not found"))?; - let sandbox_id = sandbox.id.clone(); + let sandbox_id = sandbox.object_id().to_string(); if has_setting { let _settings_guard = state.settings_mutex.lock().await; @@ -692,7 +692,7 @@ pub(super) async fn handle_update_config( save_sandbox_settings( state.store.as_ref(), &sandbox_id, - &sandbox.name, + sandbox.object_name(), &sandbox_settings, ) .await?; @@ -725,7 +725,7 @@ pub(super) async fn handle_update_config( save_sandbox_settings( state.store.as_ref(), &sandbox_id, - &sandbox.name, + sandbox.object_name(), &sandbox_settings, ) .await?; @@ -764,7 +764,7 @@ pub(super) async fn handle_update_config( state.sandbox_watch_bus.notify(&sandbox_id); emit_gateway_policy_audit_log( &sandbox_id, - &sandbox.name, + sandbox.object_name(), "merged", format!( "gateway merged {} incremental policy operation(s)", @@ -776,7 +776,7 @@ pub(super) async fn handle_update_config( for operation in &merge_ops { emit_gateway_policy_audit_log( &sandbox_id, - &sandbox.name, + sandbox.object_name(), "merged", format!( "gateway merged incremental policy op: {}", @@ -913,7 +913,10 @@ pub(super) async fn handle_get_sandbox_policy_status( .await .map_err(|e| Status::internal(format!("fetch sandbox failed: {e}")))? .ok_or_else(|| Status::not_found("sandbox not found"))?; - (sandbox.id, sandbox.current_policy_version) + ( + sandbox.object_id().to_string(), + sandbox.current_policy_version, + ) }; let record = if req.version == 0 { @@ -961,7 +964,7 @@ pub(super) async fn handle_list_sandbox_policies( .await .map_err(|e| Status::internal(format!("fetch sandbox failed: {e}")))? .ok_or_else(|| Status::not_found("sandbox not found"))?; - sandbox.id + sandbox.object_id().to_string() }; let limit = clamp_limit(req.limit, 50, MAX_PAGE_SIZE); @@ -1148,7 +1151,7 @@ pub(super) async fn handle_submit_policy_analysis( .await .map_err(|e| Status::internal(format!("fetch sandbox failed: {e}")))? 
.ok_or_else(|| Status::not_found("sandbox not found"))?; - let sandbox_id = sandbox.id.clone(); + let sandbox_id = sandbox.object_id().to_string(); let current_version = state .store @@ -1263,7 +1266,7 @@ pub(super) async fn handle_get_draft_policy( .await .map_err(|e| Status::internal(format!("fetch sandbox failed: {e}")))? .ok_or_else(|| Status::not_found("sandbox not found"))?; - let sandbox_id = sandbox.id.clone(); + let sandbox_id = sandbox.object_id().to_string(); let status_filter = if req.status_filter.is_empty() { None @@ -1325,7 +1328,7 @@ pub(super) async fn handle_approve_draft_chunk( .await .map_err(|e| Status::internal(format!("fetch sandbox failed: {e}")))? .ok_or_else(|| Status::not_found("sandbox not found"))?; - let sandbox_id = sandbox.id.clone(); + let sandbox_id = sandbox.object_id().to_string(); let chunk = state .store @@ -1367,7 +1370,7 @@ pub(super) async fn handle_approve_draft_chunk( state.sandbox_watch_bus.notify(&sandbox_id); emit_gateway_policy_audit_log( &sandbox_id, - &sandbox.name, + sandbox.object_name(), "approved", format!( "gateway approved draft chunk {}: {chunk_summary}", @@ -1410,7 +1413,7 @@ pub(super) async fn handle_reject_draft_chunk( .await .map_err(|e| Status::internal(format!("fetch sandbox failed: {e}")))? .ok_or_else(|| Status::not_found("sandbox not found"))?; - let sandbox_id = sandbox.id.clone(); + let sandbox_id = sandbox.object_id().to_string(); let chunk = state .store @@ -1444,7 +1447,7 @@ pub(super) async fn handle_reject_draft_chunk( let (version, hash) = remove_chunk_from_policy(state, &sandbox_id, &chunk).await?; emit_gateway_policy_audit_log( &sandbox_id, - &sandbox.name, + sandbox.object_name(), "removed", format!( "gateway removed previously approved draft chunk {}: remove-binary {} {}", @@ -1485,7 +1488,7 @@ pub(super) async fn handle_approve_all_draft_chunks( .await .map_err(|e| Status::internal(format!("fetch sandbox failed: {e}")))? 
.ok_or_else(|| Status::not_found("sandbox not found"))?; - let sandbox_id = sandbox.id.clone(); + let sandbox_id = sandbox.object_id().to_string(); let pending_chunks = state .store @@ -1547,7 +1550,7 @@ pub(super) async fn handle_approve_all_draft_chunks( emit_gateway_policy_audit_log( &sandbox_id, - &sandbox.name, + sandbox.object_name(), "approved", format!("gateway approved draft chunk {}: {chunk_summary}", chunk.id), version, @@ -1559,7 +1562,7 @@ pub(super) async fn handle_approve_all_draft_chunks( state.sandbox_watch_bus.notify(&sandbox_id); emit_gateway_policy_audit_log( &sandbox_id, - &sandbox.name, + sandbox.object_name(), "merged", format!( "gateway bulk-approved {chunks_approved} draft chunk(s) and skipped {chunks_skipped}" @@ -1654,7 +1657,7 @@ pub(super) async fn handle_undo_draft_chunk( .await .map_err(|e| Status::internal(format!("fetch sandbox failed: {e}")))? .ok_or_else(|| Status::not_found("sandbox not found"))?; - let sandbox_id = sandbox.id.clone(); + let sandbox_id = sandbox.object_id().to_string(); let chunk = state .store @@ -1690,7 +1693,7 @@ pub(super) async fn handle_undo_draft_chunk( state.sandbox_watch_bus.notify(&sandbox_id); emit_gateway_policy_audit_log( &sandbox_id, - &sandbox.name, + sandbox.object_name(), "removed", format!( "gateway reverted approved draft chunk {}: remove-binary {} {}", @@ -1730,7 +1733,7 @@ pub(super) async fn handle_clear_draft_chunks( .await .map_err(|e| Status::internal(format!("fetch sandbox failed: {e}")))? .ok_or_else(|| Status::not_found("sandbox not found"))?; - let sandbox_id = sandbox.id.clone(); + let sandbox_id = sandbox.object_id().to_string(); let deleted = state .store @@ -1766,7 +1769,7 @@ pub(super) async fn handle_get_draft_history( .await .map_err(|e| Status::internal(format!("fetch sandbox failed: {e}")))? 
.ok_or_else(|| Status::not_found("sandbox not found"))?; - let sandbox_id = sandbox.id.clone(); + let sandbox_id = sandbox.object_id().to_string(); let all_chunks = state .store @@ -2477,7 +2480,7 @@ async fn save_settings_record( let payload = serde_json::to_vec(settings) .map_err(|e| Status::internal(format!("encode settings payload failed: {e}")))?; store - .put(object_type, id, name, &payload) + .put(object_type, id, name, &payload, None) .await .map_err(|e| Status::internal(format!("persist settings failed: {e}")))?; Ok(()) @@ -2590,9 +2593,12 @@ mod tests { let store = Store::connect("sqlite::memory:").await.unwrap(); let sandbox = Sandbox { - id: "sb-no-policy".to_string(), - name: "no-policy-sandbox".to_string(), - namespace: "default".to_string(), + metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { + id: "sb-no-policy".to_string(), + name: "no-policy-sandbox".to_string(), + created_at_ms: 1000000, + labels: std::collections::HashMap::new(), + }), spec: Some(SandboxSpec { policy: None, ..Default::default() @@ -2617,9 +2623,12 @@ mod tests { let store = Store::connect("sqlite::memory:").await.unwrap(); let sandbox = Sandbox { - id: "sb-backfill".to_string(), - name: "backfill-sandbox".to_string(), - namespace: "default".to_string(), + metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { + id: "sb-backfill".to_string(), + name: "backfill-sandbox".to_string(), + created_at_ms: 1000000, + labels: std::collections::HashMap::new(), + }), spec: Some(SandboxSpec { policy: None, ..Default::default() diff --git a/crates/openshell-server/src/grpc/provider.rs b/crates/openshell-server/src/grpc/provider.rs index bbf8f93cc..2e3876fb8 100644 --- a/crates/openshell-server/src/grpc/provider.rs +++ b/crates/openshell-server/src/grpc/provider.rs @@ -5,7 +5,7 @@ #![allow(clippy::result_large_err)] // gRPC handlers return Result, Status> -use crate::persistence::{ObjectId, ObjectName, ObjectType, Store, generate_name}; +use 
crate::persistence::{ObjectType, Store, generate_name}; use openshell_core::proto::Provider; use prost::Message; use tonic::Status; @@ -33,9 +33,33 @@ pub(super) async fn create_provider_record( store: &Store, mut provider: Provider, ) -> Result { - if provider.name.is_empty() { - provider.name = generate_name(); + use crate::persistence::{ObjectName, current_time_ms}; + + // Initialize metadata if not present + if provider.metadata.is_none() { + let now_ms = current_time_ms() + .map_err(|e| Status::internal(format!("failed to get current time: {e}")))?; + provider.metadata = Some(openshell_core::proto::datamodel::v1::ObjectMeta { + id: uuid::Uuid::new_v4().to_string(), + name: generate_name(), + created_at_ms: now_ms, + labels: std::collections::HashMap::new(), + }); + } + + // Auto-generate name if empty + if let Some(metadata) = provider.metadata.as_mut() { + if metadata.name.is_empty() { + metadata.name = generate_name(); + } + if metadata.id.is_empty() { + metadata.id = uuid::Uuid::new_v4().to_string(); + } } + + // Ensure metadata is present and valid (must be non-None with non-empty id/name) + super::validation::validate_object_metadata(provider.metadata.as_ref(), "provider")?; + if provider.r#type.trim().is_empty() { return Err(Status::invalid_argument("provider.type is required")); } @@ -49,7 +73,7 @@ pub(super) async fn create_provider_record( validate_provider_fields(&provider)?; let existing = store - .get_message_by_name::(&provider.name) + .get_message_by_name::(provider.object_name()) .await .map_err(|e| Status::internal(format!("fetch provider failed: {e}")))?; @@ -57,8 +81,6 @@ pub(super) async fn create_provider_record( return Err(Status::already_exists("provider already exists")); } - provider.id = uuid::Uuid::new_v4().to_string(); - store .put_message(&provider) .await @@ -104,12 +126,14 @@ pub(super) async fn update_provider_record( store: &Store, provider: Provider, ) -> Result { - if provider.name.is_empty() { + use 
crate::persistence::ObjectName; + + if provider.object_name().is_empty() { return Err(Status::invalid_argument("provider.name is required")); } let existing = store - .get_message_by_name::(&provider.name) + .get_message_by_name::(provider.object_name()) .await .map_err(|e| Status::internal(format!("fetch provider failed: {e}")))?; @@ -127,13 +151,15 @@ pub(super) async fn update_provider_record( } let updated = Provider { - id: existing.id, - name: existing.name, + metadata: existing.metadata, r#type: existing.r#type, credentials: merge_map(existing.credentials, provider.credentials), config: merge_map(existing.config, provider.config), }; + // Ensure metadata is valid (defense in depth - existing.metadata should always be valid) + super::validation::validate_object_metadata(updated.metadata.as_ref(), "provider")?; + validate_provider_fields(&updated)?; store @@ -241,18 +267,6 @@ impl ObjectType for Provider { } } -impl ObjectId for Provider { - fn object_id(&self) -> &str { - &self.id - } -} - -impl ObjectName for Provider { - fn object_name(&self) -> &str { - &self.name - } -} - // --------------------------------------------------------------------------- // Handler wrappers called from the trait impl in mod.rs // --------------------------------------------------------------------------- @@ -336,6 +350,7 @@ pub(super) async fn handle_delete_provider( mod tests { use super::*; use crate::grpc::MAX_MAP_KEY_LEN; + use openshell_core::{ObjectId, ObjectName}; use std::collections::HashMap; use tonic::Code; @@ -358,8 +373,12 @@ mod tests { fn provider_with_values(name: &str, provider_type: &str) -> Provider { Provider { - id: String::new(), - name: name.to_string(), + metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { + id: String::new(), + name: name.to_string(), + created_at_ms: 0, + labels: HashMap::new(), + }), r#type: provider_type.to_string(), credentials: [ ("API_TOKEN".to_string(), "token-123".to_string()), @@ -386,26 +405,30 @@ mod tests { 
let persisted = create_provider_record(&store, created.clone()) .await .unwrap(); - assert_eq!(persisted.name, "gitlab-local"); + assert_eq!(persisted.object_name(), "gitlab-local"); assert_eq!(persisted.r#type, "gitlab"); - assert!(!persisted.id.is_empty()); - let provider_id = persisted.id.clone(); + assert!(!persisted.object_id().is_empty()); + let provider_id = persisted.object_id().to_string(); let duplicate_err = create_provider_record(&store, created).await.unwrap_err(); assert_eq!(duplicate_err.code(), Code::AlreadyExists); let loaded = get_provider_record(&store, "gitlab-local").await.unwrap(); - assert_eq!(loaded.id, provider_id); + assert_eq!(loaded.object_id(), provider_id); let listed = list_provider_records(&store, 100, 0).await.unwrap(); assert_eq!(listed.len(), 1); - assert_eq!(listed[0].name, "gitlab-local"); + assert_eq!(listed[0].object_name(), "gitlab-local"); let updated = update_provider_record( &store, Provider { - id: String::new(), - name: "gitlab-local".to_string(), + metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { + id: String::new(), + name: "gitlab-local".to_string(), + created_at_ms: 1000000, + labels: std::collections::HashMap::new(), + }), r#type: "gitlab".to_string(), credentials: std::iter::once(( "API_TOKEN".to_string(), @@ -418,7 +441,7 @@ mod tests { ) .await .unwrap(); - assert_eq!(updated.id, provider_id); + assert_eq!(updated.object_id(), provider_id); assert_eq!(updated.credentials.len(), 2); assert_eq!( updated.credentials.get("API_TOKEN"), @@ -473,8 +496,12 @@ mod tests { let create_missing_type = create_provider_record( &store, Provider { - id: String::new(), - name: "bad-provider".to_string(), + metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { + id: String::new(), + name: "bad-provider".to_string(), + created_at_ms: 1000000, + labels: HashMap::new(), + }), r#type: String::new(), credentials: HashMap::new(), config: HashMap::new(), @@ -493,8 +520,12 @@ mod tests { let update_missing_err 
= update_provider_record( &store, Provider { - id: String::new(), - name: "missing".to_string(), + metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { + id: String::new(), + name: "missing".to_string(), + created_at_ms: 1000000, + labels: HashMap::new(), + }), r#type: String::new(), credentials: HashMap::new(), config: HashMap::new(), @@ -517,8 +548,12 @@ mod tests { let updated = update_provider_record( &store, Provider { - id: String::new(), - name: "noop-test".to_string(), + metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { + id: String::new(), + name: "noop-test".to_string(), + created_at_ms: 1000000, + labels: HashMap::new(), + }), r#type: String::new(), credentials: HashMap::new(), config: HashMap::new(), @@ -527,7 +562,7 @@ mod tests { .await .unwrap(); - assert_eq!(updated.id, persisted.id); + assert_eq!(updated.object_id(), persisted.object_id()); assert_eq!(updated.r#type, "nvidia"); assert_eq!(updated.credentials.len(), 2); assert_eq!( @@ -560,8 +595,12 @@ mod tests { let updated = update_provider_record( &store, Provider { - id: String::new(), - name: "delete-key-test".to_string(), + metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { + id: String::new(), + name: "delete-key-test".to_string(), + created_at_ms: 0, + labels: HashMap::new(), + }), r#type: String::new(), credentials: std::iter::once(("SECONDARY".to_string(), String::new())).collect(), config: std::iter::once(("region".to_string(), String::new())).collect(), @@ -607,8 +646,12 @@ mod tests { let updated = update_provider_record( &store, Provider { - id: String::new(), - name: "type-preserve-test".to_string(), + metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { + id: String::new(), + name: "type-preserve-test".to_string(), + created_at_ms: 0, + labels: HashMap::new(), + }), r#type: String::new(), credentials: HashMap::new(), config: HashMap::new(), @@ -632,8 +675,12 @@ mod tests { let err = update_provider_record( &store, Provider { - 
id: String::new(), - name: "type-change-test".to_string(), + metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { + id: String::new(), + name: "type-change-test".to_string(), + created_at_ms: 0, + labels: HashMap::new(), + }), r#type: "openai".to_string(), credentials: HashMap::new(), config: HashMap::new(), @@ -659,8 +706,12 @@ mod tests { let err = update_provider_record( &store, Provider { - id: String::new(), - name: "validate-merge-test".to_string(), + metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { + id: String::new(), + name: "validate-merge-test".to_string(), + created_at_ms: 0, + labels: HashMap::new(), + }), r#type: String::new(), credentials: std::iter::once((oversized_key, "value".to_string())).collect(), config: HashMap::new(), @@ -683,8 +734,12 @@ mod tests { async fn resolve_provider_env_injects_credentials() { let store = Store::connect("sqlite::memory:").await.unwrap(); let provider = Provider { - id: String::new(), - name: "claude-local".to_string(), + metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { + id: String::new(), + name: "claude-local".to_string(), + created_at_ms: 0, + labels: HashMap::new(), + }), r#type: "claude".to_string(), credentials: [ ("ANTHROPIC_API_KEY".to_string(), "sk-abc".to_string()), @@ -722,8 +777,12 @@ mod tests { async fn resolve_provider_env_skips_invalid_credential_keys() { let store = Store::connect("sqlite::memory:").await.unwrap(); let provider = Provider { - id: String::new(), - name: "test-provider".to_string(), + metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { + id: String::new(), + name: "test-provider".to_string(), + created_at_ms: 0, + labels: HashMap::new(), + }), r#type: "test".to_string(), credentials: [ ("VALID_KEY".to_string(), "value".to_string()), @@ -750,8 +809,12 @@ mod tests { create_provider_record( &store, Provider { - id: String::new(), - name: "claude-local".to_string(), + metadata: 
Some(openshell_core::proto::datamodel::v1::ObjectMeta { + id: String::new(), + name: "claude-local".to_string(), + created_at_ms: 0, + labels: HashMap::new(), + }), r#type: "claude".to_string(), credentials: std::iter::once(( "ANTHROPIC_API_KEY".to_string(), @@ -766,8 +829,12 @@ mod tests { create_provider_record( &store, Provider { - id: String::new(), - name: "gitlab-local".to_string(), + metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { + id: String::new(), + name: "gitlab-local".to_string(), + created_at_ms: 0, + labels: HashMap::new(), + }), r#type: "gitlab".to_string(), credentials: std::iter::once(("GITLAB_TOKEN".to_string(), "glpat-xyz".to_string())) .collect(), @@ -793,8 +860,12 @@ mod tests { create_provider_record( &store, Provider { - id: String::new(), - name: "provider-a".to_string(), + metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { + id: String::new(), + name: "provider-a".to_string(), + created_at_ms: 0, + labels: HashMap::new(), + }), r#type: "claude".to_string(), credentials: std::iter::once(("SHARED_KEY".to_string(), "first-value".to_string())) .collect(), @@ -806,8 +877,12 @@ mod tests { create_provider_record( &store, Provider { - id: String::new(), - name: "provider-b".to_string(), + metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { + id: String::new(), + name: "provider-b".to_string(), + created_at_ms: 0, + labels: HashMap::new(), + }), r#type: "gitlab".to_string(), credentials: std::iter::once(( "SHARED_KEY".to_string(), @@ -838,8 +913,12 @@ mod tests { create_provider_record( &store, Provider { - id: String::new(), - name: "my-claude".to_string(), + metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { + id: String::new(), + name: "my-claude".to_string(), + created_at_ms: 1000000, + labels: HashMap::new(), + }), r#type: "claude".to_string(), credentials: std::iter::once(( "ANTHROPIC_API_KEY".to_string(), @@ -853,9 +932,12 @@ mod tests { .unwrap(); let sandbox = Sandbox { - id: 
"sandbox-001".to_string(), - name: "test-sandbox".to_string(), - namespace: "default".to_string(), + metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { + id: "sandbox-001".to_string(), + name: "test-sandbox".to_string(), + created_at_ms: 1000000, + labels: std::collections::HashMap::new(), + }), spec: Some(SandboxSpec { providers: vec!["my-claude".to_string()], ..SandboxSpec::default() @@ -886,9 +968,12 @@ mod tests { let store = Store::connect("sqlite::memory:").await.unwrap(); let sandbox = Sandbox { - id: "sandbox-002".to_string(), - name: "empty-sandbox".to_string(), - namespace: "default".to_string(), + metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { + id: "sandbox-002".to_string(), + name: "empty-sandbox".to_string(), + created_at_ms: 1000000, + labels: std::collections::HashMap::new(), + }), spec: Some(SandboxSpec::default()), status: None, phase: SandboxPhase::Ready as i32, diff --git a/crates/openshell-server/src/grpc/sandbox.rs b/crates/openshell-server/src/grpc/sandbox.rs index 6b01b28f4..60bca2a65 100644 --- a/crates/openshell-server/src/grpc/sandbox.rs +++ b/crates/openshell-server/src/grpc/sandbox.rs @@ -54,6 +54,12 @@ pub(super) async fn handle_create_sandbox( // Validate field sizes before any I/O (fail fast on oversized payloads). validate_sandbox_spec(&request.name, &spec)?; + // Validate labels (keys and values must meet Kubernetes requirements). + for (key, value) in &request.labels { + crate::grpc::validation::validate_label_key(key)?; + crate::grpc::validation::validate_label_value(value)?; + } + // Validate provider names exist (fail fast). 
for name in &spec.providers { state @@ -84,18 +90,27 @@ pub(super) async fn handle_create_sandbox( } else { request.name.clone() }; - let namespace = state.config.sandbox_namespace.clone(); + + use crate::persistence::current_time_ms; + let now_ms = current_time_ms() + .map_err(|e| Status::internal(format!("failed to get current time: {e}")))?; let sandbox = Sandbox { - id: id.clone(), - name: name.clone(), - namespace, + metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { + id: id.clone(), + name: name.clone(), + created_at_ms: now_ms, + labels: request.labels.clone(), + }), spec: Some(spec), status: None, phase: SandboxPhase::Provisioning as i32, - ..Default::default() + current_policy_version: 0, }; + // Ensure metadata is valid (defense in depth - should always be true for server-constructed metadata) + super::validation::validate_object_metadata(sandbox.metadata.as_ref(), "sandbox")?; + state .compute .validate_sandbox_create(&sandbox) @@ -108,8 +123,8 @@ pub(super) async fn handle_create_sandbox( let sandbox = state.compute.create_sandbox(sandbox).await?; info!( - sandbox_id = %sandbox.id, - sandbox_name = %sandbox.name, + sandbox_id = %id, + sandbox_name = %name, "CreateSandbox request completed successfully" ); Ok(Response::new(SandboxResponse { @@ -144,17 +159,32 @@ pub(super) async fn handle_list_sandboxes( ) -> Result, Status> { let request = request.into_inner(); let limit = clamp_limit(request.limit, 100, MAX_PAGE_SIZE); - let records = state - .store - .list(Sandbox::object_type(), limit, request.offset) - .await - .map_err(|e| Status::internal(format!("list sandboxes failed: {e}")))?; + + // If label selector is provided, validate and use filtered list + let records = if !request.label_selector.is_empty() { + crate::grpc::validation::validate_label_selector(&request.label_selector)?; + state + .store + .list_with_selector( + Sandbox::object_type(), + &request.label_selector, + limit, + request.offset, + ) + .await + .map_err(|e| 
Status::internal(format!("list sandboxes with selector failed: {e}")))? + } else { + state + .store + .list(Sandbox::object_type(), limit, request.offset) + .await + .map_err(|e| Status::internal(format!("list sandboxes failed: {e}")))? + }; let mut sandboxes = Vec::with_capacity(records.len()); for record in records { - let mut sandbox = Sandbox::decode(record.payload.as_slice()) + let sandbox = Sandbox::decode(record.payload.as_slice()) .map_err(|e| Status::internal(format!("decode sandbox failed: {e}")))?; - sandbox.created_at_ms = record.created_at_ms; sandboxes.push(sandbox); } @@ -442,7 +472,7 @@ pub(super) async fn handle_exec_sandbox( // typically called during normal operation (not right after create). let (channel_id, relay_rx) = state .supervisor_sessions - .open_relay(&sandbox.id, std::time::Duration::from_secs(15)) + .open_relay(sandbox.object_id(), std::time::Duration::from_secs(15)) .await .map_err(|e| Status::unavailable(format!("supervisor relay failed: {e}")))?; @@ -451,7 +481,9 @@ pub(super) async fn handle_exec_sandbox( let stdin_payload = req.stdin; let timeout_seconds = req.timeout_seconds; let request_tty = req.tty; - let sandbox_id = sandbox.id; + + use openshell_core::ObjectId; + let sandbox_id = sandbox.object_id().to_string(); let (tx, rx) = mpsc::channel::>(256); tokio::spawn(async move { @@ -529,15 +561,21 @@ pub(super) async fn handle_create_ssh_session( 0 }; let session = SshSession { - id: token.clone(), + metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { + id: token.clone(), + name: generate_name(), + created_at_ms: now_ms, + labels: std::collections::HashMap::new(), + }), sandbox_id: req.sandbox_id.clone(), token: token.clone(), - created_at_ms: now_ms, revoked: false, - name: generate_name(), expires_at_ms, }; + // Ensure metadata is valid (defense in depth - should always be true for server-constructed metadata) + super::validation::validate_object_metadata(session.metadata.as_ref(), "ssh_session")?; + state 
.store .put_message(&session) diff --git a/crates/openshell-server/src/grpc/validation.rs b/crates/openshell-server/src/grpc/validation.rs index 1517c0577..5e4a48cb5 100644 --- a/crates/openshell-server/src/grpc/validation.rs +++ b/crates/openshell-server/src/grpc/validation.rs @@ -241,10 +241,14 @@ pub(super) fn validate_string_map( /// Validate field sizes on a `Provider` before persisting. pub(super) fn validate_provider_fields(provider: &Provider) -> Result<(), Status> { - if provider.name.len() > MAX_NAME_LEN { + let name_len = provider + .metadata + .as_ref() + .map(|m| m.name.len()) + .unwrap_or(0); + if name_len > MAX_NAME_LEN { return Err(Status::invalid_argument(format!( - "provider.name exceeds maximum length ({} > {MAX_NAME_LEN})", - provider.name.len() + "provider.name exceeds maximum length ({name_len} > {MAX_NAME_LEN})" ))); } if provider.r#type.len() > MAX_PROVIDER_TYPE_LEN { @@ -270,6 +274,252 @@ pub(super) fn validate_provider_fields(provider: &Provider) -> Result<(), Status Ok(()) } +// --------------------------------------------------------------------------- +// Label selector validation +// --------------------------------------------------------------------------- + +/// Validate a label selector string format. +/// +/// Format: "key1=value1,key2=value2" +/// Returns `INVALID_ARGUMENT` if the selector has invalid format. +/// Validate a label key according to Kubernetes requirements. 
+/// +/// Label keys have an optional prefix and required name, separated by `/`: +/// - Prefix (optional): DNS subdomain format, max 253 chars +/// - Name (required): alphanumeric + `-._`, max 63 chars, must start/end with alphanumeric +/// - Total length including `/` must not exceed 253 chars +/// +/// Examples: `app`, `kubernetes.io/app`, `example.com/my-label` +/// +/// See: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +pub(super) fn validate_label_key(key: &str) -> Result<(), Status> { + if key.is_empty() { + return Err(Status::invalid_argument("label key cannot be empty")); + } + + if key.len() > 253 { + return Err(Status::invalid_argument(format!( + "label key exceeds 253 characters: '{key}'" + ))); + } + + // Split into optional prefix and required name + let (prefix, name) = if let Some((p, n)) = key.split_once('/') { + (Some(p), n) + } else { + (None, key) + }; + + // Validate name segment (required, max 63 chars) + if name.is_empty() { + return Err(Status::invalid_argument(format!( + "label key name segment cannot be empty: '{key}'" + ))); + } + + if name.len() > 63 { + return Err(Status::invalid_argument(format!( + "label key name segment exceeds 63 characters: '{key}'" + ))); + } + + // Name must contain only alphanumeric, hyphens, underscores, and dots + if !name + .chars() + .all(|c| c.is_alphanumeric() || c == '-' || c == '_' || c == '.') + { + return Err(Status::invalid_argument(format!( + "label key name segment contains invalid characters (must be alphanumeric, '-', '_', or '.'): '{key}'" + ))); + } + + // Name must start and end with alphanumeric + let first = name.chars().next().unwrap(); // safe: we checked !is_empty() + let last = name.chars().last().unwrap(); + if !first.is_alphanumeric() { + return Err(Status::invalid_argument(format!( + "label key name segment must start with alphanumeric character: '{key}'" + ))); + } + if !last.is_alphanumeric() { + return Err(Status::invalid_argument(format!( + "label key 
name segment must end with alphanumeric character: '{key}'" + ))); + } + + // Validate prefix if present (DNS subdomain format) + if let Some(prefix) = prefix { + if prefix.is_empty() { + return Err(Status::invalid_argument(format!( + "label key prefix cannot be empty when '/' is present: '{key}'" + ))); + } + + if prefix.len() > 253 { + return Err(Status::invalid_argument(format!( + "label key prefix exceeds 253 characters: '{key}'" + ))); + } + + // DNS subdomain: lowercase alphanumeric, hyphens, and dots only + if !prefix + .chars() + .all(|c| c.is_ascii_lowercase() || c.is_ascii_digit() || c == '-' || c == '.') + { + return Err(Status::invalid_argument(format!( + "label key prefix must be a DNS subdomain (lowercase alphanumeric, '-', '.'): '{key}'" + ))); + } + + // Must not start or end with hyphen or dot + if prefix.starts_with('-') + || prefix.starts_with('.') + || prefix.ends_with('-') + || prefix.ends_with('.') + { + return Err(Status::invalid_argument(format!( + "label key prefix cannot start or end with '-' or '.': '{key}'" + ))); + } + + // Must not contain consecutive dots + if prefix.contains("..") { + return Err(Status::invalid_argument(format!( + "label key prefix cannot contain consecutive dots: '{key}'" + ))); + } + } + + Ok(()) +} + +/// Validate a label value according to Kubernetes requirements. 
+/// +/// Label values: +/// - Can be empty (Kubernetes allows empty values) +/// - Max 63 characters +/// - If non-empty, must contain only alphanumeric, hyphens, underscores, and dots +/// - If non-empty, must start and end with alphanumeric character +/// +/// See: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +pub(super) fn validate_label_value(value: &str) -> Result<(), Status> { + // Empty values are allowed in Kubernetes + if value.is_empty() { + return Ok(()); + } + + if value.len() > 63 { + return Err(Status::invalid_argument(format!( + "label value exceeds 63 characters: '{value}'" + ))); + } + + // Must contain only alphanumeric, hyphens, underscores, and dots + if !value + .chars() + .all(|c| c.is_alphanumeric() || c == '-' || c == '_' || c == '.') + { + return Err(Status::invalid_argument(format!( + "label value contains invalid characters (must be alphanumeric, '-', '_', or '.'): '{value}'" + ))); + } + + // Must start and end with alphanumeric + let first = value.chars().next().unwrap(); // safe: we checked !is_empty() + let last = value.chars().last().unwrap(); + if !first.is_alphanumeric() { + return Err(Status::invalid_argument(format!( + "label value must start with alphanumeric character: '{value}'" + ))); + } + if !last.is_alphanumeric() { + return Err(Status::invalid_argument(format!( + "label value must end with alphanumeric character: '{value}'" + ))); + } + + Ok(()) +} + +/// Validate a label selector string format. +/// +/// Format: "key1=value1,key2=value2" +/// Each key and value is validated using `validate_label_key` and `validate_label_value`. +/// Empty selectors are allowed. Trailing commas are ignored. 
+pub(super) fn validate_label_selector(selector: &str) -> Result<(), Status> { + if selector.trim().is_empty() { + return Ok(()); + } + + for pair in selector.split(',') { + let pair = pair.trim(); + if pair.is_empty() { + continue; + } + + let parts: Vec<&str> = pair.splitn(2, '=').collect(); + if parts.len() != 2 { + return Err(Status::invalid_argument(format!( + "invalid label selector: expected 'key=value', got '{pair}'" + ))); + } + + let key = parts[0].trim(); + let value = parts[1].trim(); + + if key.is_empty() { + return Err(Status::invalid_argument(format!( + "invalid label selector: key cannot be empty in '{pair}'" + ))); + } + + // Validate key and value using the Kubernetes-compliant validators + validate_label_key(key)?; + validate_label_value(value)?; + } + + Ok(()) +} + +// --------------------------------------------------------------------------- +// Object metadata validation +// --------------------------------------------------------------------------- + +/// Validate that object metadata is present and contains required fields. +/// +/// This ensures that all resources have valid metadata with non-empty ID and name, +/// preventing issues where missing metadata could lead to security vulnerabilities +/// (e.g., empty string IDs/names matching unintended resources). +/// +/// Returns `INVALID_ARGUMENT` if metadata is missing or invalid. 
+pub(super) fn validate_object_metadata( + metadata: Option<&openshell_core::proto::datamodel::v1::ObjectMeta>, + resource_type: &str, +) -> Result<(), Status> { + let metadata = metadata + .ok_or_else(|| Status::invalid_argument(format!("{resource_type} metadata is required")))?; + + if metadata.id.is_empty() { + return Err(Status::invalid_argument(format!( + "{resource_type} metadata.id cannot be empty" + ))); + } + + if metadata.name.is_empty() { + return Err(Status::invalid_argument(format!( + "{resource_type} metadata.name cannot be empty" + ))); + } + + // Validate all labels in metadata + for (key, value) in &metadata.labels { + validate_label_key(key)?; + validate_label_value(value)?; + } + + Ok(()) +} + // --------------------------------------------------------------------------- // Policy validation // --------------------------------------------------------------------------- @@ -617,28 +867,44 @@ mod tests { std::iter::once(("KEY".to_string(), "val".to_string())).collect() } + fn make_test_provider( + name: &str, + provider_type: &str, + credentials: HashMap, + config: HashMap, + ) -> Provider { + Provider { + metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { + id: String::new(), + name: name.to_string(), + created_at_ms: 1000000, + labels: HashMap::new(), + }), + r#type: provider_type.to_string(), + credentials, + config, + } + } + #[test] fn validate_provider_fields_accepts_valid() { - let provider = Provider { - id: String::new(), - name: "my-provider".to_string(), - r#type: "claude".to_string(), - credentials: one_credential(), - config: std::iter::once(("endpoint".to_string(), "https://example.com".to_string())) - .collect(), - }; + let provider = make_test_provider( + "my-provider", + "claude", + one_credential(), + std::iter::once(("endpoint".to_string(), "https://example.com".to_string())).collect(), + ); assert!(validate_provider_fields(&provider).is_ok()); } #[test] fn validate_provider_fields_rejects_over_limit_name() { - let 
provider = Provider { - id: String::new(), - name: "a".repeat(MAX_NAME_LEN + 1), - r#type: "claude".to_string(), - credentials: one_credential(), - config: HashMap::new(), - }; + let provider = make_test_provider( + &"a".repeat(MAX_NAME_LEN + 1), + "claude", + one_credential(), + HashMap::new(), + ); let err = validate_provider_fields(&provider).unwrap_err(); assert_eq!(err.code(), Code::InvalidArgument); assert!(err.message().contains("provider.name")); @@ -646,13 +912,12 @@ mod tests { #[test] fn validate_provider_fields_rejects_over_limit_type() { - let provider = Provider { - id: String::new(), - name: "ok".to_string(), - r#type: "x".repeat(MAX_PROVIDER_TYPE_LEN + 1), - credentials: one_credential(), - config: HashMap::new(), - }; + let provider = make_test_provider( + "ok", + &"x".repeat(MAX_PROVIDER_TYPE_LEN + 1), + one_credential(), + HashMap::new(), + ); let err = validate_provider_fields(&provider).unwrap_err(); assert_eq!(err.code(), Code::InvalidArgument); assert!(err.message().contains("provider.type")); @@ -663,13 +928,7 @@ mod tests { let creds: HashMap = (0..=MAX_PROVIDER_CREDENTIALS_ENTRIES) .map(|i| (format!("K{i}"), "v".to_string())) .collect(); - let provider = Provider { - id: String::new(), - name: "ok".to_string(), - r#type: "claude".to_string(), - credentials: creds, - config: HashMap::new(), - }; + let provider = make_test_provider("ok", "claude", creds, HashMap::new()); let err = validate_provider_fields(&provider).unwrap_err(); assert_eq!(err.code(), Code::InvalidArgument); assert!(err.message().contains("provider.credentials")); @@ -680,13 +939,7 @@ mod tests { let config: HashMap = (0..=MAX_PROVIDER_CONFIG_ENTRIES) .map(|i| (format!("K{i}"), "v".to_string())) .collect(); - let provider = Provider { - id: String::new(), - name: "ok".to_string(), - r#type: "claude".to_string(), - credentials: one_credential(), - config, - }; + let provider = make_test_provider("ok", "claude", one_credential(), config); let err = 
validate_provider_fields(&provider).unwrap_err(); assert_eq!(err.code(), Code::InvalidArgument); assert!(err.message().contains("provider.config")); @@ -694,13 +947,12 @@ mod tests { #[test] fn validate_provider_fields_at_limit_name_accepted() { - let provider = Provider { - id: String::new(), - name: "a".repeat(MAX_NAME_LEN), - r#type: "claude".to_string(), - credentials: one_credential(), - config: HashMap::new(), - }; + let provider = make_test_provider( + &"a".repeat(MAX_NAME_LEN), + "claude", + one_credential(), + HashMap::new(), + ); assert!(validate_provider_fields(&provider).is_ok()); } @@ -708,13 +960,7 @@ mod tests { fn validate_provider_fields_rejects_oversized_credential_key() { let mut creds = HashMap::new(); creds.insert("k".repeat(MAX_MAP_KEY_LEN + 1), "v".to_string()); - let provider = Provider { - id: String::new(), - name: "ok".to_string(), - r#type: "claude".to_string(), - credentials: creds, - config: HashMap::new(), - }; + let provider = make_test_provider("ok", "claude", creds, HashMap::new()); let err = validate_provider_fields(&provider).unwrap_err(); assert_eq!(err.code(), Code::InvalidArgument); assert!(err.message().contains("key")); @@ -724,18 +970,300 @@ mod tests { fn validate_provider_fields_rejects_oversized_config_value() { let mut config = HashMap::new(); config.insert("k".to_string(), "v".repeat(MAX_MAP_VALUE_LEN + 1)); - let provider = Provider { - id: String::new(), - name: "ok".to_string(), - r#type: "claude".to_string(), - credentials: one_credential(), - config, - }; + let provider = make_test_provider("ok", "claude", one_credential(), config); let err = validate_provider_fields(&provider).unwrap_err(); assert_eq!(err.code(), Code::InvalidArgument); assert!(err.message().contains("value")); } + // ---- Label selector validation ---- + + // ---- Label key validation ---- + + #[test] + fn validate_label_key_accepts_simple_names() { + assert!(validate_label_key("app").is_ok()); + assert!(validate_label_key("my-app").is_ok()); + 
assert!(validate_label_key("my_app").is_ok()); + assert!(validate_label_key("my.app").is_ok()); + assert!(validate_label_key("app123").is_ok()); + assert!(validate_label_key("a1-b2_c3.d4").is_ok()); + } + + #[test] + fn validate_label_key_accepts_prefixed_names() { + assert!(validate_label_key("kubernetes.io/app").is_ok()); + assert!(validate_label_key("example.com/my-label").is_ok()); + assert!(validate_label_key("sub.domain.example.com/name").is_ok()); + assert!(validate_label_key("a.b/c").is_ok()); + } + + #[test] + fn validate_label_key_rejects_empty() { + let err = validate_label_key("").unwrap_err(); + assert_eq!(err.code(), Code::InvalidArgument); + assert!(err.message().contains("cannot be empty")); + } + + #[test] + fn validate_label_key_rejects_name_starting_with_hyphen() { + let err = validate_label_key("-app").unwrap_err(); + assert_eq!(err.code(), Code::InvalidArgument); + assert!(err.message().contains("must start with alphanumeric")); + } + + #[test] + fn validate_label_key_rejects_name_ending_with_hyphen() { + let err = validate_label_key("app-").unwrap_err(); + assert_eq!(err.code(), Code::InvalidArgument); + assert!(err.message().contains("must end with alphanumeric")); + } + + #[test] + fn validate_label_key_rejects_name_starting_with_underscore() { + let err = validate_label_key("_app").unwrap_err(); + assert_eq!(err.code(), Code::InvalidArgument); + assert!(err.message().contains("must start with alphanumeric")); + } + + #[test] + fn validate_label_key_rejects_name_starting_with_dot() { + let err = validate_label_key(".app").unwrap_err(); + assert_eq!(err.code(), Code::InvalidArgument); + assert!(err.message().contains("must start with alphanumeric")); + } + + #[test] + fn validate_label_key_rejects_name_too_long() { + let long_name = "a".repeat(64); + let err = validate_label_key(&long_name).unwrap_err(); + assert_eq!(err.code(), Code::InvalidArgument); + assert!(err.message().contains("exceeds 63 characters")); + } + + #[test] + fn 
validate_label_key_accepts_name_at_max_length() { + let max_name = format!("a{}z", "b".repeat(61)); + assert!(validate_label_key(&max_name).is_ok()); + } + + #[test] + fn validate_label_key_rejects_total_length_too_long() { + let long_key = format!("{}/app", "a".repeat(250)); + let err = validate_label_key(&long_key).unwrap_err(); + assert_eq!(err.code(), Code::InvalidArgument); + assert!(err.message().contains("exceeds 253 characters")); + } + + #[test] + fn validate_label_key_rejects_empty_prefix() { + let err = validate_label_key("/app").unwrap_err(); + assert_eq!(err.code(), Code::InvalidArgument); + assert!(err.message().contains("prefix cannot be empty")); + } + + #[test] + fn validate_label_key_rejects_empty_name_after_prefix() { + let err = validate_label_key("kubernetes.io/").unwrap_err(); + assert_eq!(err.code(), Code::InvalidArgument); + assert!(err.message().contains("name segment cannot be empty")); + } + + #[test] + fn validate_label_key_rejects_prefix_with_uppercase() { + let err = validate_label_key("Example.com/app").unwrap_err(); + assert_eq!(err.code(), Code::InvalidArgument); + assert!(err.message().contains("must be a DNS subdomain")); + } + + #[test] + fn validate_label_key_rejects_prefix_starting_with_hyphen() { + let err = validate_label_key("-example.com/app").unwrap_err(); + assert_eq!(err.code(), Code::InvalidArgument); + assert!( + err.message() + .contains("cannot start or end with '-' or '.'") + ); + } + + #[test] + fn validate_label_key_rejects_prefix_ending_with_dot() { + let err = validate_label_key("example.com./app").unwrap_err(); + assert_eq!(err.code(), Code::InvalidArgument); + assert!( + err.message() + .contains("cannot start or end with '-' or '.'") + ); + } + + #[test] + fn validate_label_key_rejects_prefix_with_consecutive_dots() { + let err = validate_label_key("example..com/app").unwrap_err(); + assert_eq!(err.code(), Code::InvalidArgument); + assert!(err.message().contains("cannot contain consecutive dots")); + } + + 
#[test] + fn validate_label_key_rejects_invalid_characters() { + let err = validate_label_key("app@name").unwrap_err(); + assert_eq!(err.code(), Code::InvalidArgument); + assert!(err.message().contains("invalid characters")); + } + + // ---- Label value validation ---- + + #[test] + fn validate_label_value_accepts_empty() { + // Kubernetes allows empty label values + assert!(validate_label_value("").is_ok()); + } + + #[test] + fn validate_label_value_accepts_valid_values() { + assert!(validate_label_value("prod").is_ok()); + assert!(validate_label_value("my-value").is_ok()); + assert!(validate_label_value("my_value").is_ok()); + assert!(validate_label_value("my.value").is_ok()); + assert!(validate_label_value("value123").is_ok()); + assert!(validate_label_value("v1-2_3.4").is_ok()); + } + + #[test] + fn validate_label_value_accepts_max_length() { + let max_value = format!("a{}z", "b".repeat(61)); + assert!(validate_label_value(&max_value).is_ok()); + } + + #[test] + fn validate_label_value_rejects_too_long() { + let long_value = "a".repeat(64); + let err = validate_label_value(&long_value).unwrap_err(); + assert_eq!(err.code(), Code::InvalidArgument); + assert!(err.message().contains("exceeds 63 characters")); + } + + #[test] + fn validate_label_value_rejects_starting_with_hyphen() { + let err = validate_label_value("-value").unwrap_err(); + assert_eq!(err.code(), Code::InvalidArgument); + assert!(err.message().contains("must start with alphanumeric")); + } + + #[test] + fn validate_label_value_rejects_ending_with_hyphen() { + let err = validate_label_value("value-").unwrap_err(); + assert_eq!(err.code(), Code::InvalidArgument); + assert!(err.message().contains("must end with alphanumeric")); + } + + #[test] + fn validate_label_value_rejects_starting_with_underscore() { + let err = validate_label_value("_value").unwrap_err(); + assert_eq!(err.code(), Code::InvalidArgument); + assert!(err.message().contains("must start with alphanumeric")); + } + + #[test] + fn 
validate_label_value_rejects_starting_with_dot() { + let err = validate_label_value(".value").unwrap_err(); + assert_eq!(err.code(), Code::InvalidArgument); + assert!(err.message().contains("must start with alphanumeric")); + } + + #[test] + fn validate_label_value_rejects_invalid_characters() { + let err = validate_label_value("value@123").unwrap_err(); + assert_eq!(err.code(), Code::InvalidArgument); + assert!(err.message().contains("invalid characters")); + } + + // ---- Label selector validation ---- + + #[test] + fn validate_label_selector_accepts_empty() { + assert!(validate_label_selector("").is_ok()); + assert!(validate_label_selector(" ").is_ok()); + } + + #[test] + fn validate_label_selector_accepts_single_pair() { + assert!(validate_label_selector("env=prod").is_ok()); + assert!(validate_label_selector(" env=prod ").is_ok()); + } + + #[test] + fn validate_label_selector_accepts_multiple_pairs() { + assert!(validate_label_selector("env=prod,team=platform").is_ok()); + assert!(validate_label_selector("env=prod, team=platform").is_ok()); + } + + #[test] + fn validate_label_selector_rejects_missing_equals() { + let err = validate_label_selector("env:prod").unwrap_err(); + assert_eq!(err.code(), Code::InvalidArgument); + assert!(err.message().contains("expected 'key=value'")); + } + + #[test] + fn validate_label_selector_rejects_empty_key() { + let err = validate_label_selector("=prod").unwrap_err(); + assert_eq!(err.code(), Code::InvalidArgument); + assert!(err.message().contains("key cannot be empty")); + } + + #[test] + fn validate_label_selector_accepts_empty_value() { + // Kubernetes allows empty label values + assert!(validate_label_selector("env=").is_ok()); + assert!(validate_label_selector("app=,env=prod").is_ok()); + } + + #[test] + fn validate_label_selector_allows_trailing_comma() { + // Trailing commas are treated as empty pairs and ignored + assert!(validate_label_selector("env=prod,").is_ok()); + } + + #[test] + fn 
validate_label_selector_accepts_prefixed_keys() { + assert!(validate_label_selector("kubernetes.io/app=web").is_ok()); + assert!(validate_label_selector("example.com/env=prod,team=platform").is_ok()); + } + + #[test] + fn validate_label_selector_rejects_invalid_key_format() { + // Key starting with hyphen + let err = validate_label_selector("-app=prod").unwrap_err(); + assert_eq!(err.code(), Code::InvalidArgument); + assert!(err.message().contains("must start with alphanumeric")); + } + + #[test] + fn validate_label_selector_rejects_invalid_value_format() { + // Value starting with hyphen + let err = validate_label_selector("env=-prod").unwrap_err(); + assert_eq!(err.code(), Code::InvalidArgument); + assert!(err.message().contains("must start with alphanumeric")); + } + + #[test] + fn validate_label_selector_rejects_oversized_key() { + let long_key = "a".repeat(64); + let selector = format!("{long_key}=value"); + let err = validate_label_selector(&selector).unwrap_err(); + assert_eq!(err.code(), Code::InvalidArgument); + assert!(err.message().contains("exceeds 63 characters")); + } + + #[test] + fn validate_label_selector_rejects_oversized_value() { + let long_value = "a".repeat(64); + let selector = format!("key={long_value}"); + let err = validate_label_selector(&selector).unwrap_err(); + assert_eq!(err.code(), Code::InvalidArgument); + assert!(err.message().contains("exceeds 63 characters")); + } + // ---- Policy safety ---- #[test] diff --git a/crates/openshell-server/src/inference.rs b/crates/openshell-server/src/inference.rs index 79f303aeb..be983667b 100644 --- a/crates/openshell-server/src/inference.rs +++ b/crates/openshell-server/src/inference.rs @@ -15,7 +15,7 @@ use tonic::{Request, Response, Status}; use crate::{ ServerState, - persistence::{ObjectId, ObjectName, ObjectType, Store}, + persistence::{ObjectName, ObjectType, Store, current_time_ms}, }; #[derive(Debug)] @@ -51,18 +51,6 @@ impl ObjectType for InferenceRoute { } } -impl ObjectId for 
InferenceRoute { - fn object_id(&self) -> &str { - &self.id - } -} - -impl ObjectName for InferenceRoute { - fn object_name(&self) -> &str { - &self.name - } -} - #[tonic::async_trait] impl Inference for InferenceService { async fn get_inference_bundle( @@ -172,7 +160,7 @@ async fn upsert_cluster_inference_route( let resolved = resolve_provider_route(&provider)?; let validation = if verify { - vec![verify_provider_endpoint(&provider.name, model_id, &resolved).await?] + vec![verify_provider_endpoint(provider.object_name(), model_id, &resolved).await?] } else { Vec::new() }; @@ -184,22 +172,31 @@ async fn upsert_cluster_inference_route( .await .map_err(|e| Status::internal(format!("fetch route failed: {e}")))?; + let now_ms = + current_time_ms().map_err(|e| Status::internal(format!("get current time: {e}")))?; + let route = if let Some(existing) = existing { InferenceRoute { - id: existing.id, - name: existing.name, + metadata: existing.metadata.clone(), config: Some(config), version: existing.version.saturating_add(1), } } else { InferenceRoute { - id: uuid::Uuid::new_v4().to_string(), - name: route_name.to_string(), + metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { + id: uuid::Uuid::new_v4().to_string(), + name: route_name.to_string(), + created_at_ms: now_ms, + labels: std::collections::HashMap::new(), + }), config: Some(config), version: 1, } }; + // Ensure metadata is valid (defense in depth - should always be true for server-constructed metadata) + crate::grpc::validate_object_metadata(route.metadata.as_ref(), "inference_route")?; + store .put_message(&route) .await @@ -214,7 +211,7 @@ fn build_cluster_inference_config( timeout_secs: u64, ) -> ClusterInferenceConfig { ClusterInferenceConfig { - provider_name: provider.name.clone(), + provider_name: provider.object_name().to_string(), model_id: model_id.to_string(), timeout_secs, } @@ -238,7 +235,7 @@ fn resolve_provider_route(provider: &Provider) -> Result Result Result InferenceRoute { 
InferenceRoute { - id: format!("id-{name}"), - name: name.to_string(), + metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { + id: format!("id-{name}"), + name: name.to_string(), + created_at_ms: 1000000, + labels: std::collections::HashMap::new(), + }), config: Some(ClusterInferenceConfig { provider_name: provider_name.to_string(), model_id: model_id.to_string(), timeout_secs: 0, }), - version: 1, + version: 0, } } fn make_provider(name: &str, provider_type: &str, key_name: &str, key_value: &str) -> Provider { Provider { - id: format!("provider-{name}"), - name: name.to_string(), + metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { + id: format!("provider-{name}"), + name: name.to_string(), + created_at_ms: 1000000, + labels: std::collections::HashMap::new(), + }), r#type: provider_type.to_string(), credentials: std::iter::once((key_name.to_string(), key_value.to_string())).collect(), config: std::collections::HashMap::new(), @@ -539,8 +545,7 @@ mod tests { ) .await .expect("first set should succeed"); - assert_eq!(first.route.name, CLUSTER_INFERENCE_ROUTE_NAME); - assert_eq!(first.route.version, 1); + assert_eq!(first.route.object_name(), CLUSTER_INFERENCE_ROUTE_NAME); let second = upsert_cluster_inference_route( &store, @@ -552,8 +557,7 @@ mod tests { ) .await .expect("second set should succeed"); - assert_eq!(second.route.version, 2); - assert_eq!(second.route.id, first.route.id); + assert_eq!(second.route.object_id(), first.route.object_id()); let config = second.route.config.as_ref().expect("config"); assert_eq!(config.provider_name, "openai-dev"); @@ -652,8 +656,12 @@ mod tests { .expect("store should connect"); let provider = Provider { - id: "provider-1".to_string(), - name: "openai-dev".to_string(), + metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { + id: "provider-1".to_string(), + name: "openai-dev".to_string(), + created_at_ms: 1000000, + labels: std::collections::HashMap::new(), + }), r#type: 
"openai".to_string(), credentials: std::iter::once(("OPENAI_API_KEY".to_string(), "sk-test".to_string())) .collect(), @@ -669,14 +677,18 @@ mod tests { .expect("provider should persist"); let route = InferenceRoute { - id: "r-1".to_string(), - name: CLUSTER_INFERENCE_ROUTE_NAME.to_string(), + metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { + id: "r-1".to_string(), + name: CLUSTER_INFERENCE_ROUTE_NAME.to_string(), + created_at_ms: 1000000, + labels: std::collections::HashMap::new(), + }), config: Some(ClusterInferenceConfig { provider_name: "openai-dev".to_string(), model_id: "test/model".to_string(), timeout_secs: 0, }), - version: 7, + version: 1, }; store .put_message(&route) @@ -727,12 +739,11 @@ mod tests { assert_eq!(first.api_key, "sk-initial"); let rotated_provider = Provider { - id: provider.id, - name: provider.name, - r#type: provider.r#type, + metadata: provider.metadata.clone(), + r#type: provider.r#type.clone(), credentials: std::iter::once(("OPENAI_API_KEY".to_string(), "sk-rotated".to_string())) .collect(), - config: provider.config, + config: provider.config.clone(), }; store .put_message(&rotated_provider) @@ -766,8 +777,7 @@ mod tests { .await .expect("should succeed"); - assert_eq!(route.route.name, SANDBOX_SYSTEM_ROUTE_NAME); - assert_eq!(route.route.version, 1); + assert_eq!(route.route.object_name(), SANDBOX_SYSTEM_ROUTE_NAME); let config = route.route.config.as_ref().expect("config"); assert_eq!(config.provider_name, "anthropic-dev"); assert_eq!(config.model_id, "claude-sonnet-4-20250514"); @@ -859,7 +869,7 @@ mod tests { .expect("fetch should succeed") .expect("route should exist"); - assert_eq!(route.name, SANDBOX_SYSTEM_ROUTE_NAME); + assert_eq!(route.object_name(), SANDBOX_SYSTEM_ROUTE_NAME); let config = route.config.as_ref().expect("config"); assert_eq!(config.model_id, "gpt-4o-mini"); } diff --git a/crates/openshell-server/src/persistence/mod.rs b/crates/openshell-server/src/persistence/mod.rs index 
5cd36693b..d64f4cdf8 100644 --- a/crates/openshell-server/src/persistence/mod.rs +++ b/crates/openshell-server/src/persistence/mod.rs @@ -23,6 +23,8 @@ pub struct ObjectRecord { pub payload: Vec, pub created_at_ms: i64, pub updated_at_ms: i64, + /// JSON-serialized labels (key-value pairs). + pub labels: Option, } /// Stored sandbox policy revision record. @@ -51,15 +53,9 @@ pub trait ObjectType { fn object_type() -> &'static str; } -/// Trait for extracting an object id from a message instance. -pub trait ObjectId { - fn object_id(&self) -> &str; -} - -/// Trait for extracting an object name from a message instance. -pub trait ObjectName { - fn object_name(&self) -> &str; -} +// Import object metadata accessor traits from openshell-core +// (implementations for all proto types are in openshell-core::metadata) +pub use openshell_core::{ObjectId, ObjectLabels, ObjectName}; /// Generate a random 6-character lowercase alphabetic name. pub fn generate_name() -> String { @@ -88,10 +84,17 @@ impl Store { } /// Insert or update an object. - pub async fn put(&self, object_type: &str, id: &str, name: &str, payload: &[u8]) -> Result<()> { + pub async fn put( + &self, + object_type: &str, + id: &str, + name: &str, + payload: &[u8], + labels: Option<&str>, + ) -> Result<()> { match self { - Self::Postgres(store) => store.put(object_type, id, name, payload).await, - Self::Sqlite(store) => store.put(object_type, id, name, payload).await, + Self::Postgres(store) => store.put(object_type, id, name, payload, labels).await, + Self::Sqlite(store) => store.put(object_type, id, name, payload, labels).await, } } @@ -140,6 +143,29 @@ impl Store { } } + /// List objects by type with label selector filtering. + /// Label selector format: "key1=value1,key2=value2" (comma-separated equality matches). 
+ pub async fn list_with_selector( + &self, + object_type: &str, + label_selector: &str, + limit: u32, + offset: u32, + ) -> Result> { + match self { + Self::Postgres(store) => { + store + .list_with_selector(object_type, label_selector, limit, offset) + .await + } + Self::Sqlite(store) => { + store + .list_with_selector(object_type, label_selector, limit, offset) + .await + } + } + } + // ----------------------------------------------------------------------- // Policy revision operations // ----------------------------------------------------------------------- @@ -256,21 +282,33 @@ impl Store { // ----------------------------------------------------------------------- /// Insert or update a protobuf message using its inferred object type, id, and name. - pub async fn put_message( + pub async fn put_message( &self, message: &T, ) -> Result<()> { + // Serialize labels to JSON + let labels_map = message.object_labels(); + let labels_json = if labels_map.as_ref().map_or(true, |m| m.is_empty()) { + None + } else { + Some( + serde_json::to_string(&labels_map) + .map_err(|e| Error::execution(format!("failed to serialize labels: {e}")))?, + ) + }; + self.put( T::object_type(), message.object_id(), message.object_name(), &message.encode_to_vec(), + labels_json.as_deref(), ) .await } /// Fetch and decode a protobuf message by id. - pub async fn get_message( + pub async fn get_message( &self, id: &str, ) -> Result> { @@ -279,13 +317,16 @@ impl Store { return Ok(None); }; - T::decode(record.payload.as_slice()) - .map(Some) - .map_err(|e| Error::execution(format!("protobuf decode error: {e}"))) + let message = T::decode(record.payload.as_slice()) + .map_err(|e| Error::execution(format!("protobuf decode error: {e}")))?; + + // Note: labels are already in the proto message from the payload + // The database `labels` column is for filtering only + Ok(Some(message)) } /// Fetch and decode a protobuf message by name. 
- pub async fn get_message_by_name( + pub async fn get_message_by_name( &self, name: &str, ) -> Result> { @@ -294,9 +335,12 @@ impl Store { return Ok(None); }; - T::decode(record.payload.as_slice()) - .map(Some) - .map_err(|e| Error::execution(format!("protobuf decode error: {e}"))) + let message = T::decode(record.payload.as_slice()) + .map_err(|e| Error::execution(format!("protobuf decode error: {e}")))?; + + // Note: labels are already in the proto message from the payload + // The database `labels` column is for filtering only + Ok(Some(message)) } // ----------------------------------------------------------------------- @@ -405,7 +449,7 @@ pub struct DraftChunkRecord { pub last_seen_ms: i64, } -fn current_time_ms() -> Result { +pub fn current_time_ms() -> Result { let now = SystemTime::now() .duration_since(UNIX_EPOCH) .map_err(|e| Error::execution(format!("time error: {e}")))?; @@ -421,5 +465,46 @@ fn map_migrate_error(error: &sqlx::migrate::MigrateError) -> Error { Error::execution(format!("migration error: {error}")) } +/// Parse a simple label selector string into key-value pairs. +/// Format: "key1=value1,key2=value2" +/// Returns a HashMap of label requirements. +/// +/// Note: Input validation should be performed at the gRPC layer using +/// `grpc::validation::validate_label_selector()` before calling this function. +/// Errors returned here indicate unexpected internal errors, not user input errors. 
+pub fn parse_label_selector(selector: &str) -> Result> { + if selector.is_empty() { + return Ok(std::collections::HashMap::new()); + } + + let mut labels = std::collections::HashMap::new(); + for pair in selector.split(',') { + let pair = pair.trim(); + if pair.is_empty() { + continue; + } + + let parts: Vec<&str> = pair.splitn(2, '=').collect(); + if parts.len() != 2 { + return Err(Error::execution(format!( + "invalid label selector: expected 'key=value', got '{pair}'" + ))); + } + + let key = parts[0].trim(); + let value = parts[1].trim(); + + if key.is_empty() { + return Err(Error::execution(format!( + "invalid label selector: key cannot be empty in '{pair}'" + ))); + } + + labels.insert(key.to_string(), value.to_string()); + } + + Ok(labels) +} + #[cfg(test)] mod tests; diff --git a/crates/openshell-server/src/persistence/postgres.rs b/crates/openshell-server/src/persistence/postgres.rs index 509b028d7..10b65c7ad 100644 --- a/crates/openshell-server/src/persistence/postgres.rs +++ b/crates/openshell-server/src/persistence/postgres.rs @@ -33,15 +33,28 @@ impl PostgresStore { .map_err(|e| map_migrate_error(&e)) } - pub async fn put(&self, object_type: &str, id: &str, name: &str, payload: &[u8]) -> Result<()> { + pub async fn put( + &self, + object_type: &str, + id: &str, + name: &str, + payload: &[u8], + labels: Option<&str>, + ) -> Result<()> { let now_ms = current_time_ms()?; + let labels_jsonb: Option = labels + .map(|s| serde_json::from_str(s)) + .transpose() + .map_err(|e| openshell_core::Error::execution(format!("invalid labels JSON: {e}")))?; + sqlx::query( r" -INSERT INTO objects (object_type, id, name, payload, created_at_ms, updated_at_ms) -VALUES ($1, $2, $3, $4, $5, $5) +INSERT INTO objects (object_type, id, name, payload, created_at_ms, updated_at_ms, labels) +VALUES ($1, $2, $3, $4, $5, $5, COALESCE($6, '{}'::jsonb)) ON CONFLICT (id) DO UPDATE SET payload = EXCLUDED.payload, - updated_at_ms = EXCLUDED.updated_at_ms + updated_at_ms = 
EXCLUDED.updated_at_ms, + labels = EXCLUDED.labels WHERE objects.object_type = EXCLUDED.object_type ", ) @@ -50,6 +63,7 @@ WHERE objects.object_type = EXCLUDED.object_type .bind(name) .bind(payload) .bind(now_ms) + .bind(labels_jsonb) .execute(&self.pool) .await .map_err(|e| map_db_error(&e))?; @@ -59,7 +73,7 @@ WHERE objects.object_type = EXCLUDED.object_type pub async fn get(&self, object_type: &str, id: &str) -> Result> { let row = sqlx::query( r" -SELECT object_type, id, name, payload, created_at_ms, updated_at_ms +SELECT object_type, id, name, payload, created_at_ms, updated_at_ms, labels FROM objects WHERE object_type = $1 AND id = $2 ", @@ -70,20 +84,26 @@ WHERE object_type = $1 AND id = $2 .await .map_err(|e| map_db_error(&e))?; - Ok(row.map(|row| ObjectRecord { - object_type: row.get("object_type"), - id: row.get("id"), - name: row.get("name"), - payload: row.get("payload"), - created_at_ms: row.get("created_at_ms"), - updated_at_ms: row.get("updated_at_ms"), + Ok(row.map(|row| { + let labels_jsonb: Option = row.get("labels"); + let labels = labels_jsonb.map(|v| v.to_string()); + + ObjectRecord { + object_type: row.get("object_type"), + id: row.get("id"), + name: row.get("name"), + payload: row.get("payload"), + created_at_ms: row.get("created_at_ms"), + updated_at_ms: row.get("updated_at_ms"), + labels, + } })) } pub async fn get_by_name(&self, object_type: &str, name: &str) -> Result> { let row = sqlx::query( r" -SELECT object_type, id, name, payload, created_at_ms, updated_at_ms +SELECT object_type, id, name, payload, created_at_ms, updated_at_ms, labels FROM objects WHERE object_type = $1 AND name = $2 ", @@ -94,13 +114,19 @@ WHERE object_type = $1 AND name = $2 .await .map_err(|e| map_db_error(&e))?; - Ok(row.map(|row| ObjectRecord { - object_type: row.get("object_type"), - id: row.get("id"), - name: row.get("name"), - payload: row.get("payload"), - created_at_ms: row.get("created_at_ms"), - updated_at_ms: row.get("updated_at_ms"), + Ok(row.map(|row| 
{ + let labels_jsonb: Option = row.get("labels"); + let labels = labels_jsonb.map(|v| v.to_string()); + + ObjectRecord { + object_type: row.get("object_type"), + id: row.get("id"), + name: row.get("name"), + payload: row.get("payload"), + created_at_ms: row.get("created_at_ms"), + updated_at_ms: row.get("updated_at_ms"), + labels, + } })) } @@ -132,7 +158,7 @@ WHERE object_type = $1 AND name = $2 ) -> Result> { let rows = sqlx::query( r" -SELECT object_type, id, name, payload, created_at_ms, updated_at_ms +SELECT object_type, id, name, payload, created_at_ms, updated_at_ms, labels FROM objects WHERE object_type = $1 ORDER BY created_at_ms ASC, name ASC @@ -148,13 +174,75 @@ LIMIT $2 OFFSET $3 let records = rows .into_iter() - .map(|row| ObjectRecord { - object_type: row.get("object_type"), - id: row.get("id"), - name: row.get("name"), - payload: row.get("payload"), - created_at_ms: row.get("created_at_ms"), - updated_at_ms: row.get("updated_at_ms"), + .map(|row| { + let labels_jsonb: Option = row.get("labels"); + let labels = labels_jsonb.map(|v| v.to_string()); + + ObjectRecord { + object_type: row.get("object_type"), + id: row.get("id"), + name: row.get("name"), + payload: row.get("payload"), + created_at_ms: row.get("created_at_ms"), + updated_at_ms: row.get("updated_at_ms"), + labels, + } + }) + .collect(); + + Ok(records) + } + + pub async fn list_with_selector( + &self, + object_type: &str, + label_selector: &str, + limit: u32, + offset: u32, + ) -> Result> { + use super::parse_label_selector; + + // Parse the label selector into key-value pairs + let required_labels = parse_label_selector(label_selector)?; + + // Convert to JSONB for containment check + let labels_jsonb = serde_json::to_value(&required_labels).map_err(|e| { + openshell_core::Error::execution(format!("failed to serialize labels: {e}")) + })?; + + // Use Postgres native JSONB containment operator @> + let rows = sqlx::query( + r" +SELECT object_type, id, name, payload, created_at_ms, 
updated_at_ms, labels +FROM objects +WHERE object_type = $1 AND labels @> $2 +ORDER BY created_at_ms ASC, name ASC +LIMIT $3 OFFSET $4 +", + ) + .bind(object_type) + .bind(&labels_jsonb) + .bind(i64::from(limit)) + .bind(i64::from(offset)) + .fetch_all(&self.pool) + .await + .map_err(|e| map_db_error(&e))?; + + let records = rows + .into_iter() + .map(|row| { + let labels_jsonb: Option = row.get("labels"); + let labels = labels_jsonb.map(|v| v.to_string()); + + ObjectRecord { + object_type: row.get("object_type"), + id: row.get("id"), + name: row.get("name"), + payload: row.get("payload"), + created_at_ms: row.get("created_at_ms"), + updated_at_ms: row.get("updated_at_ms"), + labels, + } }) .collect(); diff --git a/crates/openshell-server/src/persistence/sqlite.rs b/crates/openshell-server/src/persistence/sqlite.rs index 3ee7799d7..c5a9159d5 100644 --- a/crates/openshell-server/src/persistence/sqlite.rs +++ b/crates/openshell-server/src/persistence/sqlite.rs @@ -45,16 +45,24 @@ impl SqliteStore { .map_err(|e| map_migrate_error(&e)) } - pub async fn put(&self, object_type: &str, id: &str, name: &str, payload: &[u8]) -> Result<()> { + pub async fn put( + &self, + object_type: &str, + id: &str, + name: &str, + payload: &[u8], + labels: Option<&str>, + ) -> Result<()> { let now_ms = current_time_ms()?; sqlx::query( r#" -INSERT INTO "objects" ("object_type", "id", "name", "payload", "created_at_ms", "updated_at_ms") -VALUES (?1, ?2, ?3, ?4, ?5, ?5) +INSERT INTO "objects" ("object_type", "id", "name", "payload", "created_at_ms", "updated_at_ms", "labels") +VALUES (?1, ?2, ?3, ?4, ?5, ?5, ?6) ON CONFLICT ("id") DO UPDATE SET "payload" = excluded."payload", - "updated_at_ms" = excluded."updated_at_ms" + "updated_at_ms" = excluded."updated_at_ms", + "labels" = excluded."labels" WHERE "objects"."object_type" = excluded."object_type" "#, ) @@ -63,6 +71,7 @@ WHERE "objects"."object_type" = excluded."object_type" .bind(name) .bind(payload) .bind(now_ms) + 
.bind(labels.unwrap_or("{}")) .execute(&self.pool) .await .map_err(|e| map_db_error(&e))?; @@ -72,7 +81,7 @@ WHERE "objects"."object_type" = excluded."object_type" pub async fn get(&self, object_type: &str, id: &str) -> Result> { let row = sqlx::query( r#" -SELECT "object_type", "id", "name", "payload", "created_at_ms", "updated_at_ms" +SELECT "object_type", "id", "name", "payload", "created_at_ms", "updated_at_ms", "labels" FROM "objects" WHERE "object_type" = ?1 AND "id" = ?2 "#, @@ -90,13 +99,14 @@ WHERE "object_type" = ?1 AND "id" = ?2 payload: row.get("payload"), created_at_ms: row.get("created_at_ms"), updated_at_ms: row.get("updated_at_ms"), + labels: row.get("labels"), })) } pub async fn get_by_name(&self, object_type: &str, name: &str) -> Result> { let row = sqlx::query( r#" -SELECT "object_type", "id", "name", "payload", "created_at_ms", "updated_at_ms" +SELECT "object_type", "id", "name", "payload", "created_at_ms", "updated_at_ms", "labels" FROM "objects" WHERE "object_type" = ?1 AND "name" = ?2 "#, @@ -114,6 +124,7 @@ WHERE "object_type" = ?1 AND "name" = ?2 payload: row.get("payload"), created_at_ms: row.get("created_at_ms"), updated_at_ms: row.get("updated_at_ms"), + labels: row.get("labels"), })) } @@ -155,7 +166,7 @@ WHERE "object_type" = ?1 AND "name" = ?2 ) -> Result> { let rows = sqlx::query( r#" -SELECT "object_type", "id", "name", "payload", "created_at_ms", "updated_at_ms" +SELECT "object_type", "id", "name", "payload", "created_at_ms", "updated_at_ms", "labels" FROM "objects" WHERE "object_type" = ?1 ORDER BY "created_at_ms" ASC, "name" ASC @@ -178,12 +189,49 @@ LIMIT ?2 OFFSET ?3 payload: row.get("payload"), created_at_ms: row.get("created_at_ms"), updated_at_ms: row.get("updated_at_ms"), + labels: row.get("labels"), }) .collect(); Ok(records) } + pub async fn list_with_selector( + &self, + object_type: &str, + label_selector: &str, + limit: u32, + offset: u32, + ) -> Result> { + use super::parse_label_selector; + + // Parse the label 
selector into key-value pairs + let required_labels = parse_label_selector(label_selector)?; + + // For SQLite, we parse JSON in application layer since it doesn't have native JSONB + let all_records = self.list(object_type, u32::MAX, 0).await?; + + // Filter in memory + let filtered: Vec = all_records + .into_iter() + .filter(|record| { + // Parse labels JSON + let labels_json = record.labels.as_deref().unwrap_or("{}"); + let labels: std::collections::HashMap = + serde_json::from_str(labels_json).unwrap_or_default(); + + // Check if all required labels match + required_labels + .iter() + .all(|(key, value)| labels.get(key).map(|v| v == value).unwrap_or(false)) + }) + .skip(offset as usize) + .take(limit as usize) + .collect(); + + Ok(filtered) + } + // ------------------------------------------------------------------- // Policy revision operations // ------------------------------------------------------------------- diff --git a/crates/openshell-server/src/persistence/tests.rs b/crates/openshell-server/src/persistence/tests.rs index b3bfe2fa4..760ade2c2 100644 --- a/crates/openshell-server/src/persistence/tests.rs +++ b/crates/openshell-server/src/persistence/tests.rs @@ -1,7 +1,7 @@ // SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
// SPDX-License-Identifier: Apache-2.0 -use super::{ObjectId, ObjectName, ObjectType, Store, generate_name}; +use super::{ObjectId, ObjectLabels, ObjectName, ObjectType, Store, generate_name}; use openshell_core::proto::ObjectForTest; #[tokio::test] @@ -11,7 +11,7 @@ async fn sqlite_put_get_round_trip() { .unwrap(); store - .put("sandbox", "abc", "my-sandbox", b"payload") + .put("sandbox", "abc", "my-sandbox", b"payload", None) .await .unwrap(); @@ -39,14 +39,14 @@ async fn sqlite_updates_timestamp() { .unwrap(); store - .put("sandbox", "abc", "my-sandbox", b"payload") + .put("sandbox", "abc", "my-sandbox", b"payload", None) .await .unwrap(); let first = store.get("sandbox", "abc").await.unwrap().unwrap(); store - .put("sandbox", "abc", "my-sandbox", b"payload2") + .put("sandbox", "abc", "my-sandbox", b"payload2", None) .await .unwrap(); @@ -66,7 +66,7 @@ async fn sqlite_list_paging() { let name = format!("name-{idx}"); let payload = format!("payload-{idx}"); store - .put("sandbox", &id, &name, payload.as_bytes()) + .put("sandbox", &id, &name, payload.as_bytes(), None) .await .unwrap(); } @@ -84,7 +84,7 @@ async fn sqlite_delete_behavior() { .unwrap(); store - .put("sandbox", "abc", "my-sandbox", b"payload") + .put("sandbox", "abc", "my-sandbox", b"payload", None) .await .unwrap(); @@ -127,7 +127,7 @@ async fn sqlite_get_by_name() { .unwrap(); store - .put("sandbox", "id-1", "my-sandbox", b"payload") + .put("sandbox", "id-1", "my-sandbox", b"payload", None) .await .unwrap(); @@ -181,7 +181,7 @@ async fn sqlite_delete_by_name() { .unwrap(); store - .put("sandbox", "id-1", "my-sandbox", b"payload") + .put("sandbox", "id-1", "my-sandbox", b"payload", None) .await .unwrap(); @@ -202,19 +202,19 @@ async fn sqlite_name_unique_per_object_type() { .unwrap(); store - .put("sandbox", "id-1", "shared-name", b"payload1") + .put("sandbox", "id-1", "shared-name", b"payload1", None) .await .unwrap(); // Same name, same object_type, different id -> should fail (unique constraint). 
let result = store - .put("sandbox", "id-2", "shared-name", b"payload2") + .put("sandbox", "id-2", "shared-name", b"payload2", None) .await; assert!(result.is_err()); // Same name, different object_type -> should succeed. store - .put("secret", "id-3", "shared-name", b"payload3") + .put("secret", "id-3", "shared-name", b"payload3", None) .await .unwrap(); } @@ -226,7 +226,7 @@ async fn sqlite_id_globally_unique() { .unwrap(); store - .put("sandbox", "same-id", "name-a", b"payload1") + .put("sandbox", "same-id", "name-a", b"payload1", None) .await .unwrap(); @@ -234,7 +234,7 @@ async fn sqlite_id_globally_unique() { // clause prevents updating a row with a different object_type). // The original row is preserved unchanged. store - .put("secret", "same-id", "name-b", b"payload2") + .put("secret", "same-id", "name-b", b"payload2", None) .await .unwrap(); @@ -263,16 +263,192 @@ impl ObjectType for ObjectForTest { } } -impl ObjectId for ObjectForTest { - fn object_id(&self) -> &str { - &self.id - } +// ObjectId, ObjectName, ObjectLabels implementations +// for ObjectForTest are in openshell-core::metadata + +// --------------------------------------------------------------------------- +// ObjectMeta tests (labels) +// --------------------------------------------------------------------------- + +#[tokio::test] +async fn labels_round_trip() { + let store = Store::connect("sqlite::memory:?cache=shared") + .await + .unwrap(); + + let labels = r#"{"env":"production","team":"platform"}"#; + store + .put( + "sandbox", + "id-1", + "labeled-sandbox", + b"payload", + Some(labels), + ) + .await + .unwrap(); + + let record = store.get("sandbox", "id-1").await.unwrap().unwrap(); + assert_eq!(record.labels.as_deref(), Some(labels)); +} + +#[tokio::test] +async fn label_selector_single_match() { + let store = Store::connect("sqlite::memory:?cache=shared") + .await + .unwrap(); + + store + .put("sandbox", "id-1", "s1", b"p1", Some(r#"{"env":"prod"}"#)) + .await + .unwrap(); + store + 
.put("sandbox", "id-2", "s2", b"p2", Some(r#"{"env":"dev"}"#)) + .await + .unwrap(); + store + .put( + "sandbox", + "id-3", + "s3", + b"p3", + Some(r#"{"env":"prod","team":"platform"}"#), + ) + .await + .unwrap(); + + let results = store + .list_with_selector("sandbox", "env=prod", 10, 0) + .await + .unwrap(); + + assert_eq!(results.len(), 2); + let ids: Vec<&str> = results.iter().map(|r| r.id.as_str()).collect(); + assert!(ids.contains(&"id-1")); + assert!(ids.contains(&"id-3")); +} + +#[tokio::test] +async fn label_selector_multiple_labels() { + let store = Store::connect("sqlite::memory:?cache=shared") + .await + .unwrap(); + + store + .put( + "sandbox", + "id-1", + "s1", + b"p1", + Some(r#"{"env":"prod","team":"platform"}"#), + ) + .await + .unwrap(); + store + .put( + "sandbox", + "id-2", + "s2", + b"p2", + Some(r#"{"env":"prod","team":"data"}"#), + ) + .await + .unwrap(); + store + .put( + "sandbox", + "id-3", + "s3", + b"p3", + Some(r#"{"env":"dev","team":"platform"}"#), + ) + .await + .unwrap(); + + let results = store + .list_with_selector("sandbox", "env=prod,team=platform", 10, 0) + .await + .unwrap(); + + assert_eq!(results.len(), 1); + assert_eq!(results[0].id, "id-1"); +} + +#[tokio::test] +async fn label_selector_no_match() { + let store = Store::connect("sqlite::memory:?cache=shared") + .await + .unwrap(); + + store + .put("sandbox", "id-1", "s1", b"p1", Some(r#"{"env":"prod"}"#)) + .await + .unwrap(); + + let results = store + .list_with_selector("sandbox", "env=staging", 10, 0) + .await + .unwrap(); + + assert_eq!(results.len(), 0); } -impl ObjectName for ObjectForTest { - fn object_name(&self) -> &str { - &self.name +#[tokio::test] +async fn label_selector_respects_paging() { + let store = Store::connect("sqlite::memory:?cache=shared") + .await + .unwrap(); + + for idx in 0..5 { + let id = format!("id-{idx}"); + let name = format!("name-{idx}"); + store + .put("sandbox", &id, &name, b"payload", Some(r#"{"env":"prod"}"#)) + .await + .unwrap(); } + 
+ let page1 = store + .list_with_selector("sandbox", "env=prod", 2, 0) + .await + .unwrap(); + assert_eq!(page1.len(), 2); + + let page2 = store + .list_with_selector("sandbox", "env=prod", 2, 2) + .await + .unwrap(); + assert_eq!(page2.len(), 2); + + let page3 = store + .list_with_selector("sandbox", "env=prod", 2, 4) + .await + .unwrap(); + assert_eq!(page3.len(), 1); +} + +#[tokio::test] +async fn empty_labels_not_matched_by_selector() { + let store = Store::connect("sqlite::memory:?cache=shared") + .await + .unwrap(); + + store + .put("sandbox", "id-1", "s1", b"p1", None) + .await + .unwrap(); + store + .put("sandbox", "id-2", "s2", b"p2", Some(r#"{"env":"prod"}"#)) + .await + .unwrap(); + + let results = store + .list_with_selector("sandbox", "env=prod", 10, 0) + .await + .unwrap(); + + assert_eq!(results.len(), 1); + assert_eq!(results[0].id, "id-2"); } // --------------------------------------------------------------------------- @@ -506,3 +682,75 @@ async fn policy_isolation_between_sandboxes() { assert_eq!(s1.policy_payload, b"v1"); assert_eq!(s2.policy_payload, b"v1-s2"); } + +// ---- Label selector parsing tests ---- + +#[test] +fn parse_label_selector_empty_string() { + let result = super::parse_label_selector("").unwrap(); + assert!(result.is_empty()); +} + +#[test] +fn parse_label_selector_single_pair() { + let result = super::parse_label_selector("env=prod").unwrap(); + assert_eq!(result.len(), 1); + assert_eq!(result.get("env"), Some(&"prod".to_string())); +} + +#[test] +fn parse_label_selector_multiple_pairs() { + let result = super::parse_label_selector("env=prod,tier=frontend,version=v1").unwrap(); + assert_eq!(result.len(), 3); + assert_eq!(result.get("env"), Some(&"prod".to_string())); + assert_eq!(result.get("tier"), Some(&"frontend".to_string())); + assert_eq!(result.get("version"), Some(&"v1".to_string())); +} + +#[test] +fn parse_label_selector_accepts_empty_value() { + // Kubernetes allows empty label values, so selectors should accept 
"key=" format + let result = super::parse_label_selector("env=").unwrap(); + assert_eq!(result.len(), 1); + assert_eq!(result.get("env"), Some(&"".to_string())); +} + +#[test] +fn parse_label_selector_multiple_with_empty_value() { + let result = super::parse_label_selector("env=,tier=frontend").unwrap(); + assert_eq!(result.len(), 2); + assert_eq!(result.get("env"), Some(&"".to_string())); + assert_eq!(result.get("tier"), Some(&"frontend".to_string())); +} + +#[test] +fn parse_label_selector_rejects_empty_key() { + let result = super::parse_label_selector("=value"); + assert!(result.is_err()); + assert!( + result + .unwrap_err() + .to_string() + .contains("key cannot be empty") + ); +} + +#[test] +fn parse_label_selector_rejects_missing_equals() { + let result = super::parse_label_selector("env"); + assert!(result.is_err()); + assert!( + result + .unwrap_err() + .to_string() + .contains("expected 'key=value'") + ); +} + +#[test] +fn parse_label_selector_handles_whitespace() { + let result = super::parse_label_selector("env = prod , tier = frontend").unwrap(); + assert_eq!(result.len(), 2); + assert_eq!(result.get("env"), Some(&"prod".to_string())); + assert_eq!(result.get("tier"), Some(&"frontend".to_string())); +} diff --git a/crates/openshell-server/src/sandbox_index.rs b/crates/openshell-server/src/sandbox_index.rs index 2e515757d..9ad7c941b 100644 --- a/crates/openshell-server/src/sandbox_index.rs +++ b/crates/openshell-server/src/sandbox_index.rs @@ -27,18 +27,20 @@ impl SandboxIndex { pub fn update_from_sandbox(&self, sandbox: &Sandbox) { let mut inner = self.inner.write().expect("sandbox index lock poisoned"); - if !sandbox.name.is_empty() { - inner - .sandbox_name_to_id - .insert(sandbox.name.clone(), sandbox.id.clone()); - } + if let Some(metadata) = &sandbox.metadata { + if !metadata.name.is_empty() { + inner + .sandbox_name_to_id + .insert(metadata.name.clone(), metadata.id.clone()); + } - if let Some(status) = sandbox.status.as_ref() - && 
!status.agent_pod.is_empty() - { - inner - .agent_pod_to_id - .insert(status.agent_pod.clone(), sandbox.id.clone()); + if let Some(status) = sandbox.status.as_ref() + && !status.agent_pod.is_empty() + { + inner + .agent_pod_to_id + .insert(status.agent_pod.clone(), metadata.id.clone()); + } } } diff --git a/crates/openshell-server/src/ssh_tunnel.rs b/crates/openshell-server/src/ssh_tunnel.rs index b635efd88..6b0232fa0 100644 --- a/crates/openshell-server/src/ssh_tunnel.rs +++ b/crates/openshell-server/src/ssh_tunnel.rs @@ -15,7 +15,7 @@ use tokio::io::AsyncWriteExt; use tracing::{info, warn}; use crate::ServerState; -use crate::persistence::{ObjectId, ObjectName, ObjectType, Store}; +use crate::persistence::{ObjectType, Store}; const HEADER_SANDBOX_ID: &str = "x-sandbox-id"; const HEADER_TOKEN: &str = "x-sandbox-token"; @@ -238,18 +238,6 @@ impl ObjectType for SshSession { } } -impl ObjectId for SshSession { - fn object_id(&self) -> &str { - &self.id - } -} - -impl ObjectName for SshSession { - fn object_name(&self) -> &str { - &self.name - } -} - /// Decrement a connection count entry, removing it if it reaches zero. 
fn decrement_connection_count( counts: &std::sync::Mutex>, @@ -304,8 +292,12 @@ async fn reap_expired_sessions(store: &Store) -> Result<(), String> { || session.revoked; if should_delete { - if let Err(e) = store.delete(SshSession::object_type(), &session.id).await { - warn!(session_id = %session.id, error = %e, "Failed to reap SSH session"); + use openshell_core::ObjectId; + if let Err(e) = store + .delete(SshSession::object_type(), session.object_id()) + .await + { + warn!(session_id = %session.object_id(), error = %e, "Failed to reap SSH session"); } else { reaped += 1; } @@ -327,13 +319,16 @@ mod tests { fn make_session(id: &str, sandbox_id: &str, expires_at_ms: i64, revoked: bool) -> SshSession { SshSession { - id: id.to_string(), + metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { + id: id.to_string(), + name: format!("session-{}", id), + created_at_ms: 1000, + labels: HashMap::new(), + }), sandbox_id: sandbox_id.to_string(), token: id.to_string(), - created_at_ms: 1000, - revoked, - name: format!("session-{id}"), expires_at_ms, + revoked, } } diff --git a/crates/openshell-server/src/supervisor_session.rs b/crates/openshell-server/src/supervisor_session.rs index d130bf71d..e712b6b5d 100644 --- a/crates/openshell-server/src/supervisor_session.rs +++ b/crates/openshell-server/src/supervisor_session.rs @@ -718,9 +718,12 @@ mod tests { fn sandbox_record(id: &str, name: &str) -> Sandbox { Sandbox { - id: id.to_string(), - name: name.to_string(), - namespace: "default".to_string(), + metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { + id: id.to_string(), + name: name.to_string(), + created_at_ms: 1000000, + labels: std::collections::HashMap::new(), + }), ..Default::default() } } diff --git a/crates/openshell-tui/src/app.rs b/crates/openshell-tui/src/app.rs index 6a556fd12..f397b1809 100644 --- a/crates/openshell-tui/src/app.rs +++ b/crates/openshell-tui/src/app.rs @@ -463,6 +463,8 @@ pub struct App { pub sandbox_created: Vec, pub 
sandbox_images: Vec, pub sandbox_notes: Vec, + /// Formatted labels for each sandbox (e.g., "env=prod,team=platform" or empty string). + pub sandbox_labels: Vec, pub sandbox_policy_versions: Vec, pub sandbox_selected: usize, pub sandbox_count: usize, @@ -544,6 +546,37 @@ pub struct App { pub approve_all_confirm_chunks: Vec, } +// --------------------------------------------------------------------------- +// Label formatting utilities +// --------------------------------------------------------------------------- + +/// Sanitize a string for safe terminal display by filtering control characters. +/// +/// Removes all control characters except newlines to prevent ANSI escape +/// sequences or other terminal manipulation. +fn sanitize_for_display(s: &str) -> String { + s.chars() + .filter(|c| !c.is_control() || *c == '\n') + .collect() +} + +/// Format object labels as a comma-separated key=value string. +/// +/// Labels are sorted by key for deterministic output. Returns an empty string +/// if the map is empty. Values are sanitized to prevent terminal escape sequences. 
+pub fn format_labels(labels: &HashMap) -> String { + if labels.is_empty() { + return String::new(); + } + let mut sorted: Vec<_> = labels.iter().collect(); + sorted.sort_by_key(|(k, _)| *k); + sorted + .iter() + .map(|(k, v)| format!("{}={}", sanitize_for_display(k), sanitize_for_display(v))) + .collect::>() + .join(",") +} + impl App { pub fn new( client: OpenShellClient, @@ -597,6 +630,7 @@ impl App { sandbox_created: Vec::new(), sandbox_images: Vec::new(), sandbox_notes: Vec::new(), + sandbox_labels: Vec::new(), sandbox_policy_versions: Vec::new(), sandbox_selected: 0, sandbox_count: 0, @@ -2182,6 +2216,7 @@ impl App { self.sandbox_created.clear(); self.sandbox_images.clear(); self.sandbox_notes.clear(); + self.sandbox_labels.clear(); self.sandbox_policy_versions.clear(); self.sandbox_selected = 0; self.sandbox_count = 0; diff --git a/crates/openshell-tui/src/lib.rs b/crates/openshell-tui/src/lib.rs index affdbc224..0a8caf675 100644 --- a/crates/openshell-tui/src/lib.rs +++ b/crates/openshell-tui/src/lib.rs @@ -17,6 +17,7 @@ use crossterm::terminal::{ EnterAlternateScreen, LeaveAlternateScreen, disable_raw_mode, enable_raw_mode, }; use miette::{IntoDiagnostic, Result}; +use openshell_core::metadata::{ObjectId, ObjectLabels, ObjectName}; use openshell_core::proto::open_shell_client::OpenShellClient; use ratatui::Terminal; use ratatui::backend::CrosstermBackend; @@ -193,7 +194,7 @@ pub async fn run( "-".to_string() }; app.provider_detail = Some(app::ProviderDetailView { - name: provider.name.clone(), + name: provider.object_name().to_string(), provider_type: provider.r#type.clone(), credential_key: cred_key, masked_value: masked, @@ -718,11 +719,8 @@ async fn fetch_sandbox_detail(app: &mut App) { if let Some(spec) = &sandbox.spec { app.sandbox_providers_list = spec.providers.clone(); } - if sandbox.id.is_empty() { - None - } else { - Some(sandbox.id) - } + let id = sandbox.object_id().to_string(); + if id.is_empty() { None } else { Some(id) } } else { None } @@ 
-799,7 +797,7 @@ async fn handle_shell_connect( }; match tokio::time::timeout(Duration::from_secs(5), app.client.get_sandbox(req)).await { Ok(Ok(resp)) => match resp.into_inner().sandbox { - Some(s) => s.id, + Some(s) => s.object_id().to_string(), None => { app.status_text = "sandbox not found".to_string(); return; @@ -948,7 +946,7 @@ async fn handle_exec_command( }; match tokio::time::timeout(Duration::from_secs(5), app.client.get_sandbox(req)).await { Ok(Ok(resp)) => match resp.into_inner().sandbox { - Some(s) => s.id, + Some(s) => s.object_id().to_string(), None => { app.status_text = format!("exec: sandbox {sandbox_name} not found"); return; @@ -1313,14 +1311,22 @@ fn spawn_create_sandbox(app: &mut App, tx: mpsc::UnboundedSender) { policy, ..Default::default() }), + labels: std::collections::HashMap::new(), }; let sandbox_name = match tokio::time::timeout(Duration::from_secs(30), client.create_sandbox(req)).await { - Ok(Ok(resp)) => resp - .into_inner() - .sandbox - .map_or_else(|| "unknown".to_string(), |s| s.name), + Ok(Ok(resp)) => resp.into_inner().sandbox.map_or_else( + || "unknown".to_string(), + |s| { + let name = s.object_name().to_string(); + if name.is_empty() { + "unknown".to_string() + } else { + name + } + }, + ), Ok(Err(e)) => { let _ = tx.send(Event::CreateResult(Err(e.message().to_string()))); return; @@ -1351,7 +1357,7 @@ fn spawn_create_sandbox(app: &mut App, tx: mpsc::UnboundedSender) { Ok(resp) => { if let Some(sandbox) = resp.into_inner().sandbox { if sandbox.phase == 2 { - break sandbox.id; + break sandbox.object_id().to_string(); } if sandbox.phase == 3 { let _ = tx.send(Event::CreateResult(Err( @@ -1558,8 +1564,12 @@ fn spawn_create_provider(app: &App, tx: mpsc::UnboundedSender) { let req = openshell_core::proto::CreateProviderRequest { provider: Some(openshell_core::proto::Provider { - id: String::new(), - name: provider_name.clone(), + metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { + id: String::new(), + name: 
provider_name.clone(), + created_at_ms: 0, + labels: std::collections::HashMap::new(), + }), r#type: ptype.clone(), credentials: credentials.clone(), config: Default::default(), @@ -1568,7 +1578,14 @@ fn spawn_create_provider(app: &App, tx: mpsc::UnboundedSender) { match client.create_provider(req).await { Ok(resp) => { - let final_name = resp.into_inner().provider.map_or(provider_name, |p| p.name); + let final_name = resp.into_inner().provider.map_or(provider_name, |p| { + let name = p.object_name().to_string(); + if name.is_empty() { + "unknown".to_string() + } else { + name + } + }); let _ = tx.send(Event::ProviderCreateResult(Ok(final_name))); return; } @@ -1637,8 +1654,12 @@ fn spawn_update_provider(app: &App, tx: mpsc::UnboundedSender) { let req = openshell_core::proto::UpdateProviderRequest { provider: Some(openshell_core::proto::Provider { - id: String::new(), - name: name.clone(), + metadata: Some(openshell_core::proto::datamodel::v1::ObjectMeta { + id: String::new(), + name: name.clone(), + created_at_ms: 0, + labels: std::collections::HashMap::new(), + }), r#type: ptype, credentials, config: Default::default(), @@ -1865,7 +1886,10 @@ async fn refresh_providers(app: &mut App) { Ok(Ok(resp)) => { let providers = resp.into_inner().providers; app.provider_count = providers.len(); - app.provider_names = providers.iter().map(|p| p.name.clone()).collect(); + app.provider_names = providers + .iter() + .map(|p| p.object_name().to_string()) + .collect(); app.provider_types = providers.iter().map(|p| p.r#type.clone()).collect(); app.provider_cred_keys = providers .iter() @@ -2162,6 +2186,7 @@ async fn refresh_sandboxes(app: &mut App) { let req = openshell_core::proto::ListSandboxesRequest { limit: 100, offset: 0, + label_selector: String::new(), }; let result = tokio::time::timeout(Duration::from_secs(5), app.client.list_sandboxes(req)).await; match result { @@ -2174,8 +2199,14 @@ async fn refresh_sandboxes(app: &mut App) { Ok(Ok(resp)) => { let sandboxes = 
resp.into_inner().sandboxes; app.sandbox_count = sandboxes.len(); - app.sandbox_ids = sandboxes.iter().map(|s| s.id.clone()).collect(); - app.sandbox_names = sandboxes.iter().map(|s| s.name.clone()).collect(); + app.sandbox_ids = sandboxes + .iter() + .map(|s| s.object_id().to_string()) + .collect(); + app.sandbox_names = sandboxes + .iter() + .map(|s| s.object_name().to_string()) + .collect(); app.sandbox_phases = sandboxes.iter().map(|s| phase_label(s.phase)).collect(); app.sandbox_images = sandboxes .iter() @@ -2191,11 +2222,21 @@ async fn refresh_sandboxes(app: &mut App) { .collect(); app.sandbox_ages = sandboxes .iter() - .map(|s| format_age(s.created_at_ms)) + .map(|s| { + s.metadata + .as_ref() + .map(|m| format_age(m.created_at_ms)) + .unwrap_or_else(|| "?".to_string()) + }) .collect(); app.sandbox_created = sandboxes .iter() - .map(|s| format_timestamp(s.created_at_ms)) + .map(|s| { + s.metadata + .as_ref() + .map(|m| format_timestamp(m.created_at_ms)) + .unwrap_or_else(|| "?".to_string()) + }) .collect(); app.sandbox_policy_versions = @@ -2205,7 +2246,21 @@ async fn refresh_sandboxes(app: &mut App) { let forwards = openshell_core::forward::list_forwards().unwrap_or_default(); app.sandbox_notes = sandboxes .iter() - .map(|s| openshell_core::forward::build_sandbox_notes(&s.name, &forwards)) + .map(|s| { + let name = s.object_name(); + openshell_core::forward::build_sandbox_notes(name, &forwards) + }) + .collect(); + + // Build LABELS column from metadata. 
+ app.sandbox_labels = sandboxes + .iter() + .map(|s| { + s.object_labels() + .as_ref() + .map(|labels| app::format_labels(labels)) + .unwrap_or_default() + }) .collect(); if app.sandbox_selected >= app.sandbox_count && app.sandbox_count > 0 { diff --git a/crates/openshell-tui/src/ui/sandbox_detail.rs b/crates/openshell-tui/src/ui/sandbox_detail.rs index 0eab11782..7ab725e9d 100644 --- a/crates/openshell-tui/src/ui/sandbox_detail.rs +++ b/crates/openshell-tui/src/ui/sandbox_detail.rs @@ -73,29 +73,42 @@ pub fn draw(frame: &mut Frame<'_>, app: &App, area: Rect) { Span::styled(age, t.text), ]); - // Row 3: Providers + // Row 3: Labels + let labels_str = app + .sandbox_labels + .get(idx) + .filter(|s| !s.is_empty()) + .map(String::as_str) + .unwrap_or("none"); + let row3 = Line::from(vec![ + Span::styled(" Labels: ", t.muted), + Span::styled(labels_str, t.text), + ]); + + // Row 4: Providers let providers_str = if app.sandbox_providers_list.is_empty() { "none".to_string() } else { app.sandbox_providers_list.join(", ") }; - let row3 = Line::from(vec![ + let row4 = Line::from(vec![ Span::styled(" Providers: ", t.muted), Span::styled(providers_str, t.text), ]); - // Row 4: Forwarded Ports + // Row 5: Forwarded Ports let forwards_str = app .sandbox_notes .get(idx) .filter(|s| !s.is_empty()) - .map_or_else(|| "none".to_string(), Clone::clone); - let row4 = Line::from(vec![ + .map(String::as_str) + .unwrap_or("none"); + let row5 = Line::from(vec![ Span::styled(" Forwards: ", t.muted), Span::styled(forwards_str, t.text), ]); - let mut lines = vec![Line::from(""), row1, row2, row3, row4]; + let mut lines = vec![Line::from(""), row1, row2, row3, row4, row5]; // Show global policy indicator when the sandbox's policy is managed at // gateway scope. 
diff --git a/crates/openshell-tui/src/ui/sandboxes.rs b/crates/openshell-tui/src/ui/sandboxes.rs index 39f6411c4..3fb8e3a3b 100644 --- a/crates/openshell-tui/src/ui/sandboxes.rs +++ b/crates/openshell-tui/src/ui/sandboxes.rs @@ -16,6 +16,7 @@ pub fn draw(frame: &mut Frame<'_>, app: &App, area: Rect, focused: bool) { Cell::from(Span::styled("CREATED", t.muted)), Cell::from(Span::styled("AGE", t.muted)), Cell::from(Span::styled("IMAGE", t.muted)), + Cell::from(Span::styled("LABELS", t.muted)), Cell::from(Span::styled("NOTES", t.muted)), ]) .bottom_margin(1); @@ -27,6 +28,7 @@ pub fn draw(frame: &mut Frame<'_>, app: &App, area: Rect, focused: bool) { let created = app.sandbox_created.get(i).map_or("", String::as_str); let age = app.sandbox_ages.get(i).map_or("", String::as_str); let image = app.sandbox_images.get(i).map_or("", String::as_str); + let labels = app.sandbox_labels.get(i).map_or("", String::as_str); let notes = app.sandbox_notes.get(i).map_or("", String::as_str); let draft_count = app.sandbox_draft_counts.get(i).copied().unwrap_or(0); @@ -64,6 +66,7 @@ pub fn draw(frame: &mut Frame<'_>, app: &App, area: Rect, focused: bool) { Cell::from(Span::styled(created, t.muted)), Cell::from(Span::styled(age, t.muted)), Cell::from(Span::styled(image, t.muted)), + Cell::from(Span::styled(labels, t.muted)), Cell::from(Span::styled(notes, t.muted)), ]) }) @@ -74,8 +77,9 @@ pub fn draw(frame: &mut Frame<'_>, app: &App, area: Rect, focused: bool) { Constraint::Percentage(10), Constraint::Percentage(15), Constraint::Percentage(8), - Constraint::Percentage(27), Constraint::Percentage(20), + Constraint::Percentage(15), + Constraint::Percentage(12), ]; let border_style = if focused { t.border_focused } else { t.border }; diff --git a/e2e/rust/tests/sandbox_labels.rs b/e2e/rust/tests/sandbox_labels.rs new file mode 100644 index 000000000..5b682ed26 --- /dev/null +++ b/e2e/rust/tests/sandbox_labels.rs @@ -0,0 +1,242 @@ +// SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA 
CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +use std::process::Stdio; + +use openshell_e2e::harness::binary::openshell_cmd; +use openshell_e2e::harness::output::{extract_field, strip_ansi}; + +fn normalize_output(output: &str) -> String { + let stripped = strip_ansi(output).replace('\r', ""); + let mut cleaned = String::with_capacity(stripped.len()); + + for ch in stripped.chars() { + match ch { + '\u{8}' => { + cleaned.pop(); + } + '\u{4}' => {} + _ => cleaned.push(ch), + } + } + + cleaned +} + +fn extract_sandbox_name(output: &str) -> Option { + if let Some((_, rest)) = output.split_once("Created sandbox:") { + return rest.split_whitespace().next().map(ToOwned::to_owned); + } + + extract_field(output, "Created sandbox").or_else(|| extract_field(output, "Name")) +} + +async fn create_sandbox_with_labels(name: &str, labels: &[(&str, &str)]) -> String { + let mut cmd = openshell_cmd(); + cmd.args(["sandbox", "create", "--name", name]); + + for (key, value) in labels { + cmd.arg("--label").arg(format!("{key}={value}")); + } + + cmd.args(["--", "echo", "test"]) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()); + + let output = cmd.output().await.expect("spawn openshell sandbox create"); + let stdout = String::from_utf8_lossy(&output.stdout).to_string(); + let stderr = String::from_utf8_lossy(&output.stderr).to_string(); + let combined = normalize_output(&format!("{stdout}{stderr}")); + + assert!( + output.status.success(), + "sandbox create should succeed (exit {:?}):\n{combined}", + output.status.code() + ); + + extract_sandbox_name(&combined).expect("sandbox name should be present in output") +} + +async fn list_sandboxes_with_selector(selector: &str) -> Vec { + let mut cmd = openshell_cmd(); + cmd.args(["sandbox", "list", "--names"]); + + if !selector.is_empty() { + cmd.arg("--selector").arg(selector); + } + + cmd.stdout(Stdio::piped()).stderr(Stdio::piped()); + + let output = cmd.output().await.expect("spawn 
openshell sandbox list"); + let stdout = String::from_utf8_lossy(&output.stdout).to_string(); + let stderr = String::from_utf8_lossy(&output.stderr).to_string(); + let combined = normalize_output(&format!("{stdout}{stderr}")); + + assert!( + output.status.success(), + "sandbox list should succeed (exit {:?}):\n{combined}", + output.status.code() + ); + + combined + .lines() + .map(str::trim) + .filter(|line| !line.is_empty()) + .map(ToOwned::to_owned) + .collect() +} + +async fn get_sandbox_details(name: &str) -> String { + let mut cmd = openshell_cmd(); + cmd.args(["sandbox", "get", name]) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()); + + let output = cmd.output().await.expect("spawn openshell sandbox get"); + let stdout = String::from_utf8_lossy(&output.stdout).to_string(); + let stderr = String::from_utf8_lossy(&output.stderr).to_string(); + let combined = normalize_output(&format!("{stdout}{stderr}")); + + assert!( + output.status.success(), + "sandbox get should succeed (exit {:?}):\n{combined}", + output.status.code() + ); + + combined +} + +async fn delete_sandbox(name: &str) { + let mut cmd = openshell_cmd(); + cmd.args(["sandbox", "delete", name]) + .stdout(Stdio::null()) + .stderr(Stdio::null()); + let _ = cmd.status().await; +} + +#[tokio::test] +async fn sandbox_labels_are_stored_and_filterable() { + // Create sandboxes with different labels + let name1 = create_sandbox_with_labels( + "e2e-label-test-dev-backend", + &[("env", "dev"), ("team", "backend")], + ) + .await; + + let name2 = create_sandbox_with_labels( + "e2e-label-test-staging-backend", + &[("env", "staging"), ("team", "backend")], + ) + .await; + + let name3 = create_sandbox_with_labels( + "e2e-label-test-prod-frontend", + &[("env", "prod"), ("team", "frontend")], + ) + .await; + + let name4 = create_sandbox_with_labels( + "e2e-label-test-dev-data", + &[("env", "dev"), ("team", "data")], + ) + .await; + + // Test 1: Verify labels are stored in sandbox metadata + let details = 
get_sandbox_details(&name1).await; + assert!( + details.contains("env: dev"), + "sandbox should have env=dev label in details:\n{details}" + ); + assert!( + details.contains("team: backend"), + "sandbox should have team=backend label in details:\n{details}" + ); + + // Test 2: Filter by single label (env=dev) + let dev_sandboxes = list_sandboxes_with_selector("env=dev").await; + assert!( + dev_sandboxes.contains(&name1), + "env=dev filter should include {name1}, got: {dev_sandboxes:?}" + ); + assert!( + dev_sandboxes.contains(&name4), + "env=dev filter should include {name4}, got: {dev_sandboxes:?}" + ); + assert!( + !dev_sandboxes.contains(&name2), + "env=dev filter should not include staging sandbox {name2}, got: {dev_sandboxes:?}" + ); + assert!( + !dev_sandboxes.contains(&name3), + "env=dev filter should not include prod sandbox {name3}, got: {dev_sandboxes:?}" + ); + + // Test 3: Filter by single label (team=backend) + let backend_sandboxes = list_sandboxes_with_selector("team=backend").await; + assert!( + backend_sandboxes.contains(&name1), + "team=backend filter should include {name1}, got: {backend_sandboxes:?}" + ); + assert!( + backend_sandboxes.contains(&name2), + "team=backend filter should include {name2}, got: {backend_sandboxes:?}" + ); + assert!( + !backend_sandboxes.contains(&name3), + "team=backend filter should not include frontend sandbox {name3}, got: {backend_sandboxes:?}" + ); + assert!( + !backend_sandboxes.contains(&name4), + "team=backend filter should not include data sandbox {name4}, got: {backend_sandboxes:?}" + ); + + // Test 4: Filter by multiple labels (AND logic: env=dev,team=backend) + let dev_backend_sandboxes = list_sandboxes_with_selector("env=dev,team=backend").await; + assert_eq!( + dev_backend_sandboxes + .iter() + .filter(|name| [&name1, &name2, &name3, &name4].contains(&name)) + .count(), + 1, + "env=dev,team=backend filter should return exactly 1 sandbox, got: {dev_backend_sandboxes:?}" + ); + assert!( + 
dev_backend_sandboxes.contains(&name1), + "env=dev,team=backend filter should include {name1}, got: {dev_backend_sandboxes:?}" + ); + + // Test 5: Filter by non-existent label value + let qa_sandboxes = list_sandboxes_with_selector("team=qa").await; + assert!( + !qa_sandboxes.contains(&name1) + && !qa_sandboxes.contains(&name2) + && !qa_sandboxes.contains(&name3) + && !qa_sandboxes.contains(&name4), + "team=qa filter should not return any of our test sandboxes, got: {qa_sandboxes:?}" + ); + + // Test 6: List all sandboxes (no filter) + let all_sandboxes = list_sandboxes_with_selector("").await; + assert!( + all_sandboxes.contains(&name1), + "list without filter should include all test sandboxes" + ); + assert!( + all_sandboxes.contains(&name2), + "list without filter should include all test sandboxes" + ); + assert!( + all_sandboxes.contains(&name3), + "list without filter should include all test sandboxes" + ); + assert!( + all_sandboxes.contains(&name4), + "list without filter should include all test sandboxes" + ); + + // Cleanup + delete_sandbox(&name1).await; + delete_sandbox(&name2).await; + delete_sandbox(&name3).await; + delete_sandbox(&name4).await; +} diff --git a/proto/datamodel.proto b/proto/datamodel.proto index f84d1e352..534b043ae 100644 --- a/proto/datamodel.proto +++ b/proto/datamodel.proto @@ -5,14 +5,33 @@ syntax = "proto3"; package openshell.datamodel.v1; -// Provider model stored by OpenShell. -message Provider { +// Kubernetes-style metadata shared by all top-level OpenShell domain objects. +// +// This structure provides consistent metadata (identity, labels, timestamps, +// versioning) across Sandbox, Provider, SshSession, and other resources. +message ObjectMeta { + // Stable object ID generated by the gateway. string id = 1; + + // Human-readable object name (unique per object type). string name = 2; + + // Milliseconds since Unix epoch when the object was created. 
+ int64 created_at_ms = 3; + + // Key-value labels for filtering and organization. + // Labels must follow Kubernetes conventions: alphanumeric + `-._/`, max 63 chars per segment. + map labels = 4; +} + +// Provider model stored by OpenShell. +message Provider { + // Kubernetes-style metadata (id, name, labels, timestamps, resource version). + ObjectMeta metadata = 1; // Canonical provider type slug (for example: "claude", "gitlab"). - string type = 3; + string type = 2; // Secret values used for authentication. - map credentials = 4; + map credentials = 3; // Non-secret provider configuration. - map config = 5; + map config = 4; } diff --git a/proto/inference.proto b/proto/inference.proto index a14ce70ea..743f245f9 100644 --- a/proto/inference.proto +++ b/proto/inference.proto @@ -5,6 +5,8 @@ syntax = "proto3"; package openshell.inference.v1; +import "datamodel.proto"; + // Inference service provides cluster inference configuration and bundle delivery. service Inference { // Return the resolved inference route bundle for sandbox-local execution. @@ -38,13 +40,10 @@ message ClusterInferenceConfig { // Storage envelope for the managed cluster inference route. message InferenceRoute { - string id = 1; + openshell.datamodel.v1.ObjectMeta metadata = 1; ClusterInferenceConfig config = 2; - // Object name ("inference.local" for the user-facing route, - // "sandbox-system" for the sandbox system-level route). - string name = 3; // Monotonic version incremented on every update. - uint64 version = 4; + uint64 version = 3; } message SetClusterInferenceRequest { diff --git a/proto/openshell.proto b/proto/openshell.proto index 2434f1a80..48a8fece2 100644 --- a/proto/openshell.proto +++ b/proto/openshell.proto @@ -174,23 +174,20 @@ message HealthResponse { // This is the canonical gateway-owned view of a sandbox. It merges user intent // (`spec`) with gateway-managed metadata and status derived from internal // compute-driver observations. 
+// +// Note: The `namespace` field has been removed from the public API. It remains +// in the internal `DriverSandbox` message as a compute-driver implementation detail. message Sandbox { - // Stable sandbox ID generated by the gateway. - string id = 1; - // User-visible sandbox name. - string name = 2; - // Namespace used by the backing compute platform. - string namespace = 3; + // Kubernetes-style metadata (id, name, labels, timestamps, resource version). + openshell.datamodel.v1.ObjectMeta metadata = 1; // Desired sandbox configuration submitted through the API. - SandboxSpec spec = 4; + SandboxSpec spec = 2; // Latest user-facing observed status derived by the gateway. - SandboxStatus status = 5; + SandboxStatus status = 3; // Gateway-derived lifecycle summary. - SandboxPhase phase = 6; - // Milliseconds since Unix epoch when the sandbox was created. - int64 created_at_ms = 7; + SandboxPhase phase = 4; // Currently active policy version (updated when sandbox reports loaded). - uint32 current_policy_version = 8; + uint32 current_policy_version = 5; } // Desired sandbox configuration provided through the public API. @@ -294,6 +291,8 @@ message CreateSandboxRequest { SandboxSpec spec = 1; // Optional user-supplied sandbox name. When empty the server generates one. string name = 2; + // Optional labels for the sandbox (key-value metadata). + map labels = 3; } // Get sandbox request. @@ -306,6 +305,8 @@ message GetSandboxRequest { message ListSandboxesRequest { uint32 limit = 1; uint32 offset = 2; + // Optional label selector for filtering (format: "key1=value1,key2=value2"). + string label_selector = 3; } // Delete sandbox request. @@ -436,8 +437,8 @@ message ExecSandboxEvent { // SSH session record stored in persistence. message SshSession { - // Unique id (token). - string id = 1; + // Kubernetes-style metadata (id, name, labels, timestamps, resource version). + openshell.datamodel.v1.ObjectMeta metadata = 1; // Sandbox id. 
string sandbox_id = 2; @@ -445,18 +446,12 @@ message SshSession { // Session token. string token = 3; - // Creation timestamp in milliseconds since epoch. - int64 created_at_ms = 4; + // Expiry timestamp in milliseconds since epoch. 0 means no expiry + // (backward-compatible default for sessions created before this field existed). + int64 expires_at_ms = 4; // Revoked flag. bool revoked = 5; - - // Human-friendly name (auto-generated if not provided). - string name = 6; - - // Expiry timestamp in milliseconds since epoch. 0 means no expiry - // (backward-compatible default for sessions created before this field existed). - int64 expires_at_ms = 7; } // Watch sandbox request. diff --git a/python/openshell/sandbox.py b/python/openshell/sandbox.py index eba79f0fd..a624c694d 100644 --- a/python/openshell/sandbox.py +++ b/python/openshell/sandbox.py @@ -38,7 +38,6 @@ class TlsConfig: class SandboxRef: id: str name: str - namespace: str phase: int @@ -577,9 +576,8 @@ def _serialize_python_callable( def _sandbox_ref(sandbox: openshell_pb2.Sandbox) -> SandboxRef: return SandboxRef( - id=sandbox.id, - name=sandbox.name, - namespace=sandbox.namespace, + id=sandbox.metadata.id if sandbox.metadata else "", + name=sandbox.metadata.name if sandbox.metadata else "", phase=sandbox.phase, ) diff --git a/tasks/python.toml b/tasks/python.toml index 1e53da7fb..31155bf6a 100644 --- a/tasks/python.toml +++ b/tasks/python.toml @@ -235,6 +235,9 @@ from pathlib import Path import re line_rewrites = { + "python/openshell/_proto/inference_pb2.py": [ + (r"^import datamodel_pb2 as datamodel__pb2$", "from . import datamodel_pb2 as datamodel__pb2"), + ], "python/openshell/_proto/inference_pb2_grpc.py": [ (r"^import inference_pb2 as inference__pb2$", "from . import inference_pb2 as inference__pb2"), ],