From cd690057694559cebc969c8f36583b85bb4aa848 Mon Sep 17 00:00:00 2001 From: Harsh Rawat Date: Sat, 21 Mar 2026 17:01:06 +0530 Subject: [PATCH 1/7] [shimV2] add plan9 device controller This change adds the plan9 device controller which can add/remove plan9 shares from a VM. The guest side operations are part of mount controller responsibility. Signed-off-by: Harsh Rawat --- internal/controller/device/plan9/doc.go | 21 +++ internal/controller/device/plan9/interface.go | 44 ++++++ internal/controller/device/plan9/plan9.go | 142 ++++++++++++++++++ internal/vm/vmmanager/plan9.go | 11 -- 4 files changed, 207 insertions(+), 11 deletions(-) create mode 100644 internal/controller/device/plan9/doc.go create mode 100644 internal/controller/device/plan9/interface.go create mode 100644 internal/controller/device/plan9/plan9.go diff --git a/internal/controller/device/plan9/doc.go b/internal/controller/device/plan9/doc.go new file mode 100644 index 0000000000..f61709877d --- /dev/null +++ b/internal/controller/device/plan9/doc.go @@ -0,0 +1,21 @@ +//go:build windows && !wcow + +// Package plan9 provides a controller for managing Plan9 file-share devices +// attached to a Utility VM (UVM). +// +// It handles attaching and detaching Plan9 shares to the VM via HCS modify +// calls. Guest-side mount operations (mapped-directory requests) are handled +// separately by the mount controller. +// +// The [Controller] interface is the primary entry point, with [Manager] as its +// concrete implementation. A single [Manager] manages all Plan9 shares for a UVM. +// +// # Lifecycle +// +// [Manager] tracks active shares by name in an internal map. +// +// - [Controller.AddToVM] adds a share and records its name in the map. +// If the HCS call fails, the share is not recorded. +// - [Controller.RemoveFromVM] removes a share and deletes its name from the map. +// If the share is not in the map, the call is a no-op. 
+package plan9 diff --git a/internal/controller/device/plan9/interface.go b/internal/controller/device/plan9/interface.go new file mode 100644 index 0000000000..d1c5e78e39 --- /dev/null +++ b/internal/controller/device/plan9/interface.go @@ -0,0 +1,44 @@ +//go:build windows && !wcow + +package plan9 + +import ( + "context" + + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" +) + +// Controller manages the lifecycle of Plan9 shares attached to a UVM. +type Controller interface { + // AddToVM adds a Plan9 share to the host VM and returns the generated share name. + // Guest-side mount is handled separately by the mount controller. + AddToVM(ctx context.Context, opts *AddOptions) (string, error) + + // RemoveFromVM removes a Plan9 share identified by shareName from the host VM. + RemoveFromVM(ctx context.Context, shareName string) error +} + +// AddOptions holds the configuration required to add a Plan9 share to the VM. +type AddOptions struct { + // HostPath is the path on the host to share into the VM. + HostPath string + + // ReadOnly indicates whether the share should be mounted read-only. + ReadOnly bool + + // Restrict enables single-file mapping mode for the share. + Restrict bool + + // AllowedNames is the list of file names allowed when Restrict is true. + AllowedNames []string +} + +// vmPlan9Manager manages adding and removing Plan9 shares on the host VM. +// Implemented by [vmmanager.UtilityVM]. +type vmPlan9Manager interface { + // AddPlan9 adds a plan 9 share to a running Utility VM. + AddPlan9(ctx context.Context, settings hcsschema.Plan9Share) error + + // RemovePlan9 removes a plan 9 share from a running Utility VM. 
+ RemovePlan9(ctx context.Context, settings hcsschema.Plan9Share) error +} diff --git a/internal/controller/device/plan9/plan9.go b/internal/controller/device/plan9/plan9.go new file mode 100644 index 0000000000..33749eea63 --- /dev/null +++ b/internal/controller/device/plan9/plan9.go @@ -0,0 +1,142 @@ +//go:build windows && !wcow + +package plan9 + +import ( + "context" + "fmt" + "strconv" + "sync" + + "github.com/Microsoft/hcsshim/internal/hcs" + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" + "github.com/Microsoft/hcsshim/internal/log" + "github.com/Microsoft/hcsshim/internal/logfields" + "github.com/Microsoft/hcsshim/internal/vm/vmutils" + + "github.com/sirupsen/logrus" +) + +// share-flag constants used in Plan9 HCS requests. +// +// These are marked private in the HCS schema. When public variants become +// available, we should replace these. +const ( + shareFlagsReadOnly int32 = 0x00000001 + shareFlagsLinuxMetadata int32 = 0x00000004 + shareFlagsRestrictFileAccess int32 = 0x00000080 +) + +// Manager is the concrete implementation of [Controller]. +type Manager struct { + mu sync.Mutex + + // shares is the set of currently configured Plan9 share names. + // Guarded by mu. + shares map[string]struct{} + + // noWritableFileShares disallows adding writable Plan9 shares. + noWritableFileShares bool + + // vmPlan9Mgr performs host-side Plan9 add/remove on the VM. + vmPlan9Mgr vmPlan9Manager + + // nameCounter is the monotonically increasing index used to + // generate unique share names. Guarded by mu. + nameCounter uint64 +} + +var _ Controller = (*Manager)(nil) + +// New creates a ready-to-use [Manager]. +func New( + vmPlan9Mgr vmPlan9Manager, + noWritableFileShares bool, +) *Manager { + return &Manager{ + vmPlan9Mgr: vmPlan9Mgr, + noWritableFileShares: noWritableFileShares, + shares: make(map[string]struct{}), + } +} + +// AddToVM adds a Plan9 share to the host VM and returns the generated share name. 
+func (m *Manager) AddToVM(ctx context.Context, opts *AddOptions) (_ string, err error) { + m.mu.Lock() + defer m.mu.Unlock() + + // Ensure that adding the share is allowed. + if !opts.ReadOnly && m.noWritableFileShares { + return "", fmt.Errorf("adding writable shares is denied: %w", hcs.ErrOperationDenied) + } + + // Build the Plan9 share flags bitmask from the caller-provided options. + flags := shareFlagsLinuxMetadata + if opts.ReadOnly { + flags |= shareFlagsReadOnly + } + if opts.Restrict { + flags |= shareFlagsRestrictFileAccess + } + + // Generate a unique share name from the nameCounter. + name := strconv.FormatUint(m.nameCounter, 10) + m.nameCounter++ + + ctx, _ = log.WithContext(ctx, logrus.WithField("shareName", name)) + + log.G(ctx).WithFields(logrus.Fields{ + logfields.HostPath: opts.HostPath, + logfields.ReadOnly: opts.ReadOnly, + "RestrictFileAccess": opts.Restrict, + "AllowedFiles": opts.AllowedNames, + }).Tracef("adding plan9 share to host VM") + + // Call into HCS to add the Plan9 share to the VM. + if err := m.vmPlan9Mgr.AddPlan9(ctx, hcsschema.Plan9Share{ + Name: name, + AccessName: name, + Path: opts.HostPath, + Port: vmutils.Plan9Port, + Flags: flags, + AllowedFiles: opts.AllowedNames, + }); err != nil { + return "", fmt.Errorf("add plan9 share %s to host: %w", name, err) + } + + m.shares[name] = struct{}{} + + log.G(ctx).Info("plan9 share added to host VM") + + return name, nil +} + +// RemoveFromVM removes the Plan9 share identified by shareName from the host VM. +func (m *Manager) RemoveFromVM(ctx context.Context, shareName string) error { + m.mu.Lock() + defer m.mu.Unlock() + + ctx, _ = log.WithContext(ctx, logrus.WithField("shareName", shareName)) + + if _, ok := m.shares[shareName]; !ok { + log.G(ctx).Debug("plan9 share not found, skipping removal") + return nil + } + + log.G(ctx).Debug("starting plan9 share removal") + + // Call into HCS to remove the share from the VM. 
+ if err := m.vmPlan9Mgr.RemovePlan9(ctx, hcsschema.Plan9Share{ + Name: shareName, + AccessName: shareName, + Port: vmutils.Plan9Port, + }); err != nil { + return fmt.Errorf("remove plan9 share %s from host: %w", shareName, err) + } + + delete(m.shares, shareName) + + log.G(ctx).Info("plan9 share removed from host VM") + + return nil +} diff --git a/internal/vm/vmmanager/plan9.go b/internal/vm/vmmanager/plan9.go index 3effcf8ac2..9222268477 100644 --- a/internal/vm/vmmanager/plan9.go +++ b/internal/vm/vmmanager/plan9.go @@ -11,17 +11,6 @@ import ( "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" ) -// Plan9Manager manages adding plan 9 shares to a Utility VM. -type Plan9Manager interface { - // AddPlan9 adds a plan 9 share to a running Utility VM. - AddPlan9(ctx context.Context, settings hcsschema.Plan9Share) error - - // RemovePlan9 removes a plan 9 share from a running Utility VM. - RemovePlan9(ctx context.Context, settings hcsschema.Plan9Share) error -} - -var _ Plan9Manager = (*UtilityVM)(nil) - func (uvm *UtilityVM) AddPlan9(ctx context.Context, settings hcsschema.Plan9Share) error { modification := &hcsschema.ModifySettingRequest{ RequestType: guestrequest.RequestTypeAdd, From d18fc91734c416e47241c980d07778fd71c08d93 Mon Sep 17 00:00:00 2001 From: Harsh Rawat Date: Wed, 25 Mar 2026 20:54:27 +0530 Subject: [PATCH 2/7] review comments: 1 Signed-off-by: Harsh Rawat --- internal/controller/device/plan9/doc.go | 61 ++++- internal/controller/device/plan9/interface.go | 44 ---- internal/controller/device/plan9/plan9.go | 228 +++++++++++++----- internal/controller/device/plan9/state.go | 55 +++++ internal/controller/device/plan9/types.go | 85 +++++++ 5 files changed, 360 insertions(+), 113 deletions(-) delete mode 100644 internal/controller/device/plan9/interface.go create mode 100644 internal/controller/device/plan9/state.go create mode 100644 internal/controller/device/plan9/types.go diff --git a/internal/controller/device/plan9/doc.go 
b/internal/controller/device/plan9/doc.go index f61709877d..08c2ddc9b7 100644 --- a/internal/controller/device/plan9/doc.go +++ b/internal/controller/device/plan9/doc.go @@ -1,21 +1,62 @@ //go:build windows && !wcow -// Package plan9 provides a controller for managing Plan9 file-share devices +// Package plan9 provides a manager for managing Plan9 file-share devices // attached to a Utility VM (UVM). // -// It handles attaching and detaching Plan9 shares to the VM via HCS modify +// It handles adding and removing Plan9 shares on the host side via HCS modify // calls. Guest-side mount operations (mapped-directory requests) are handled -// separately by the mount controller. +// separately by the mount manager. // -// The [Controller] interface is the primary entry point, with [Manager] as its -// concrete implementation. A single [Manager] manages all Plan9 shares for a UVM. +// # Deduplication and Reference Counting +// +// [Manager] deduplicates shares: if two callers add a share with identical +// [AddOptions], the second call reuses the existing share and increments an +// internal reference count rather than issuing a second HCS call. The share is +// only removed from the VM when the last caller invokes [Manager.RemoveFromVM]. // // # Lifecycle // -// [Manager] tracks active shares by name in an internal map. +// Each share progresses through the states below. +// The happy path runs down the left column; the error path is on the right. 
+// +// Allocate entry for the share +// │ +// ▼ +// ┌─────────────────────┐ +// │ sharePending │ +// └──────────┬──────────┘ +// │ +// ┌───────┴────────────────────────────────┐ +// │ AddPlan9 succeeds │ AddPlan9 fails +// ▼ ▼ +// ┌─────────────────────┐ ┌──────────────────────┐ +// │ shareAdded │ │ shareInvalid │ +// └──────────┬──────────┘ └──────────────────────┘ +// │ RemovePlan9 succeeds (auto-removed from map) +// ▼ +// ┌─────────────────────┐ +// │ shareRemoved │ ← terminal; entry removed from map +// └─────────────────────┘ +// +// State descriptions: +// +// - [sharePending]: entered when a new entry is allocated (by [Manager.ResolveShareName] +// or the first [Manager.AddToVM] call). No HCS call has been made yet. +// - [shareAdded]: entered once [vmPlan9Manager.AddPlan9] succeeds; +// the share is live on the VM. +// - [shareInvalid]: entered when [vmPlan9Manager.AddPlan9] fails; +// the map entry is removed immediately so the next call can retry. +// - [shareRemoved]: terminal state entered once [vmPlan9Manager.RemovePlan9] succeeds. +// +// Method summary: // -// - [Controller.AddToVM] adds a share and records its name in the map. -// If the HCS call fails, the share is not recorded. -// - [Controller.RemoveFromVM] removes a share and deletes its name from the map. -// If the share is not in the map, the call is a no-op. +// - [Manager.ResolveShareName] pre-allocates a share name for the given [AddOptions] +// without issuing any HCS call. If a matching share is already tracked, +// the existing name is returned. This is useful for resolving downstream +// resource paths (e.g., guest mount paths) before the share is live. +// - [Manager.AddToVM] attaches the share, driving the HCS AddPlan9 call on +// the first caller and incrementing the reference count on subsequent ones. +// If the HCS call fails, the entry is removed so the next call can retry. 
+// - [Manager.RemoveFromVM] decrements the reference count and tears down the +// share only when the count reaches zero. package plan9 diff --git a/internal/controller/device/plan9/interface.go b/internal/controller/device/plan9/interface.go deleted file mode 100644 index d1c5e78e39..0000000000 --- a/internal/controller/device/plan9/interface.go +++ /dev/null @@ -1,44 +0,0 @@ -//go:build windows && !wcow - -package plan9 - -import ( - "context" - - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" -) - -// Controller manages the lifecycle of Plan9 shares attached to a UVM. -type Controller interface { - // AddToVM adds a Plan9 share to the host VM and returns the generated share name. - // Guest-side mount is handled separately by the mount controller. - AddToVM(ctx context.Context, opts *AddOptions) (string, error) - - // RemoveFromVM removes a Plan9 share identified by shareName from the host VM. - RemoveFromVM(ctx context.Context, shareName string) error -} - -// AddOptions holds the configuration required to add a Plan9 share to the VM. -type AddOptions struct { - // HostPath is the path on the host to share into the VM. - HostPath string - - // ReadOnly indicates whether the share should be mounted read-only. - ReadOnly bool - - // Restrict enables single-file mapping mode for the share. - Restrict bool - - // AllowedNames is the list of file names allowed when Restrict is true. - AllowedNames []string -} - -// vmPlan9Manager manages adding and removing Plan9 shares on the host VM. -// Implemented by [vmmanager.UtilityVM]. -type vmPlan9Manager interface { - // AddPlan9 adds a plan 9 share to a running Utility VM. - AddPlan9(ctx context.Context, settings hcsschema.Plan9Share) error - - // RemovePlan9 removes a plan 9 share from a running Utility VM. 
- RemovePlan9(ctx context.Context, settings hcsschema.Plan9Share) error -} diff --git a/internal/controller/device/plan9/plan9.go b/internal/controller/device/plan9/plan9.go index 33749eea63..a8b7cc6210 100644 --- a/internal/controller/device/plan9/plan9.go +++ b/internal/controller/device/plan9/plan9.go @@ -27,13 +27,14 @@ const ( shareFlagsRestrictFileAccess int32 = 0x00000080 ) -// Manager is the concrete implementation of [Controller]. +// Manager is the concrete implementation which manages plan9 shares to the UVM. type Manager struct { + // mu protects the shares map and serializes name allocation across concurrent callers. mu sync.Mutex - // shares is the set of currently configured Plan9 share names. - // Guarded by mu. - shares map[string]struct{} + // shares maps share name → shareEntry for every active or pending share. + // Access must be guarded by mu. + shares map[string]*shareEntry // noWritableFileShares disallows adding writable Plan9 shares. noWritableFileShares bool @@ -41,13 +42,11 @@ type Manager struct { // vmPlan9Mgr performs host-side Plan9 add/remove on the VM. vmPlan9Mgr vmPlan9Manager - // nameCounter is the monotonically increasing index used to - // generate unique share names. Guarded by mu. + // nameCounter is the monotonically increasing index used to generate unique share names. + // Access must be guarded by mu. nameCounter uint64 } -var _ Controller = (*Manager)(nil) - // New creates a ready-to-use [Manager]. func New( vmPlan9Mgr vmPlan9Manager, @@ -56,87 +55,198 @@ func New( return &Manager{ vmPlan9Mgr: vmPlan9Mgr, noWritableFileShares: noWritableFileShares, - shares: make(map[string]struct{}), + shares: make(map[string]*shareEntry), } } +// ResolveShareName pre-emptively allocates a share name for the given [AddOptions] and returns it. +// If a matching share is already tracked, the existing name is returned without +// allocating a new entry. 
ResolveShareName does not drive any HCS call or increment the +// reference count; callers must follow up with [Manager.AddToVM] to claim the share. +func (m *Manager) ResolveShareName(ctx context.Context, opts *AddOptions) (string, error) { + if !opts.ReadOnly && m.noWritableFileShares { + return "", fmt.Errorf("adding writable shares is denied: %w", hcs.ErrOperationDenied) + } + + log.G(ctx).WithField(logfields.HostPath, opts.HostPath).Debug("resolving plan9 share name") + + entry := m.getOrAllocateEntry(ctx, opts) + return entry.name, nil +} + // AddToVM adds a Plan9 share to the host VM and returns the generated share name. +// If a share with identical [AddOptions] is already added or in flight, AddToVM +// blocks until that operation completes and returns the share name, incrementing +// the internal reference count. func (m *Manager) AddToVM(ctx context.Context, opts *AddOptions) (_ string, err error) { - m.mu.Lock() - defer m.mu.Unlock() - - // Ensure that adding the share is allowed. + // Validate write-share policy before touching shared state. if !opts.ReadOnly && m.noWritableFileShares { return "", fmt.Errorf("adding writable shares is denied: %w", hcs.ErrOperationDenied) } - // Build the Plan9 share flags bitmask from the caller-provided options. - flags := shareFlagsLinuxMetadata - if opts.ReadOnly { - flags |= shareFlagsReadOnly - } - if opts.Restrict { - flags |= shareFlagsRestrictFileAccess + entry := m.getOrAllocateEntry(ctx, opts) + + // Acquire the per-entry lock to check state and potentially drive the HCS call. + // Multiple goroutines requesting the same share will serialize here. + entry.mu.Lock() + defer entry.mu.Unlock() + + ctx, _ = log.WithContext(ctx, logrus.WithField("shareName", entry.name)) + + log.G(ctx).Debug("received share entry, checking state") + + switch entry.state { + case shareAdded: + // ============================================================================== + // Found an existing live share — reuse it. 
+ // ============================================================================== + entry.refCount++ + log.G(ctx).Debug("plan9 share already added to VM, reusing existing share") + return entry.name, nil + + case sharePending: + // ============================================================================== + // New share — we own the HCS call. + // Other callers requesting the same share will block on entry.mu until we + // transition the state out of sharePending. + // ============================================================================== + flags := shareFlagsLinuxMetadata + if opts.ReadOnly { + flags |= shareFlagsReadOnly + } + if opts.Restrict { + flags |= shareFlagsRestrictFileAccess + } + + log.G(ctx).WithFields(logrus.Fields{ + logfields.HostPath: opts.HostPath, + logfields.ReadOnly: opts.ReadOnly, + "RestrictFileAccess": opts.Restrict, + "AllowedFiles": opts.AllowedNames, + }).Trace("adding plan9 share to host VM") + + if err = m.vmPlan9Mgr.AddPlan9(ctx, hcsschema.Plan9Share{ + Name: entry.name, + AccessName: entry.name, + Path: opts.HostPath, + Port: vmutils.Plan9Port, + Flags: flags, + AllowedFiles: opts.AllowedNames, + }); err != nil { + // Transition to Invalid so that waiting goroutines see the real failure reason. + entry.state = shareInvalid + entry.stateErr = err + + // Remove from the map so subsequent calls can retry with a fresh entry. + m.mu.Lock() + delete(m.shares, entry.name) + m.mu.Unlock() + + return "", fmt.Errorf("add plan9 share %s to host: %w", entry.name, err) + } + + entry.state = shareAdded + entry.refCount++ + + log.G(ctx).Info("plan9 share added to host VM") + + return entry.name, nil + + case shareInvalid: + // ============================================================================== + // A previous AddPlan9 call for this entry failed. + // ============================================================================== + // Return the original error. 
The map entry has already been removed + // by the goroutine that drove the failed add. + return "", fmt.Errorf("previous attempt to add plan9 share %s to VM failed: %w", + entry.name, entry.stateErr) + + default: + return "", fmt.Errorf("plan9 share in unexpected state %s during add", entry.state) } +} - // Generate a unique share name from the nameCounter. - name := strconv.FormatUint(m.nameCounter, 10) - m.nameCounter++ +// getOrAllocateEntry either reuses an existing [shareEntry] whose options match opts, +// or allocates a new pending entry with a freshly generated name. +// The returned entry's refCount is not incremented; callers that claim the share +// must increment it themselves. +func (m *Manager) getOrAllocateEntry(ctx context.Context, opts *AddOptions) *shareEntry { + m.mu.Lock() + defer m.mu.Unlock() - ctx, _ = log.WithContext(ctx, logrus.WithField("shareName", name)) - - log.G(ctx).WithFields(logrus.Fields{ - logfields.HostPath: opts.HostPath, - logfields.ReadOnly: opts.ReadOnly, - "RestrictFileAccess": opts.Restrict, - "AllowedFiles": opts.AllowedNames, - }).Tracef("adding plan9 share to host VM") - - // Call into HCS to add the Plan9 share to the VM. - if err := m.vmPlan9Mgr.AddPlan9(ctx, hcsschema.Plan9Share{ - Name: name, - AccessName: name, - Path: opts.HostPath, - Port: vmutils.Plan9Port, - Flags: flags, - AllowedFiles: opts.AllowedNames, - }); err != nil { - return "", fmt.Errorf("add plan9 share %s to host: %w", name, err) + // Reuse an existing entry if its options match the caller's. 
+ for _, existing := range m.shares { + if optionsMatch(existing.opts, opts) { + return existing + } } - m.shares[name] = struct{}{} + log.G(ctx).Debug("no existing plan9 share found for options, allocating new entry") - log.G(ctx).Info("plan9 share added to host VM") + name := strconv.FormatUint(m.nameCounter, 10) + m.nameCounter++ - return name, nil + entry := &shareEntry{ + opts: opts, + name: name, + state: sharePending, + // refCount is 0; it will be incremented by the goroutine that drives AddPlan9. + refCount: 0, + } + m.shares[name] = entry + return entry } // RemoveFromVM removes the Plan9 share identified by shareName from the host VM. +// If the share is held by multiple callers, RemoveFromVM decrements the reference +// count and returns without tearing down the share until the last caller removes it. func (m *Manager) RemoveFromVM(ctx context.Context, shareName string) error { - m.mu.Lock() - defer m.mu.Unlock() - ctx, _ = log.WithContext(ctx, logrus.WithField("shareName", shareName)) - if _, ok := m.shares[shareName]; !ok { + m.mu.Lock() + entry := m.shares[shareName] + m.mu.Unlock() + + if entry == nil { log.G(ctx).Debug("plan9 share not found, skipping removal") return nil } - log.G(ctx).Debug("starting plan9 share removal") + entry.mu.Lock() + defer entry.mu.Unlock() - // Call into HCS to remove the share from the VM. - if err := m.vmPlan9Mgr.RemovePlan9(ctx, hcsschema.Plan9Share{ - Name: shareName, - AccessName: shareName, - Port: vmutils.Plan9Port, - }); err != nil { - return fmt.Errorf("remove plan9 share %s from host: %w", shareName, err) + if entry.state == shareInvalid { + // AddPlan9 never succeeded; nothing to remove from HCS. 
+ return nil } - delete(m.shares, shareName) + if entry.refCount > 1 { + entry.refCount-- + log.G(ctx).Debug("plan9 share still in use by other callers, not removing from VM") + return nil + } - log.G(ctx).Info("plan9 share removed from host VM") + // refCount is 0 (pre-allocated via ResolveShareName but never added) or 1 (last caller). + // Only call RemovePlan9 when the share was actually added to the VM. + if entry.state == shareAdded { + log.G(ctx).Debug("starting plan9 share removal") + + if err := m.vmPlan9Mgr.RemovePlan9(ctx, hcsschema.Plan9Share{ + Name: shareName, + AccessName: shareName, + Port: vmutils.Plan9Port, + }); err != nil { + return fmt.Errorf("remove plan9 share %s from host: %w", shareName, err) + } + + entry.state = shareRemoved + log.G(ctx).Info("plan9 share removed from host VM") + } + + // Clean up from the map regardless of whether AddPlan9 was ever called. + m.mu.Lock() + delete(m.shares, shareName) + m.mu.Unlock() return nil } diff --git a/internal/controller/device/plan9/state.go b/internal/controller/device/plan9/state.go new file mode 100644 index 0000000000..edd8212bf8 --- /dev/null +++ b/internal/controller/device/plan9/state.go @@ -0,0 +1,55 @@ +//go:build windows && !wcow + +package plan9 + +// shareState represents the current state of a Plan9 share's lifecycle. +// +// The normal progression is: +// +// sharePending → shareAdded → shareRemoved +// +// If AddPlan9 fails, the owning goroutine moves the share to +// shareInvalid and records the error in [shareEntry.stateErr]. Other goroutines +// waiting on the same entry observe the invalid state and receive the original error. +// The entry is removed from the map immediately after the transition. 
+// +// Full state-transition table: +// +// Current State │ Trigger │ Next State +// ───────────────┼───────────────────────────┼───────────────────────────── +// sharePending │ AddPlan9 succeeds │ shareAdded +// sharePending │ AddPlan9 fails │ shareInvalid +// shareAdded │ RemovePlan9 succeeds │ shareRemoved +// shareRemoved │ (terminal — no transitions)│ — +// shareInvalid │ entry removed from map │ — +type shareState int + +const ( + // sharePending is the initial state; AddPlan9 has not yet completed. + sharePending shareState = iota + + // shareAdded means AddPlan9 succeeded; the share is live on the VM. + shareAdded + + // shareRemoved means RemovePlan9 succeeded. This is a terminal state. + shareRemoved + + // shareInvalid means AddPlan9 failed. + shareInvalid +) + +// String returns a human-readable name for the [shareState]. +func (s shareState) String() string { + switch s { + case sharePending: + return "Pending" + case shareAdded: + return "Added" + case shareRemoved: + return "Removed" + case shareInvalid: + return "Invalid" + default: + return "Unknown" + } +} diff --git a/internal/controller/device/plan9/types.go b/internal/controller/device/plan9/types.go new file mode 100644 index 0000000000..5828774bc0 --- /dev/null +++ b/internal/controller/device/plan9/types.go @@ -0,0 +1,85 @@ +//go:build windows && !wcow + +package plan9 + +import ( + "context" + "sync" + + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" +) + +// AddOptions holds the configuration required to add a Plan9 share to the VM. +type AddOptions struct { + // HostPath is the path on the host to share into the VM. + HostPath string + + // ReadOnly indicates whether the share should be mounted read-only. + ReadOnly bool + + // Restrict enables single-file mapping mode for the share. + Restrict bool + + // AllowedNames is the list of file names allowed when Restrict is true. 
+ AllowedNames []string +} + +// vmPlan9Manager manages adding and removing Plan9 shares on the host VM. +// Implemented by [vmmanager.UtilityVM]. +type vmPlan9Manager interface { + // AddPlan9 adds a plan 9 share to a running Utility VM. + AddPlan9(ctx context.Context, settings hcsschema.Plan9Share) error + + // RemovePlan9 removes a plan 9 share from a running Utility VM. + RemovePlan9(ctx context.Context, settings hcsschema.Plan9Share) error +} + +// ============================================================================== +// INTERNAL DATA STRUCTURES +// Types below this line are unexported and used for state tracking. +// ============================================================================== + +// shareEntry records one Plan9 share's full lifecycle state and reference count. +type shareEntry struct { + // mu serializes state transitions. + mu sync.Mutex + + // opts is the immutable share parameters used to match duplicate add requests. + opts *AddOptions + + // name is the HCS-level identifier for this share, generated at allocation time. + name string + + // refCount is the number of active callers sharing this entry. + // Access must be guarded by mu. + refCount uint + + // state tracks the forward-only lifecycle position of this share. + // Access must be guarded by mu. + state shareState + + // stateErr records the error that caused a transition to [shareInvalid]. + // Waiters that find the entry in the invalid state return this error so + // that every caller sees the original failure reason. + stateErr error +} + +// optionsMatch reports whether two [AddOptions] values describe the same share. +// AllowedNames is compared in order. 
+func optionsMatch(a, b *AddOptions) bool { + if a == nil || b == nil { + return a == b + } + if a.HostPath != b.HostPath || a.ReadOnly != b.ReadOnly || a.Restrict != b.Restrict { + return false + } + if len(a.AllowedNames) != len(b.AllowedNames) { + return false + } + for i := range a.AllowedNames { + if a.AllowedNames[i] != b.AllowedNames[i] { + return false + } + } + return true +} From b2cff9965a52c20f9094a776e1682bf296c0b413 Mon Sep 17 00:00:00 2001 From: Harsh Rawat Date: Sun, 5 Apr 2026 06:18:41 +0530 Subject: [PATCH 3/7] review 1 Signed-off-by: Harsh Rawat --- .../controller/device/plan9/controller.go | 248 +++++++++ .../device/plan9/controller_test.go | 527 ++++++++++++++++++ internal/controller/device/plan9/doc.go | 103 ++-- internal/controller/device/plan9/mount/doc.go | 41 ++ .../device/plan9/mount/mocks/mock_mount.go | 94 ++++ .../controller/device/plan9/mount/mount.go | 169 ++++++ .../device/plan9/mount/mount_test.go | 330 +++++++++++ .../controller/device/plan9/mount/state.go | 58 ++ .../controller/device/plan9/mount/types.go | 32 ++ internal/controller/device/plan9/plan9.go | 252 --------- internal/controller/device/plan9/share/doc.go | 33 ++ .../device/plan9/share/mocks/mock_share.go | 94 ++++ .../controller/device/plan9/share/share.go | 244 ++++++++ .../device/plan9/share/share_test.go | 431 ++++++++++++++ .../controller/device/plan9/share/state.go | 54 ++ .../controller/device/plan9/share/types.go | 42 ++ internal/controller/device/plan9/state.go | 55 -- internal/controller/device/plan9/types.go | 91 +-- 18 files changed, 2458 insertions(+), 440 deletions(-) create mode 100644 internal/controller/device/plan9/controller.go create mode 100644 internal/controller/device/plan9/controller_test.go create mode 100644 internal/controller/device/plan9/mount/doc.go create mode 100644 internal/controller/device/plan9/mount/mocks/mock_mount.go create mode 100644 internal/controller/device/plan9/mount/mount.go create mode 100644 
internal/controller/device/plan9/mount/mount_test.go create mode 100644 internal/controller/device/plan9/mount/state.go create mode 100644 internal/controller/device/plan9/mount/types.go delete mode 100644 internal/controller/device/plan9/plan9.go create mode 100644 internal/controller/device/plan9/share/doc.go create mode 100644 internal/controller/device/plan9/share/mocks/mock_share.go create mode 100644 internal/controller/device/plan9/share/share.go create mode 100644 internal/controller/device/plan9/share/share_test.go create mode 100644 internal/controller/device/plan9/share/state.go create mode 100644 internal/controller/device/plan9/share/types.go delete mode 100644 internal/controller/device/plan9/state.go diff --git a/internal/controller/device/plan9/controller.go b/internal/controller/device/plan9/controller.go new file mode 100644 index 0000000000..0d9269c282 --- /dev/null +++ b/internal/controller/device/plan9/controller.go @@ -0,0 +1,248 @@ +//go:build windows + +package plan9 + +import ( + "context" + "fmt" + "strconv" + "sync" + + "github.com/Microsoft/go-winio/pkg/guid" + "github.com/Microsoft/hcsshim/internal/controller/device/plan9/mount" + "github.com/Microsoft/hcsshim/internal/controller/device/plan9/share" + "github.com/Microsoft/hcsshim/internal/log" + "github.com/Microsoft/hcsshim/internal/logfields" + "github.com/sirupsen/logrus" +) + +// Controller manages the full Plan9 share lifecycle — name allocation, VM +// attachment, guest mounting, and teardown. All operations are serialized +// by a single mutex. +// It is required that all callers: +// +// 1. Obtain a reservation using Reserve(). +// +// 2. Use the reservation in MapToGuest() to mount the share unto guest. +// +// 3. Call UnmapFromGuest() to release the reservation and all resources. +// +// If MapToGuest() fails, the caller must call UnmapFromGuest() to release the +// reservation and all resources. 
+// +// If UnmapFromGuest() fails, the caller must call UnmapFromGuest() again until +// it succeeds to release the reservation and all resources. +type Controller struct { + // mu serializes all public operations on the Controller. + mu sync.Mutex + + // vmPlan9 is the host-side interface for adding and removing Plan9 shares. + // Immutable after construction. + vmPlan9 vmPlan9 + + // linuxGuest is the guest-side interface for LCOW Plan9 operations. + // Immutable after construction. + linuxGuest guestPlan9 + + // noWritableFileShares disallows adding writable Plan9 shares. + // Immutable after construction. + noWritableFileShares bool + + // reservations maps a reservation ID to its share host path. + // Guarded by mu. + reservations map[guid.GUID]*reservation + + // sharesByHostPath maps a host path to its share for fast deduplication + // of share additions. Guarded by mu. + sharesByHostPath map[string]*share.Share + + // nameCounter is the monotonically increasing index used to generate + // unique share names. Guarded by mu. + nameCounter uint64 +} + +// New creates a new [Controller] for managing the plan9 shares on a VM. +func New(vm vmPlan9, linuxGuest guestPlan9, noWritableFileShares bool) *Controller { + return &Controller{ + vmPlan9: vm, + linuxGuest: linuxGuest, + noWritableFileShares: noWritableFileShares, + reservations: make(map[guid.GUID]*reservation), + sharesByHostPath: make(map[string]*share.Share), + } +} + +// Reserve reserves a reference-counted mapping entry for a Plan9 share based on +// the share host path. +// +// If an error is returned from this function, it is guaranteed that no +// reservation mapping was made and no UnmapFromGuest() call is necessary to +// clean up. +func (c *Controller) Reserve(ctx context.Context, shareConfig share.Config, mountConfig mount.Config) (guid.GUID, string, error) { + c.mu.Lock() + defer c.mu.Unlock() + + // Validate write-share policy before touching shared state. 
+ if !shareConfig.ReadOnly && c.noWritableFileShares { + return guid.GUID{}, "", fmt.Errorf("adding writable Plan9 shares is denied") + } + + ctx, _ = log.WithContext(ctx, logrus.WithField(logfields.HostPath, shareConfig.HostPath)) + log.G(ctx).Debug("reserving Plan9 share") + + // Generate a unique reservation ID. + id, err := guid.NewV4() + if err != nil { + return guid.GUID{}, "", fmt.Errorf("generate reservation ID: %w", err) + } + + // Check if the generated reservation ID already exists, which is extremely unlikely, + // but we want to be certain before proceeding with share creation. + if _, ok := c.reservations[id]; ok { + return guid.GUID{}, "", fmt.Errorf("reservation ID already exists: %s", id) + } + + // Create the reservation entry. + res := &reservation{ + hostPath: shareConfig.HostPath, + } + + var guestPath string + + // Check whether this host path already has an allocated share. + existingShare, ok := c.sharesByHostPath[shareConfig.HostPath] + + // We have an existing share for this host path — reserve a mount on it for this caller. + if ok { + // Verify the caller is requesting the same share configuration. + if !existingShare.Config().Equals(shareConfig) { + return guid.GUID{}, "", fmt.Errorf("cannot reserve ref on share with different config") + } + + // Set the share name. + res.name = existingShare.Name() + + // We have a share, now reserve a mount on it. + if _, err = existingShare.ReserveMount(ctx, mountConfig); err != nil { + return guid.GUID{}, "", fmt.Errorf("reserve mount on share %s: %w", existingShare.Name(), err) + } + + guestPath = existingShare.GuestPath() + } + + // If we don't have an existing share, we need to create one and reserve a mount on it. + if !ok { + // No existing share for this path — allocate a new one. + name := strconv.FormatUint(c.nameCounter, 10) + c.nameCounter++ + + // Create the Share and Mount in the reserved states. 
+ newShare := share.NewReserved(name, shareConfig) + if _, err = newShare.ReserveMount(ctx, mountConfig); err != nil { + return guid.GUID{}, "", fmt.Errorf("reserve mount on share %s: %w", name, err) + } + + c.sharesByHostPath[shareConfig.HostPath] = newShare + res.name = newShare.Name() + + guestPath = newShare.GuestPath() + } + + // Ensure our reservation is saved for all future operations. + c.reservations[id] = res + log.G(ctx).WithField("reservation", id).Debug("Plan9 share reserved") + + // Return the reserved guest path in addition to the reservation ID for caller convenience. + return id, guestPath, nil +} + +// MapToGuest adds the reserved share to the VM and mounts it inside the guest, +// returning the guest path. It is idempotent for a reservation that is already +// fully mapped. +func (c *Controller) MapToGuest(ctx context.Context, id guid.GUID) (string, error) { + c.mu.Lock() + defer c.mu.Unlock() + + // Check if the reservation exists. + res, ok := c.reservations[id] + if !ok { + return "", fmt.Errorf("reservation %s not found", id) + } + + // Validate if the host path has an associated share. + // This should be reserved by the Reserve() call. + existingShare := c.sharesByHostPath[res.hostPath] + if existingShare == nil { + return "", fmt.Errorf("share for host path %s not found", res.hostPath) + } + + log.G(ctx).WithField(logfields.HostPath, existingShare.HostPath()).Debug("mapping Plan9 share to guest") + + // Add the share to the VM (idempotent if already added). + if err := existingShare.AddToVM(ctx, c.vmPlan9); err != nil { + return "", fmt.Errorf("add share to VM: %w", err) + } + + // Mount the share inside the guest. 
+ guestPath, err := existingShare.MountToGuest(ctx, c.linuxGuest) + if err != nil { + return "", fmt.Errorf("mount share to guest: %w", err) + } + + log.G(ctx).WithField(logfields.UVMPath, guestPath).Debug("Plan9 share mapped to guest") + return guestPath, nil +} + +// UnmapFromGuest unmounts the share from the guest and, when all reservations +// for the share are released, removes the share from the VM. A failed call is +// retryable with the same reservation ID. +func (c *Controller) UnmapFromGuest(ctx context.Context, id guid.GUID) error { + c.mu.Lock() + defer c.mu.Unlock() + + ctx, _ = log.WithContext(ctx, logrus.WithField("res", id.String())) + + // Validate that the reservation exists before proceeding with teardown. + res, ok := c.reservations[id] + if !ok { + return fmt.Errorf("reservation %s not found", id) + } + + // Validate that the share exists before proceeding with teardown. + // This should be reserved by the Reserve() call. + existingShare := c.sharesByHostPath[res.hostPath] + if existingShare == nil { + // Share is gone — a sibling reservation already cleaned it up. + // Example: A and B reserve the same path; A's AddToVM fails + // (share→StateRemoved) and A's UnmapFromGuest deletes the map entry. + // B has nothing left to clean up, so just drop the reservation. + delete(c.reservations, id) + return nil + } + + log.G(ctx).WithField(logfields.HostPath, existingShare.HostPath()).Debug("unmapping Plan9 share from guest") + + // Unmount the share from the guest (ref-counted; only issues the guest + // call when this is the last res on the share). + if err := existingShare.UnmountFromGuest(ctx, c.linuxGuest); err != nil { + return fmt.Errorf("unmount share from guest: %w", err) + } + + // Remove the share from the VM when no mounts remain active. + if err := existingShare.RemoveFromVM(ctx, c.vmPlan9); err != nil { + return fmt.Errorf("remove share from VM: %w", err) + } + + // If the share is now fully removed, free its entry for reuse. 
+ // If it's used in other reservations, it will remain until the last one is released. + if existingShare.State() == share.StateRemoved { + delete(c.sharesByHostPath, existingShare.HostPath()) + log.G(ctx).Debug("Plan9 share freed") + } + + // Remove the res last so it remains available for retries if + // any earlier step above fails. + delete(c.reservations, id) + log.G(ctx).Debug("Plan9 share unmapped from guest") + return nil +} diff --git a/internal/controller/device/plan9/controller_test.go b/internal/controller/device/plan9/controller_test.go new file mode 100644 index 0000000000..a55f50a7a9 --- /dev/null +++ b/internal/controller/device/plan9/controller_test.go @@ -0,0 +1,527 @@ +//go:build windows && lcow + +package plan9 + +import ( + "context" + "errors" + "testing" + + "go.uber.org/mock/gomock" + + "github.com/Microsoft/hcsshim/internal/controller/device/plan9/mount" + mountmocks "github.com/Microsoft/hcsshim/internal/controller/device/plan9/mount/mocks" + "github.com/Microsoft/hcsshim/internal/controller/device/plan9/share" + sharemocks "github.com/Microsoft/hcsshim/internal/controller/device/plan9/share/mocks" + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" + "github.com/Microsoft/hcsshim/internal/protocol/guestresource" +) + +var ( + errVMAdd = errors.New("VM add failed") + errVMRemove = errors.New("VM remove failed") + errMount = errors.New("guest mount failed") + errUnmount = errors.New("guest unmount failed") +) + +// ───────────────────────────────────────────────────────────────────────────── +// Helper interfaces and test controller setup +// ───────────────────────────────────────────────────────────────────────────── + +// combinedVM satisfies vmPlan9 (share.VMPlan9Adder + share.VMPlan9Remover) +// by delegating to the individual sharemocks so each can carry independent expectations. 
+type combinedVM struct { + add *sharemocks.MockVMPlan9Adder + remove *sharemocks.MockVMPlan9Remover +} + +func (v *combinedVM) AddPlan9(ctx context.Context, settings hcsschema.Plan9Share) error { + return v.add.AddPlan9(ctx, settings) +} + +func (v *combinedVM) RemovePlan9(ctx context.Context, settings hcsschema.Plan9Share) error { + return v.remove.RemovePlan9(ctx, settings) +} + +// combinedGuest satisfies guestPlan9 (LinuxGuestPlan9Mounter + LinuxGuestPlan9Unmounter). +type combinedGuest struct { + mounter *mountmocks.MockLinuxGuestPlan9Mounter + unmounter *mountmocks.MockLinuxGuestPlan9Unmounter +} + +func (g *combinedGuest) AddLCOWMappedDirectory(ctx context.Context, settings guestresource.LCOWMappedDirectory) error { + return g.mounter.AddLCOWMappedDirectory(ctx, settings) +} + +func (g *combinedGuest) RemoveLCOWMappedDirectory(ctx context.Context, settings guestresource.LCOWMappedDirectory) error { + return g.unmounter.RemoveLCOWMappedDirectory(ctx, settings) +} + +type testController struct { + ctx context.Context + c *Controller + vmAdd *sharemocks.MockVMPlan9Adder + vmRemove *sharemocks.MockVMPlan9Remover + guestMount *mountmocks.MockLinuxGuestPlan9Mounter + guestUnmount *mountmocks.MockLinuxGuestPlan9Unmounter +} + +func newTestController(t *testing.T, noWritableFileShares bool) *testController { + t.Helper() + + ctrl := gomock.NewController(t) + vmAdd := sharemocks.NewMockVMPlan9Adder(ctrl) + vmRemove := sharemocks.NewMockVMPlan9Remover(ctrl) + guestMount := mountmocks.NewMockLinuxGuestPlan9Mounter(ctrl) + guestUnmount := mountmocks.NewMockLinuxGuestPlan9Unmounter(ctrl) + + vm := &combinedVM{add: vmAdd, remove: vmRemove} + guest := &combinedGuest{mounter: guestMount, unmounter: guestUnmount} + + return &testController{ + ctx: context.Background(), + c: New(vm, guest, noWritableFileShares), + vmAdd: vmAdd, + vmRemove: vmRemove, + guestMount: guestMount, + guestUnmount: guestUnmount, + } +} + +// 
───────────────────────────────────────────────────────────────────────────── +// Reserve tests +// ───────────────────────────────────────────────────────────────────────────── + +// TestReserve_NewShare verifies that reserving a new host path creates a share +// entry, returns a non-empty guest path, and stores a unique reservation ID. +func TestReserve_NewShare(t *testing.T) { + t.Parallel() + tc := newTestController(t, false) + + id, guestPath, err := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if guestPath == "" { + t.Error("expected non-empty guest path") + } + if len(tc.c.reservations) != 1 { + t.Errorf("expected 1 reservation, got %d", len(tc.c.reservations)) + } + if len(tc.c.sharesByHostPath) != 1 { + t.Errorf("expected 1 share, got %d", len(tc.c.sharesByHostPath)) + } + if _, ok := tc.c.reservations[id]; !ok { + t.Error("returned ID not present in reservations map") + } +} + +// TestReserve_SameHostPath_RefsExistingShare verifies that two reservations for +// the same host path reuse the existing share (only one share entry) and return +// distinct reservation IDs but the same guest path. 
+func TestReserve_SameHostPath_RefsExistingShare(t *testing.T) { + t.Parallel() + tc := newTestController(t, false) + + id1, gp1, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + id2, gp2, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + + if id1 == id2 { + t.Error("expected different reservation IDs for two callers on the same path") + } + if gp1 != gp2 { + t.Errorf("expected same guest path for same host path, got %q vs %q", gp1, gp2) + } + if len(tc.c.sharesByHostPath) != 1 { + t.Errorf("expected 1 share for same host path, got %d", len(tc.c.sharesByHostPath)) + } + if len(tc.c.reservations) != 2 { + t.Errorf("expected 2 reservations, got %d", len(tc.c.reservations)) + } +} + +// TestReserve_DifferentHostPaths_CreatesSeparateShares verifies that reserving +// two distinct host paths creates two independent share entries. +func TestReserve_DifferentHostPaths_CreatesSeparateShares(t *testing.T) { + t.Parallel() + tc := newTestController(t, false) + + _, _, _ = tc.c.Reserve(tc.ctx, share.Config{HostPath: "/path/a"}, mount.Config{}) + _, _, _ = tc.c.Reserve(tc.ctx, share.Config{HostPath: "/path/b"}, mount.Config{}) + + if len(tc.c.sharesByHostPath) != 2 { + t.Errorf("expected 2 shares, got %d", len(tc.c.sharesByHostPath)) + } +} + +// TestReserve_DifferentConfig_SameHostPath_Errors verifies that attempting to +// reserve a host path with a different config (e.g., ReadOnly differs) when a +// share already exists for that path returns an error. +func TestReserve_DifferentConfig_SameHostPath_Errors(t *testing.T) { + t.Parallel() + tc := newTestController(t, false) + + _, _, _ = tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + + // Same host path but read-only flag differs. 
+ _, _, err := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path", ReadOnly: true}, mount.Config{}) + if err == nil { + t.Fatal("expected error when re-reserving same host path with different config") + } +} + +// TestReserve_WritableDenied verifies that when the controller is constructed +// with noWritableFileShares=true, reserving a writable share is rejected before +// any share or mount state is created. +func TestReserve_WritableDenied(t *testing.T) { + t.Parallel() + tc := newTestController(t, true /* noWritableFileShares */) + + _, _, err := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path" /* writable */}, mount.Config{}) + if err == nil { + t.Fatal("expected error when adding writable share with noWritableFileShares=true") + } +} + +// TestReserve_ReadOnlyAllowedWhenWritableDenied verifies that read-only shares +// are still permitted when noWritableFileShares=true. +func TestReserve_ReadOnlyAllowedWhenWritableDenied(t *testing.T) { + t.Parallel() + tc := newTestController(t, true /* noWritableFileShares */) + + _, _, err := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path", ReadOnly: true /* readOnly */}, mount.Config{ReadOnly: true}) + if err != nil { + t.Fatalf("unexpected error for read-only share: %v", err) + } +} + +// TestReserve_NameCounterIncrements verifies that each new share created for a +// distinct host path increments the controller's nameCounter. 
+func TestReserve_NameCounterIncrements(t *testing.T) { + t.Parallel() + tc := newTestController(t, false) + + _, _, _ = tc.c.Reserve(tc.ctx, share.Config{HostPath: "/path/a"}, mount.Config{}) + _, _, _ = tc.c.Reserve(tc.ctx, share.Config{HostPath: "/path/b"}, mount.Config{}) + + if tc.c.nameCounter != 2 { + t.Errorf("expected nameCounter=2, got %d", tc.c.nameCounter) + } +} + +// ───────────────────────────────────────────────────────────────────────────── +// MapToGuest tests +// ───────────────────────────────────────────────────────────────────────────── + +// TestMapToGuest_HappyPath verifies a normal Reserve → MapToGuest flow: the +// share is added to the VM and mounted in the guest, returning a non-empty +// guest path that matches the one returned during Reserve. +func TestMapToGuest_HappyPath(t *testing.T) { + t.Parallel() + tc := newTestController(t, false) + + id, gp, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + + tc.vmAdd.EXPECT().AddPlan9(gomock.Any(), gomock.Any()).Return(nil) + tc.guestMount.EXPECT().AddLCOWMappedDirectory(gomock.Any(), gomock.Any()).Return(nil) + + guestPath, err := tc.c.MapToGuest(tc.ctx, id) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if guestPath == "" { + t.Error("expected non-empty guest path") + } + if guestPath != gp { + t.Errorf("expected guest path %q from MapToGuest to match reservation guest path %q", guestPath, gp) + } +} + +// TestMapToGuest_VMAddFails_Errors verifies that when the host-side AddPlan9 +// fails, the error propagates and no guest mount is attempted. 
+func TestMapToGuest_VMAddFails_Errors(t *testing.T) { + t.Parallel() + tc := newTestController(t, false) + + id, _, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + + tc.vmAdd.EXPECT().AddPlan9(gomock.Any(), gomock.Any()).Return(errVMAdd) + + _, err := tc.c.MapToGuest(tc.ctx, id) + if err == nil { + t.Fatal("expected error when VM add fails") + } +} + +// TestMapToGuest_GuestMountFails_RetryMapToGuest_Fails verifies the "forward" +// recovery path after a guest mount failure. Because the mount is now in the +// terminal unmounted state, a subsequent MapToGuest with the same reservation +// also fails. The caller must use UnmapFromGuest to clean up. +func TestMapToGuest_GuestMountFails_RetryMapToGuest_Fails(t *testing.T) { + t.Parallel() + tc := newTestController(t, false) + + id, _, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + + // First MapToGuest: VM add succeeds, guest mount fails. + tc.vmAdd.EXPECT().AddPlan9(gomock.Any(), gomock.Any()).Return(nil) + tc.guestMount.EXPECT().AddLCOWMappedDirectory(gomock.Any(), gomock.Any()).Return(errMount) + + _, err := tc.c.MapToGuest(tc.ctx, id) + if err == nil { + t.Fatal("expected error when guest mount fails") + } + + // Forward retry: MapToGuest again with the same reservation. + // AddToVM is idempotent (share already in StateAdded), but MountToGuest + // fails because the mount is in the terminal StateUnmounted. + _, err = tc.c.MapToGuest(tc.ctx, id) + if err == nil { + t.Fatal("expected error on retry MapToGuest after terminal mount failure") + } + + // Reservation should still exist — caller must call UnmapFromGuest. + if _, ok := tc.c.reservations[id]; !ok { + t.Error("reservation should still exist after failed retry") + } +} + +// TestMapToGuest_GuestMountFails_UnmapFromGuest_CleansUp verifies the "backward" +// recovery path after a guest mount failure. 
The caller invokes UnmapFromGuest +// which skips the guest unmount (mount was never established), removes the share +// from the VM, and cleans up both the reservation and share entries. +func TestMapToGuest_GuestMountFails_UnmapFromGuest_CleansUp(t *testing.T) { + t.Parallel() + tc := newTestController(t, false) + + id, _, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + + // MapToGuest: VM add succeeds, guest mount fails. + tc.vmAdd.EXPECT().AddPlan9(gomock.Any(), gomock.Any()).Return(nil) + tc.guestMount.EXPECT().AddLCOWMappedDirectory(gomock.Any(), gomock.Any()).Return(errMount) + + _, err := tc.c.MapToGuest(tc.ctx, id) + if err == nil { + t.Fatal("expected error when guest mount fails") + } + + // Backward path: UnmapFromGuest cleans up the share from the VM. + // No guest unmount is expected (mount was never established). + // VM remove IS expected (share was added to VM successfully). + tc.vmRemove.EXPECT().RemovePlan9(gomock.Any(), gomock.Any()).Return(nil) + + if err := tc.c.UnmapFromGuest(tc.ctx, id); err != nil { + t.Fatalf("UnmapFromGuest after failed mount: %v", err) + } + + // Everything should be cleaned up. + if len(tc.c.reservations) != 0 { + t.Errorf("expected 0 reservations after cleanup, got %d", len(tc.c.reservations)) + } + if len(tc.c.sharesByHostPath) != 0 { + t.Errorf("expected 0 shares after cleanup, got %d", len(tc.c.sharesByHostPath)) + } +} + +// TestMapToGuest_SharedPath_VMAddCalledOnce verifies that when two reservations +// share the same host path, AddPlan9 and AddLCOWMappedDirectory are each called +// exactly once — the second MapToGuest is a no-op that returns the existing +// guest path. 
+func TestMapToGuest_SharedPath_VMAddCalledOnce(t *testing.T) { + t.Parallel() + tc := newTestController(t, false) + + id1, _, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + id2, _, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + + // AddPlan9 and AddLCOWMappedDirectory each called exactly once. + tc.vmAdd.EXPECT().AddPlan9(gomock.Any(), gomock.Any()).Return(nil).Times(1) + tc.guestMount.EXPECT().AddLCOWMappedDirectory(gomock.Any(), gomock.Any()).Return(nil).Times(1) + + gp1, err := tc.c.MapToGuest(tc.ctx, id1) + if err != nil { + t.Fatalf("MapToGuest id1: %v", err) + } + gp2, err := tc.c.MapToGuest(tc.ctx, id2) + if err != nil { + t.Fatalf("MapToGuest id2: %v", err) + } + if gp1 != gp2 { + t.Errorf("expected same guest path for shared mount, got %q vs %q", gp1, gp2) + } +} + +// ───────────────────────────────────────────────────────────────────────────── +// UnmapFromGuest tests +// ───────────────────────────────────────────────────────────────────────────── + +// TestUnmapFromGuest_HappyPath verifies the successful teardown of a fully +// mapped share: the guest unmount and VM removal are issued, and both the +// reservation and share entries are cleaned up. 
+func TestUnmapFromGuest_HappyPath(t *testing.T) { + t.Parallel() + tc := newTestController(t, false) + + id, _, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + + tc.vmAdd.EXPECT().AddPlan9(gomock.Any(), gomock.Any()).Return(nil) + tc.guestMount.EXPECT().AddLCOWMappedDirectory(gomock.Any(), gomock.Any()).Return(nil) + _, _ = tc.c.MapToGuest(tc.ctx, id) + + tc.guestUnmount.EXPECT().RemoveLCOWMappedDirectory(gomock.Any(), gomock.Any()).Return(nil) + tc.vmRemove.EXPECT().RemovePlan9(gomock.Any(), gomock.Any()).Return(nil) + + if err := tc.c.UnmapFromGuest(tc.ctx, id); err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(tc.c.reservations) != 0 { + t.Errorf("expected 0 reservations after unmap, got %d", len(tc.c.reservations)) + } + if len(tc.c.sharesByHostPath) != 0 { + t.Errorf("expected 0 shares after unmap, got %d", len(tc.c.sharesByHostPath)) + } +} + +// TestUnmapFromGuest_GuestUnmountFails_Retryable verifies that a failed guest +// unmount leaves the reservation intact so the caller can retry. On a +// successful retry the mount is unmounted and the share is removed from the VM. +func TestUnmapFromGuest_GuestUnmountFails_Retryable(t *testing.T) { + t.Parallel() + tc := newTestController(t, false) + + id, _, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + + tc.vmAdd.EXPECT().AddPlan9(gomock.Any(), gomock.Any()).Return(nil) + tc.guestMount.EXPECT().AddLCOWMappedDirectory(gomock.Any(), gomock.Any()).Return(nil) + _, _ = tc.c.MapToGuest(tc.ctx, id) + + // First unmap: guest unmount fails. + tc.guestUnmount.EXPECT().RemoveLCOWMappedDirectory(gomock.Any(), gomock.Any()).Return(errUnmount) + if err := tc.c.UnmapFromGuest(tc.ctx, id); err == nil { + t.Fatal("expected error on failed guest unmount") + } + if _, ok := tc.c.reservations[id]; !ok { + t.Error("reservation should remain for retry after failed unmount") + } + + // Retry succeeds. 
+ tc.guestUnmount.EXPECT().RemoveLCOWMappedDirectory(gomock.Any(), gomock.Any()).Return(nil) + tc.vmRemove.EXPECT().RemovePlan9(gomock.Any(), gomock.Any()).Return(nil) + if err := tc.c.UnmapFromGuest(tc.ctx, id); err != nil { + t.Fatalf("retry UnmapFromGuest failed: %v", err) + } +} + +// TestUnmapFromGuest_VMRemoveFails_Retryable verifies that when the guest +// unmount succeeds but VM removal fails, the reservation is preserved for +// retry. On retry only VM removal is re-attempted — the guest unmount is not +// re-issued. +func TestUnmapFromGuest_VMRemoveFails_Retryable(t *testing.T) { + t.Parallel() + tc := newTestController(t, false) + + id, _, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + + tc.vmAdd.EXPECT().AddPlan9(gomock.Any(), gomock.Any()).Return(nil) + tc.guestMount.EXPECT().AddLCOWMappedDirectory(gomock.Any(), gomock.Any()).Return(nil) + _, _ = tc.c.MapToGuest(tc.ctx, id) + + // First unmap: guest unmount succeeds, VM remove fails. + tc.guestUnmount.EXPECT().RemoveLCOWMappedDirectory(gomock.Any(), gomock.Any()).Return(nil) + tc.vmRemove.EXPECT().RemovePlan9(gomock.Any(), gomock.Any()).Return(errVMRemove) + if err := tc.c.UnmapFromGuest(tc.ctx, id); err == nil { + t.Fatal("expected error on failed VM remove") + } + if _, ok := tc.c.reservations[id]; !ok { + t.Error("reservation should remain for retry after failed VM remove") + } + + // Retry succeeds. + tc.vmRemove.EXPECT().RemovePlan9(gomock.Any(), gomock.Any()).Return(nil) + if err := tc.c.UnmapFromGuest(tc.ctx, id); err != nil { + t.Fatalf("retry UnmapFromGuest failed: %v", err) + } +} + +// TestUnmapFromGuest_RefCounting_VMRemoveOnLastRef verifies that with two +// reservations on the same path, the first UnmapFromGuest only decrements the +// ref count (no guest/VM calls), and the second issues the physical guest +// unmount and VM removal. 
+func TestUnmapFromGuest_RefCounting_VMRemoveOnLastRef(t *testing.T) { + t.Parallel() + tc := newTestController(t, false) + + id1, _, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + id2, _, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + + tc.vmAdd.EXPECT().AddPlan9(gomock.Any(), gomock.Any()).Return(nil).Times(1) + tc.guestMount.EXPECT().AddLCOWMappedDirectory(gomock.Any(), gomock.Any()).Return(nil).Times(1) + _, _ = tc.c.MapToGuest(tc.ctx, id1) + _, _ = tc.c.MapToGuest(tc.ctx, id2) + + // First unmap: ref drops to 1 — no VM or guest calls. + if err := tc.c.UnmapFromGuest(tc.ctx, id1); err != nil { + t.Fatalf("first UnmapFromGuest: %v", err) + } + if len(tc.c.sharesByHostPath) != 1 { + t.Errorf("share should still exist after first unmap, got %d shares", len(tc.c.sharesByHostPath)) + } + + // Second unmap: last ref — guest unmount and VM remove issued. + tc.guestUnmount.EXPECT().RemoveLCOWMappedDirectory(gomock.Any(), gomock.Any()).Return(nil).Times(1) + tc.vmRemove.EXPECT().RemovePlan9(gomock.Any(), gomock.Any()).Return(nil).Times(1) + if err := tc.c.UnmapFromGuest(tc.ctx, id2); err != nil { + t.Fatalf("second UnmapFromGuest: %v", err) + } + if len(tc.c.sharesByHostPath) != 0 { + t.Errorf("expected 0 shares after last unmap, got %d", len(tc.c.sharesByHostPath)) + } +} + +// TestUnmapFromGuest_WithoutMapToGuest_CleansUp verifies that calling +// UnmapFromGuest on a reservation that was never passed to MapToGuest still +// cleans up correctly without issuing any VM or guest calls. +func TestUnmapFromGuest_WithoutMapToGuest_CleansUp(t *testing.T) { + t.Parallel() + tc := newTestController(t, false) + + // Reserve but never MapToGuest — no VM or guest calls expected. 
+ id, _, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + + if err := tc.c.UnmapFromGuest(tc.ctx, id); err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(tc.c.reservations) != 0 { + t.Errorf("expected 0 reservations, got %d", len(tc.c.reservations)) + } +} + +// ───────────────────────────────────────────────────────────────────────────── +// Full lifecycle tests +// ───────────────────────────────────────────────────────────────────────────── + +// TestFullLifecycle_ReuseAfterRelease verifies that after a full Reserve → +// MapToGuest → UnmapFromGuest cycle, the same host path can be reserved again +// as a fresh share with a new reservation ID. +func TestFullLifecycle_ReuseAfterRelease(t *testing.T) { + t.Parallel() + tc := newTestController(t, false) + + // First full cycle. + id1, _, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + tc.vmAdd.EXPECT().AddPlan9(gomock.Any(), gomock.Any()).Return(nil) + tc.guestMount.EXPECT().AddLCOWMappedDirectory(gomock.Any(), gomock.Any()).Return(nil) + _, _ = tc.c.MapToGuest(tc.ctx, id1) + tc.guestUnmount.EXPECT().RemoveLCOWMappedDirectory(gomock.Any(), gomock.Any()).Return(nil) + tc.vmRemove.EXPECT().RemovePlan9(gomock.Any(), gomock.Any()).Return(nil) + _ = tc.c.UnmapFromGuest(tc.ctx, id1) + + // Reserve the same path again — should start a fresh share. 
+ id2, _, err := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + if err != nil { + t.Fatalf("re-reserve after release: %v", err) + } + if id1 == id2 { + t.Error("expected a new reservation ID after re-reserving a released path") + } +} diff --git a/internal/controller/device/plan9/doc.go b/internal/controller/device/plan9/doc.go index 08c2ddc9b7..3463f4cff1 100644 --- a/internal/controller/device/plan9/doc.go +++ b/internal/controller/device/plan9/doc.go @@ -1,62 +1,45 @@ -//go:build windows && !wcow +//go:build windows -// Package plan9 provides a manager for managing Plan9 file-share devices -// attached to a Utility VM (UVM). -// -// It handles adding and removing Plan9 shares on the host side via HCS modify -// calls. Guest-side mount operations (mapped-directory requests) are handled -// separately by the mount manager. -// -// # Deduplication and Reference Counting -// -// [Manager] deduplicates shares: if two callers add a share with identical -// [AddOptions], the second call reuses the existing share and increments an -// internal reference count rather than issuing a second HCS call. The share is -// only removed from the VM when the last caller invokes [Manager.RemoveFromVM]. -// -// # Lifecycle -// -// Each share progresses through the states below. -// The happy path runs down the left column; the error path is on the right. 
-// -// Allocate entry for the share -// │ -// ▼ -// ┌─────────────────────┐ -// │ sharePending │ -// └──────────┬──────────┘ -// │ -// ┌───────┴────────────────────────────────┐ -// │ AddPlan9 succeeds │ AddPlan9 fails -// ▼ ▼ -// ┌─────────────────────┐ ┌──────────────────────┐ -// │ shareAdded │ │ shareInvalid │ -// └──────────┬──────────┘ └──────────────────────┘ -// │ RemovePlan9 succeeds (auto-removed from map) -// ▼ -// ┌─────────────────────┐ -// │ shareRemoved │ ← terminal; entry removed from map -// └─────────────────────┘ -// -// State descriptions: -// -// - [sharePending]: entered when a new entry is allocated (by [Manager.ResolveShareName] -// or the first [Manager.AddToVM] call). No HCS call has been made yet. -// - [shareAdded]: entered once [vmPlan9Manager.AddPlan9] succeeds; -// the share is live on the VM. -// - [shareInvalid]: entered when [vmPlan9Manager.AddPlan9] fails; -// the map entry is removed immediately so the next call can retry. -// - [shareRemoved]: terminal state entered once [vmPlan9Manager.RemovePlan9] succeeds. -// -// Method summary: -// -// - [Manager.ResolveShareName] pre-allocates a share name for the given [AddOptions] -// without issuing any HCS call. If a matching share is already tracked, -// the existing name is returned. This is useful for resolving downstream -// resource paths (e.g., guest mount paths) before the share is live. -// - [Manager.AddToVM] attaches the share, driving the HCS AddPlan9 call on -// the first caller and incrementing the reference count on subsequent ones. -// If the HCS call fails, the entry is removed so the next call can retry. -// - [Manager.RemoveFromVM] decrements the reference count and tears down the -// share only when the count reaches zero. +// Package plan9 manages the full lifecycle of Plan9 share mappings on a +// Hyper-V VM, from host-side name allocation through guest-side mounting. 
+// +// # Architecture +// +// [Controller] is the primary entry point, exposing three methods: +// +// - [Controller.Reserve]: allocates a reference-counted Plan9 share for a +// host path and returns a reservation ID. +// - [Controller.MapToGuest]: adds the share to the VM and mounts it +// inside the guest. +// - [Controller.UnmapFromGuest]: unmounts the share from the guest and, +// when all reservations for a share are released, removes the share from +// the VM. +// +// All three operations are serialized by a single mutex on the [Controller]. +// +// # Usage +// +// c := plan9.New(vmOps, linuxGuestOps, noWritableFileShares) +// +// // Reserve a share (no I/O yet): +// id, err := c.Reserve(ctx, shareConfig, mountConfig) +// +// // Add the share and mount in the guest: +// guestPath, err := c.MapToGuest(ctx, id) +// +// // Unmount and remove when done: +// err = c.UnmapFromGuest(ctx, id) +// +// # Retry / Idempotency +// +// [Controller.MapToGuest] is idempotent for a reservation that is already +// fully mapped. [Controller.UnmapFromGuest] is retryable: if it fails +// partway through teardown, calling it again with the same reservation ID +// resumes from where the previous attempt stopped. +// +// # Layered Design +// +// The [Controller] delegates all share-level state to [share.Share] and all +// mount-level state to [mount.Mount]; it only coordinates name allocation +// and the overall call sequence. package plan9 diff --git a/internal/controller/device/plan9/mount/doc.go b/internal/controller/device/plan9/mount/doc.go new file mode 100644 index 0000000000..59ee8caf17 --- /dev/null +++ b/internal/controller/device/plan9/mount/doc.go @@ -0,0 +1,41 @@ +//go:build windows + +// Package mount manages the lifecycle of a single Plan9 guest-side mount +// inside a Hyper-V guest VM, from the initial reservation through mounting +// and unmounting. +// +// # Overview +// +// [Mount] is the primary type, representing one guest-side Plan9 mount. 
+// It tracks its own lifecycle state via [State] and supports reference +// counting so multiple callers can share the same mount. +// +// All operations on a [Mount] are expected to be ordered by the caller. +// No locking is performed at this layer. +// +// # Mount Lifecycle +// +// ┌──────────────────────┐ +// │ StateReserved │ ← mount failure → StateUnmounted (not retriable) +// └──────────┬───────────┘ +// │ guest mount succeeds +// ▼ +// ┌──────────────────────┐ +// │ StateMounted │ +// └──────────┬───────────┘ +// │ reference count → 0; +// │ guest unmount succeeds +// ▼ +// ┌──────────────────────┐ +// │ StateUnmounted │ +// └──────────────────────┘ +// (terminal — entry removed from share) +// +// # Reference Counting +// +// Multiple callers may share a single [Mount] by calling [Mount.Reserve] +// before the mount is issued. [Mount.MountToGuest] issues the guest operation +// only once regardless of the reservation count; subsequent callers receive the +// same guest path. [Mount.UnmountFromGuest] decrements the count and only +// issues the guest unmount when it reaches zero. +package mount diff --git a/internal/controller/device/plan9/mount/mocks/mock_mount.go b/internal/controller/device/plan9/mount/mocks/mock_mount.go new file mode 100644 index 0000000000..44cd54f50a --- /dev/null +++ b/internal/controller/device/plan9/mount/mocks/mock_mount.go @@ -0,0 +1,94 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: types.go +// +// Generated by this command: +// +// mockgen -source types.go -package mocks -destination mocks/mock_mount.go +// + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + guestresource "github.com/Microsoft/hcsshim/internal/protocol/guestresource" + gomock "go.uber.org/mock/gomock" +) + +// MockLinuxGuestPlan9Mounter is a mock of LinuxGuestPlan9Mounter interface. 
+type MockLinuxGuestPlan9Mounter struct { + ctrl *gomock.Controller + recorder *MockLinuxGuestPlan9MounterMockRecorder + isgomock struct{} +} + +// MockLinuxGuestPlan9MounterMockRecorder is the mock recorder for MockLinuxGuestPlan9Mounter. +type MockLinuxGuestPlan9MounterMockRecorder struct { + mock *MockLinuxGuestPlan9Mounter +} + +// NewMockLinuxGuestPlan9Mounter creates a new mock instance. +func NewMockLinuxGuestPlan9Mounter(ctrl *gomock.Controller) *MockLinuxGuestPlan9Mounter { + mock := &MockLinuxGuestPlan9Mounter{ctrl: ctrl} + mock.recorder = &MockLinuxGuestPlan9MounterMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockLinuxGuestPlan9Mounter) EXPECT() *MockLinuxGuestPlan9MounterMockRecorder { + return m.recorder +} + +// AddLCOWMappedDirectory mocks base method. +func (m *MockLinuxGuestPlan9Mounter) AddLCOWMappedDirectory(ctx context.Context, settings guestresource.LCOWMappedDirectory) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddLCOWMappedDirectory", ctx, settings) + ret0, _ := ret[0].(error) + return ret0 +} + +// AddLCOWMappedDirectory indicates an expected call of AddLCOWMappedDirectory. +func (mr *MockLinuxGuestPlan9MounterMockRecorder) AddLCOWMappedDirectory(ctx, settings any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddLCOWMappedDirectory", reflect.TypeOf((*MockLinuxGuestPlan9Mounter)(nil).AddLCOWMappedDirectory), ctx, settings) +} + +// MockLinuxGuestPlan9Unmounter is a mock of LinuxGuestPlan9Unmounter interface. +type MockLinuxGuestPlan9Unmounter struct { + ctrl *gomock.Controller + recorder *MockLinuxGuestPlan9UnmounterMockRecorder + isgomock struct{} +} + +// MockLinuxGuestPlan9UnmounterMockRecorder is the mock recorder for MockLinuxGuestPlan9Unmounter. 
+type MockLinuxGuestPlan9UnmounterMockRecorder struct { + mock *MockLinuxGuestPlan9Unmounter +} + +// NewMockLinuxGuestPlan9Unmounter creates a new mock instance. +func NewMockLinuxGuestPlan9Unmounter(ctrl *gomock.Controller) *MockLinuxGuestPlan9Unmounter { + mock := &MockLinuxGuestPlan9Unmounter{ctrl: ctrl} + mock.recorder = &MockLinuxGuestPlan9UnmounterMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockLinuxGuestPlan9Unmounter) EXPECT() *MockLinuxGuestPlan9UnmounterMockRecorder { + return m.recorder +} + +// RemoveLCOWMappedDirectory mocks base method. +func (m *MockLinuxGuestPlan9Unmounter) RemoveLCOWMappedDirectory(ctx context.Context, settings guestresource.LCOWMappedDirectory) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemoveLCOWMappedDirectory", ctx, settings) + ret0, _ := ret[0].(error) + return ret0 +} + +// RemoveLCOWMappedDirectory indicates an expected call of RemoveLCOWMappedDirectory. +func (mr *MockLinuxGuestPlan9UnmounterMockRecorder) RemoveLCOWMappedDirectory(ctx, settings any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveLCOWMappedDirectory", reflect.TypeOf((*MockLinuxGuestPlan9Unmounter)(nil).RemoveLCOWMappedDirectory), ctx, settings) +} diff --git a/internal/controller/device/plan9/mount/mount.go b/internal/controller/device/plan9/mount/mount.go new file mode 100644 index 0000000000..18badfd72d --- /dev/null +++ b/internal/controller/device/plan9/mount/mount.go @@ -0,0 +1,169 @@ +//go:build windows + +package mount + +import ( + "context" + "fmt" + + "github.com/sirupsen/logrus" + + "github.com/Microsoft/hcsshim/internal/log" + "github.com/Microsoft/hcsshim/internal/logfields" + "github.com/Microsoft/hcsshim/internal/protocol/guestresource" + "github.com/Microsoft/hcsshim/internal/vm/vmutils" +) + +// GuestPathFmt is the guest path template for Plan9 mounts on LCOW. 
+// The path encodes the share name so that each share gets a unique,
+// stable mount point. Example:
+//
+//	/run/mounts/plan9/<share-name>
+const GuestPathFmt = "/run/mounts/plan9/%s"
+
+// Mount represents a single Plan9 share mount inside a Hyper-V guest VM. It
+// tracks the mount lifecycle and supports reference counting so multiple
+// callers can share the same physical guest mount.
+//
+// All operations on a [Mount] are expected to be ordered by the caller.
+// No locking is performed at this layer.
+type Mount struct {
+	// shareName is the HCS-level identifier for the parent share.
+	shareName string
+
+	// config is the immutable guest-side mount configuration supplied at construction.
+	config Config
+
+	// state tracks the current lifecycle position of this mount.
+	state State
+
+	// refCount is the number of active callers sharing this mount.
+	// The guest unmount is issued only when it drops to zero.
+	refCount int
+
+	// guestPath is the path inside the guest where the share is mounted;
+	// computed at construction, but live in the guest only in [StateMounted].
+	guestPath string
+}
+
+// NewReserved creates a new [Mount] in the [StateReserved] state with the
+// provided share name and guest-side mount configuration.
+func NewReserved(shareName string, config Config) *Mount {
+	return &Mount{
+		shareName: shareName,
+		config:    config,
+		state:     StateReserved,
+		refCount:  1,
+		guestPath: fmt.Sprintf(GuestPathFmt, shareName),
+	}
+}
+
+// State returns the current lifecycle state of the mount.
+func (m *Mount) State() State {
+	return m.state
+}
+
+// GuestPath returns the path inside the guest where the share is mounted.
+func (m *Mount) GuestPath() string {
+	return m.guestPath
+}
+
+// Reserve increments the reference count on this mount, allowing an additional
+// caller to share the same guest path.
+func (m *Mount) Reserve(config Config) error { + if !m.config.Equals(config) { + return fmt.Errorf("cannot reserve mount with different config") + } + + if m.state != StateReserved && m.state != StateMounted { + return fmt.Errorf("cannot reserve mount in state %s", m.state) + } + + m.refCount++ + return nil +} + +// MountToGuest issues the guest-side mount operation and returns the guest +// path. +func (m *Mount) MountToGuest(ctx context.Context, guest LinuxGuestPlan9Mounter) (string, error) { + ctx, _ = log.WithContext(ctx, logrus.WithField("shareName", m.shareName)) + + // Drive the mount state machine. + switch m.state { + case StateReserved: + log.G(ctx).Debug("mounting Plan9 share in guest") + + // Issue the guest mount via the GCS mapped directory API. + if err := guest.AddLCOWMappedDirectory(ctx, guestresource.LCOWMappedDirectory{ + MountPath: m.guestPath, + ShareName: m.shareName, + Port: vmutils.Plan9Port, + ReadOnly: m.config.ReadOnly, + }); err != nil { + // Move to unmounted since no guest state was established from Reserved. + m.state = StateUnmounted + return "", fmt.Errorf("add LCOW mapped directory share=%s: %w", m.shareName, err) + } + + m.state = StateMounted + // Note we don't increment the ref count here as the caller of + // MountToGuest is responsible for calling it once per reservation. + + log.G(ctx).WithField(logfields.UVMPath, m.guestPath).Debug("Plan9 share mounted in guest") + return m.guestPath, nil + + case StateMounted: + // Already mounted — the caller holds a reservation so return the + // existing guest path directly. + return m.guestPath, nil + + case StateUnmounted: + return "", fmt.Errorf("cannot mount a share in state %s", m.state) + } + return "", nil +} + +// UnmountFromGuest decrements the reference count and, when it reaches zero, +// issues the guest-side unmount. 
+func (m *Mount) UnmountFromGuest(ctx context.Context, guest LinuxGuestPlan9Unmounter) error { + ctx, _ = log.WithContext(ctx, logrus.WithField("shareName", m.shareName)) + + // Drive the state machine. + switch m.state { + case StateReserved: + // No guest work to do, just decrement the ref count and if it hits zero we are done. + m.refCount-- + // Once we hit the last entry, we can transition to unmounted so that + // caller can understand the terminal state of this mount. + if m.refCount == 0 { + m.state = StateUnmounted + } + return nil + + case StateMounted: + if m.refCount == 1 { + log.G(ctx).Debug("unmounting Plan9 share from guest") + + // Last reference — issue the physical guest unmount. + if err := guest.RemoveLCOWMappedDirectory(ctx, guestresource.LCOWMappedDirectory{ + MountPath: m.guestPath, + ShareName: m.shareName, + Port: vmutils.Plan9Port, + ReadOnly: m.config.ReadOnly, + }); err != nil { + return fmt.Errorf("remove LCOW mapped directory share=%s: %w", m.shareName, err) + } + + m.state = StateUnmounted + log.G(ctx).Debug("Plan9 share unmounted from guest") + } + m.refCount-- + return nil + + case StateUnmounted: + // Already in the terminal state — nothing to do. This can happen when + // MountToGuest failed (it transitions StateReserved → StateUnmounted), + // and the caller subsequently calls UnmapFromGuest to clean up. 
+	}
+	return nil
+}
diff --git a/internal/controller/device/plan9/mount/mount_test.go b/internal/controller/device/plan9/mount/mount_test.go
new file mode 100644
index 0000000000..8341287bce
--- /dev/null
+++ b/internal/controller/device/plan9/mount/mount_test.go
@@ -0,0 +1,330 @@
+//go:build windows && lcow
+
+package mount
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"testing"
+
+	"go.uber.org/mock/gomock"
+
+	"github.com/Microsoft/hcsshim/internal/controller/device/plan9/mount/mocks"
+)
+
+var (
+	errGuestMount   = errors.New("guest mount failed")
+	errGuestUnmount = errors.New("guest unmount failed")
+)
+
+// ─────────────────────────────────────────────────────────────────────────────
+// NewReserved
+// ─────────────────────────────────────────────────────────────────────────────
+
+// TestNewReserved_InitialState verifies that a freshly created mount starts
+// in StateReserved with the guest path correctly derived from the share
+// name via GuestPathFmt.
+func TestNewReserved_InitialState(t *testing.T) {
+	m := NewReserved("share0", Config{ReadOnly: false})
+	if m.State() != StateReserved {
+		t.Errorf("expected StateReserved, got %v", m.State())
+	}
+
+	expected := fmt.Sprintf(GuestPathFmt, "share0")
+	if m.GuestPath() != expected {
+		t.Errorf("expected guest path %q, got %q", expected, m.GuestPath())
+	}
+}
+
+// ─────────────────────────────────────────────────────────────────────────────
+// Reserve
+// ─────────────────────────────────────────────────────────────────────────────
+
+// TestReserve_SameConfig_IncrementsRefCount verifies that reserving with the
+// same config increments the internal refCount from 1 to 2.
+func TestReserve_SameConfig_IncrementsRefCount(t *testing.T) { + m := NewReserved("share0", Config{ReadOnly: false}) + if err := m.Reserve(Config{ReadOnly: false}); err != nil { + t.Fatalf("unexpected error: %v", err) + } + if m.refCount != 2 { + t.Errorf("expected refCount=2, got %d", m.refCount) + } +} + +// TestReserve_DifferentConfig_Errors verifies that reserving with a different +// config (e.g. ReadOnly mismatch) returns an error. +func TestReserve_DifferentConfig_Errors(t *testing.T) { + m := NewReserved("share0", Config{ReadOnly: false}) + err := m.Reserve(Config{ReadOnly: true}) + if err == nil { + t.Fatal("expected error when reserving with different config") + } +} + +// TestReserve_OnUnmountedMount_Errors verifies that calling Reserve on a mount +// that has already reached StateUnmounted returns an error; the terminal state +// cannot accept new reservations. +func TestReserve_OnUnmountedMount_Errors(t *testing.T) { + ctrl := gomock.NewController(t) + guest := mocks.NewMockLinuxGuestPlan9Mounter(ctrl) + guestUnmount := mocks.NewMockLinuxGuestPlan9Unmounter(ctrl) + + m := NewReserved("share0", Config{}) + + // Mount then unmount to reach StateUnmounted. 
+ guest.EXPECT().AddLCOWMappedDirectory(gomock.Any(), gomock.Any()).Return(nil) + _, _ = m.MountToGuest(context.Background(), guest) + + guestUnmount.EXPECT().RemoveLCOWMappedDirectory(gomock.Any(), gomock.Any()).Return(nil) + _ = m.UnmountFromGuest(context.Background(), guestUnmount) + + if m.State() != StateUnmounted { + t.Fatalf("expected StateUnmounted, got %v", m.State()) + } + + err := m.Reserve(Config{}) + if err == nil { + t.Fatal("expected error when reserving on unmounted mount") + } +} + +// ───────────────────────────────────────────────────────────────────────────── +// MountToGuest +// ───────────────────────────────────────────────────────────────────────────── + +// TestMountToGuest_HappyPath verifies that a mount in StateReserved transitions +// to StateMounted after a successful guest AddLCOWMappedDirectory call and +// returns the expected guest path. +func TestMountToGuest_HappyPath(t *testing.T) { + ctrl := gomock.NewController(t) + guest := mocks.NewMockLinuxGuestPlan9Mounter(ctrl) + + m := NewReserved("share0", Config{ReadOnly: true}) + + guest.EXPECT().AddLCOWMappedDirectory(gomock.Any(), gomock.Any()).Return(nil) + + guestPath, err := m.MountToGuest(context.Background(), guest) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + expected := fmt.Sprintf(GuestPathFmt, "share0") + if guestPath != expected { + t.Errorf("expected guest path %q, got %q", expected, guestPath) + } + if m.State() != StateMounted { + t.Errorf("expected StateMounted, got %v", m.State()) + } +} + +// TestMountToGuest_GuestFails_TransitionsToUnmounted verifies that when the +// guest AddLCOWMappedDirectory call fails, the mount transitions directly from +// StateReserved to StateUnmounted (the terminal state). 
+func TestMountToGuest_GuestFails_TransitionsToUnmounted(t *testing.T) { + ctrl := gomock.NewController(t) + guest := mocks.NewMockLinuxGuestPlan9Mounter(ctrl) + + m := NewReserved("share0", Config{}) + + guest.EXPECT().AddLCOWMappedDirectory(gomock.Any(), gomock.Any()).Return(errGuestMount) + + _, err := m.MountToGuest(context.Background(), guest) + if err == nil { + t.Fatal("expected error") + } + if m.State() != StateUnmounted { + t.Errorf("expected StateUnmounted after mount failure, got %v", m.State()) + } +} + +// TestMountToGuest_AlreadyMounted_Idempotent verifies that calling MountToGuest +// a second time on a StateMounted mount returns the existing guest path without +// issuing a new AddLCOWMappedDirectory call to the guest. +func TestMountToGuest_AlreadyMounted_Idempotent(t *testing.T) { + ctrl := gomock.NewController(t) + guest := mocks.NewMockLinuxGuestPlan9Mounter(ctrl) + + m := NewReserved("share0", Config{}) + + // First mount — guest call must happen exactly once. + guest.EXPECT().AddLCOWMappedDirectory(gomock.Any(), gomock.Any()).Return(nil).Times(1) + _, _ = m.MountToGuest(context.Background(), guest) + + // Second mount on the same mount object — must NOT re-issue guest call. + guestPath, err := m.MountToGuest(context.Background(), guest) + if err != nil { + t.Fatalf("unexpected error on second mount: %v", err) + } + if m.State() != StateMounted { + t.Errorf("expected StateMounted, got %v", m.State()) + } + expected := fmt.Sprintf(GuestPathFmt, "share0") + if guestPath != expected { + t.Errorf("expected %q, got %q", expected, guestPath) + } +} + +// TestMountToGuest_OnUnmounted_Errors verifies that calling MountToGuest on a +// mount in the terminal StateUnmounted returns an error. 
+func TestMountToGuest_OnUnmounted_Errors(t *testing.T) { + ctrl := gomock.NewController(t) + guest := mocks.NewMockLinuxGuestPlan9Mounter(ctrl) + + m := NewReserved("share0", Config{}) + guest.EXPECT().AddLCOWMappedDirectory(gomock.Any(), gomock.Any()).Return(errGuestMount) + _, _ = m.MountToGuest(context.Background(), guest) + + // Try to mount again from StateUnmounted. + _, err := m.MountToGuest(context.Background(), guest) + if err == nil { + t.Fatal("expected error when mounting from StateUnmounted") + } +} + +// ───────────────────────────────────────────────────────────────────────────── +// UnmountFromGuest +// ───────────────────────────────────────────────────────────────────────────── + +// TestUnmountFromGuest_HappyPath verifies that unmounting a StateMounted mount +// with refCount=1 issues a guest RemoveLCOWMappedDirectory call and transitions +// the mount to the terminal StateUnmounted. +func TestUnmountFromGuest_HappyPath(t *testing.T) { + ctrl := gomock.NewController(t) + guest := mocks.NewMockLinuxGuestPlan9Mounter(ctrl) + guestUnmount := mocks.NewMockLinuxGuestPlan9Unmounter(ctrl) + + m := NewReserved("share0", Config{}) + guest.EXPECT().AddLCOWMappedDirectory(gomock.Any(), gomock.Any()).Return(nil) + _, _ = m.MountToGuest(context.Background(), guest) + + guestUnmount.EXPECT().RemoveLCOWMappedDirectory(gomock.Any(), gomock.Any()).Return(nil) + if err := m.UnmountFromGuest(context.Background(), guestUnmount); err != nil { + t.Fatalf("unexpected error: %v", err) + } + if m.State() != StateUnmounted { + t.Errorf("expected StateUnmounted, got %v", m.State()) + } +} + +// TestUnmountFromGuest_GuestFails_StaysInMounted verifies that a failed guest +// RemoveLCOWMappedDirectory call leaves the mount in StateMounted so the caller +// can retry. 
+func TestUnmountFromGuest_GuestFails_StaysInMounted(t *testing.T) { + ctrl := gomock.NewController(t) + guest := mocks.NewMockLinuxGuestPlan9Mounter(ctrl) + guestUnmount := mocks.NewMockLinuxGuestPlan9Unmounter(ctrl) + + m := NewReserved("share0", Config{}) + guest.EXPECT().AddLCOWMappedDirectory(gomock.Any(), gomock.Any()).Return(nil) + _, _ = m.MountToGuest(context.Background(), guest) + + guestUnmount.EXPECT().RemoveLCOWMappedDirectory(gomock.Any(), gomock.Any()).Return(errGuestUnmount) + err := m.UnmountFromGuest(context.Background(), guestUnmount) + if err == nil { + t.Fatal("expected error") + } + if m.State() != StateMounted { + t.Errorf("expected state to remain StateMounted after failed unmount, got %v", m.State()) + } +} + +// TestUnmountFromGuest_GuestFails_ThenRetrySucceeds verifies that after a failed +// guest unmount (mount stays StateMounted), a second UnmountFromGuest attempt +// succeeds and transitions the mount to StateUnmounted. +func TestUnmountFromGuest_GuestFails_ThenRetrySucceeds(t *testing.T) { + ctrl := gomock.NewController(t) + guest := mocks.NewMockLinuxGuestPlan9Mounter(ctrl) + guestUnmount := mocks.NewMockLinuxGuestPlan9Unmounter(ctrl) + + m := NewReserved("share0", Config{}) + guest.EXPECT().AddLCOWMappedDirectory(gomock.Any(), gomock.Any()).Return(nil) + _, _ = m.MountToGuest(context.Background(), guest) + + guestUnmount.EXPECT().RemoveLCOWMappedDirectory(gomock.Any(), gomock.Any()).Return(errGuestUnmount) + _ = m.UnmountFromGuest(context.Background(), guestUnmount) + + // Retry with success. 
+ guestUnmount.EXPECT().RemoveLCOWMappedDirectory(gomock.Any(), gomock.Any()).Return(nil) + if err := m.UnmountFromGuest(context.Background(), guestUnmount); err != nil { + t.Fatalf("retry unmount failed: %v", err) + } + if m.State() != StateUnmounted { + t.Errorf("expected StateUnmounted after retry, got %v", m.State()) + } +} + +// TestUnmountFromGuest_RefCounting_NoGuestCallUntilLastRef verifies that with +// refCount=2, the first UnmountFromGuest only decrements the refCount (no guest +// call), and the second UnmountFromGuest issues the physical guest unmount and +// transitions the mount to StateUnmounted. +func TestUnmountFromGuest_RefCounting_NoGuestCallUntilLastRef(t *testing.T) { + ctrl := gomock.NewController(t) + guest := mocks.NewMockLinuxGuestPlan9Mounter(ctrl) + guestUnmount := mocks.NewMockLinuxGuestPlan9Unmounter(ctrl) + + m := NewReserved("share0", Config{}) + // Add a second reservation. + _ = m.Reserve(Config{}) + + guest.EXPECT().AddLCOWMappedDirectory(gomock.Any(), gomock.Any()).Return(nil) + _, _ = m.MountToGuest(context.Background(), guest) + + // First unmount: drops refCount to 1, no guest call. + if err := m.UnmountFromGuest(context.Background(), guestUnmount); err != nil { + t.Fatalf("first unmount error: %v", err) + } + if m.State() != StateMounted { + t.Errorf("expected StateMounted after first unmount, got %v", m.State()) + } + if m.refCount != 1 { + t.Errorf("expected refCount=1 after first unmount, got %d", m.refCount) + } + + // Second unmount: drops refCount to 0, guest call issued. 
+ guestUnmount.EXPECT().RemoveLCOWMappedDirectory(gomock.Any(), gomock.Any()).Return(nil) + if err := m.UnmountFromGuest(context.Background(), guestUnmount); err != nil { + t.Fatalf("second unmount error: %v", err) + } + if m.State() != StateUnmounted { + t.Errorf("expected StateUnmounted after last unmount, got %v", m.State()) + } +} + +// TestUnmountFromGuest_OnReserved_NoGuestCall verifies that unmounting a mount +// that was never mounted (still in StateReserved) transitions directly to +// StateUnmounted without issuing any guest call. +func TestUnmountFromGuest_OnReserved_NoGuestCall(t *testing.T) { + ctrl := gomock.NewController(t) + guestUnmount := mocks.NewMockLinuxGuestPlan9Unmounter(ctrl) + + // Never mounted — never calls guest unmount. + m := NewReserved("share0", Config{}) + if err := m.UnmountFromGuest(context.Background(), guestUnmount); err != nil { + t.Fatalf("unexpected error: %v", err) + } + if m.State() != StateUnmounted { + t.Errorf("expected StateUnmounted, got %v", m.State()) + } +} + +// TestUnmountFromGuest_OnUnmounted_NoOp verifies that calling UnmountFromGuest +// on a mount already in the terminal StateUnmounted is a no-op that returns +// no error and does not issue any guest call. +func TestUnmountFromGuest_OnUnmounted_NoOp(t *testing.T) { + ctrl := gomock.NewController(t) + guest := mocks.NewMockLinuxGuestPlan9Mounter(ctrl) + guestUnmount := mocks.NewMockLinuxGuestPlan9Unmounter(ctrl) + + m := NewReserved("share0", Config{}) + guest.EXPECT().AddLCOWMappedDirectory(gomock.Any(), gomock.Any()).Return(nil) + _, _ = m.MountToGuest(context.Background(), guest) + + guestUnmount.EXPECT().RemoveLCOWMappedDirectory(gomock.Any(), gomock.Any()).Return(nil) + _ = m.UnmountFromGuest(context.Background(), guestUnmount) + + // Second unmount on StateUnmounted — must be a no-op with no error. 
+ if err := m.UnmountFromGuest(context.Background(), guestUnmount); err != nil { + t.Fatalf("unexpected error on double unmount: %v", err) + } +} diff --git a/internal/controller/device/plan9/mount/state.go b/internal/controller/device/plan9/mount/state.go new file mode 100644 index 0000000000..af665a0c30 --- /dev/null +++ b/internal/controller/device/plan9/mount/state.go @@ -0,0 +1,58 @@ +//go:build windows + +package mount + +// State represents the current lifecycle state of a Plan9 mount inside +// the guest. +// +// The normal progression is: +// +// StateReserved → StateMounted → StateUnmounted +// +// Mount failure from StateReserved transitions directly to the terminal +// StateUnmounted state; the entry is then removed by the parent [share.Share]. +// An unmount failure from StateMounted leaves the mount in StateMounted so +// the caller can retry. +// +// Full state-transition table: +// +// Current State │ Trigger │ Next State +// ────────────────┼────────────────────────────────────────────┼────────────────────── +// StateReserved │ guest mount succeeds │ StateMounted +// StateReserved │ guest mount fails │ StateUnmounted +// StateReserved │ UnmountFromGuest (refCount > 1) │ StateReserved (ref--) +// StateReserved │ UnmountFromGuest (refCount == 1) │ StateUnmounted +// StateMounted │ UnmountFromGuest (refCount > 1) │ StateMounted (ref--) +// StateMounted │ UnmountFromGuest (refCount == 1) succeeds │ StateUnmounted +// StateMounted │ UnmountFromGuest (refCount == 1) fails │ StateMounted +// StateUnmounted │ UnmountFromGuest │ StateUnmounted (no-op) +// StateUnmounted │ (terminal — entry removed from share) │ — +type State int + +const ( + // StateReserved is the initial state; the mount entry has been created + // but the guest mount operation has not yet been issued. + StateReserved State = iota + + // StateMounted means the share has been successfully mounted inside + // the guest. The guest path is valid from this state onward. 
+ StateMounted + + // StateUnmounted means the guest has unmounted the share. This is a + // terminal state; the entry is removed from the parent share. + StateUnmounted +) + +// String returns a human-readable name for the [State]. +func (s State) String() string { + switch s { + case StateReserved: + return "Reserved" + case StateMounted: + return "Mounted" + case StateUnmounted: + return "Unmounted" + default: + return "Unknown" + } +} diff --git a/internal/controller/device/plan9/mount/types.go b/internal/controller/device/plan9/mount/types.go new file mode 100644 index 0000000000..964f33fc0a --- /dev/null +++ b/internal/controller/device/plan9/mount/types.go @@ -0,0 +1,32 @@ +//go:build windows + +package mount + +import ( + "context" + + "github.com/Microsoft/hcsshim/internal/protocol/guestresource" +) + +// Config describes how a Plan9 share should be mounted inside the guest. +type Config struct { + // ReadOnly mounts the share read-only. + ReadOnly bool +} + +// Equals reports whether two mount Config values describe the same mount parameters. +func (c Config) Equals(other Config) bool { + return c.ReadOnly == other.ReadOnly +} + +// LinuxGuestPlan9Mounter mounts a Plan9 share inside an LCOW guest. +type LinuxGuestPlan9Mounter interface { + // AddLCOWMappedDirectory maps a Plan9 share into the LCOW guest. + AddLCOWMappedDirectory(ctx context.Context, settings guestresource.LCOWMappedDirectory) error +} + +// LinuxGuestPlan9Unmounter unmounts a Plan9 share from an LCOW guest. +type LinuxGuestPlan9Unmounter interface { + // RemoveLCOWMappedDirectory unmaps a Plan9 share from the LCOW guest. 
+ RemoveLCOWMappedDirectory(ctx context.Context, settings guestresource.LCOWMappedDirectory) error +} diff --git a/internal/controller/device/plan9/plan9.go b/internal/controller/device/plan9/plan9.go deleted file mode 100644 index a8b7cc6210..0000000000 --- a/internal/controller/device/plan9/plan9.go +++ /dev/null @@ -1,252 +0,0 @@ -//go:build windows && !wcow - -package plan9 - -import ( - "context" - "fmt" - "strconv" - "sync" - - "github.com/Microsoft/hcsshim/internal/hcs" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/logfields" - "github.com/Microsoft/hcsshim/internal/vm/vmutils" - - "github.com/sirupsen/logrus" -) - -// share-flag constants used in Plan9 HCS requests. -// -// These are marked private in the HCS schema. When public variants become -// available, we should replace these. -const ( - shareFlagsReadOnly int32 = 0x00000001 - shareFlagsLinuxMetadata int32 = 0x00000004 - shareFlagsRestrictFileAccess int32 = 0x00000080 -) - -// Manager is the concrete implementation which manages plan9 shares to the UVM. -type Manager struct { - // mu protects the shares map and serializes name allocation across concurrent callers. - mu sync.Mutex - - // shares maps share name → shareEntry for every active or pending share. - // Access must be guarded by mu. - shares map[string]*shareEntry - - // noWritableFileShares disallows adding writable Plan9 shares. - noWritableFileShares bool - - // vmPlan9Mgr performs host-side Plan9 add/remove on the VM. - vmPlan9Mgr vmPlan9Manager - - // nameCounter is the monotonically increasing index used to generate unique share names. - // Access must be guarded by mu. - nameCounter uint64 -} - -// New creates a ready-to-use [Manager]. 
-func New( - vmPlan9Mgr vmPlan9Manager, - noWritableFileShares bool, -) *Manager { - return &Manager{ - vmPlan9Mgr: vmPlan9Mgr, - noWritableFileShares: noWritableFileShares, - shares: make(map[string]*shareEntry), - } -} - -// ResolveShareName pre-emptively allocates a share name for the given [AddOptions] and returns it. -// If a matching share is already tracked, the existing name is returned without -// allocating a new entry. ResolveShareName does not drive any HCS call or increment the -// reference count; callers must follow up with [Manager.AddToVM] to claim the share. -func (m *Manager) ResolveShareName(ctx context.Context, opts *AddOptions) (string, error) { - if !opts.ReadOnly && m.noWritableFileShares { - return "", fmt.Errorf("adding writable shares is denied: %w", hcs.ErrOperationDenied) - } - - log.G(ctx).WithField(logfields.HostPath, opts.HostPath).Debug("resolving plan9 share name") - - entry := m.getOrAllocateEntry(ctx, opts) - return entry.name, nil -} - -// AddToVM adds a Plan9 share to the host VM and returns the generated share name. -// If a share with identical [AddOptions] is already added or in flight, AddToVM -// blocks until that operation completes and returns the share name, incrementing -// the internal reference count. -func (m *Manager) AddToVM(ctx context.Context, opts *AddOptions) (_ string, err error) { - // Validate write-share policy before touching shared state. - if !opts.ReadOnly && m.noWritableFileShares { - return "", fmt.Errorf("adding writable shares is denied: %w", hcs.ErrOperationDenied) - } - - entry := m.getOrAllocateEntry(ctx, opts) - - // Acquire the per-entry lock to check state and potentially drive the HCS call. - // Multiple goroutines requesting the same share will serialize here. 
- entry.mu.Lock() - defer entry.mu.Unlock() - - ctx, _ = log.WithContext(ctx, logrus.WithField("shareName", entry.name)) - - log.G(ctx).Debug("received share entry, checking state") - - switch entry.state { - case shareAdded: - // ============================================================================== - // Found an existing live share — reuse it. - // ============================================================================== - entry.refCount++ - log.G(ctx).Debug("plan9 share already added to VM, reusing existing share") - return entry.name, nil - - case sharePending: - // ============================================================================== - // New share — we own the HCS call. - // Other callers requesting the same share will block on entry.mu until we - // transition the state out of sharePending. - // ============================================================================== - flags := shareFlagsLinuxMetadata - if opts.ReadOnly { - flags |= shareFlagsReadOnly - } - if opts.Restrict { - flags |= shareFlagsRestrictFileAccess - } - - log.G(ctx).WithFields(logrus.Fields{ - logfields.HostPath: opts.HostPath, - logfields.ReadOnly: opts.ReadOnly, - "RestrictFileAccess": opts.Restrict, - "AllowedFiles": opts.AllowedNames, - }).Trace("adding plan9 share to host VM") - - if err = m.vmPlan9Mgr.AddPlan9(ctx, hcsschema.Plan9Share{ - Name: entry.name, - AccessName: entry.name, - Path: opts.HostPath, - Port: vmutils.Plan9Port, - Flags: flags, - AllowedFiles: opts.AllowedNames, - }); err != nil { - // Transition to Invalid so that waiting goroutines see the real failure reason. - entry.state = shareInvalid - entry.stateErr = err - - // Remove from the map so subsequent calls can retry with a fresh entry. 
- m.mu.Lock() - delete(m.shares, entry.name) - m.mu.Unlock() - - return "", fmt.Errorf("add plan9 share %s to host: %w", entry.name, err) - } - - entry.state = shareAdded - entry.refCount++ - - log.G(ctx).Info("plan9 share added to host VM") - - return entry.name, nil - - case shareInvalid: - // ============================================================================== - // A previous AddPlan9 call for this entry failed. - // ============================================================================== - // Return the original error. The map entry has already been removed - // by the goroutine that drove the failed add. - return "", fmt.Errorf("previous attempt to add plan9 share %s to VM failed: %w", - entry.name, entry.stateErr) - - default: - return "", fmt.Errorf("plan9 share in unexpected state %s during add", entry.state) - } -} - -// getOrAllocateEntry either reuses an existing [shareEntry] whose options match opts, -// or allocates a new pending entry with a freshly generated name. -// The returned entry's refCount is not incremented; callers that claim the share -// must increment it themselves. -func (m *Manager) getOrAllocateEntry(ctx context.Context, opts *AddOptions) *shareEntry { - m.mu.Lock() - defer m.mu.Unlock() - - // Reuse an existing entry if its options match the caller's. - for _, existing := range m.shares { - if optionsMatch(existing.opts, opts) { - return existing - } - } - - log.G(ctx).Debug("no existing plan9 share found for options, allocating new entry") - - name := strconv.FormatUint(m.nameCounter, 10) - m.nameCounter++ - - entry := &shareEntry{ - opts: opts, - name: name, - state: sharePending, - // refCount is 0; it will be incremented by the goroutine that drives AddPlan9. - refCount: 0, - } - m.shares[name] = entry - return entry -} - -// RemoveFromVM removes the Plan9 share identified by shareName from the host VM. 
-// If the share is held by multiple callers, RemoveFromVM decrements the reference -// count and returns without tearing down the share until the last caller removes it. -func (m *Manager) RemoveFromVM(ctx context.Context, shareName string) error { - ctx, _ = log.WithContext(ctx, logrus.WithField("shareName", shareName)) - - m.mu.Lock() - entry := m.shares[shareName] - m.mu.Unlock() - - if entry == nil { - log.G(ctx).Debug("plan9 share not found, skipping removal") - return nil - } - - entry.mu.Lock() - defer entry.mu.Unlock() - - if entry.state == shareInvalid { - // AddPlan9 never succeeded; nothing to remove from HCS. - return nil - } - - if entry.refCount > 1 { - entry.refCount-- - log.G(ctx).Debug("plan9 share still in use by other callers, not removing from VM") - return nil - } - - // refCount is 0 (pre-allocated via ResolveShareName but never added) or 1 (last caller). - // Only call RemovePlan9 when the share was actually added to the VM. - if entry.state == shareAdded { - log.G(ctx).Debug("starting plan9 share removal") - - if err := m.vmPlan9Mgr.RemovePlan9(ctx, hcsschema.Plan9Share{ - Name: shareName, - AccessName: shareName, - Port: vmutils.Plan9Port, - }); err != nil { - return fmt.Errorf("remove plan9 share %s from host: %w", shareName, err) - } - - entry.state = shareRemoved - log.G(ctx).Info("plan9 share removed from host VM") - } - - // Clean up from the map regardless of whether AddPlan9 was ever called. - m.mu.Lock() - delete(m.shares, shareName) - m.mu.Unlock() - - return nil -} diff --git a/internal/controller/device/plan9/share/doc.go b/internal/controller/device/plan9/share/doc.go new file mode 100644 index 0000000000..3f64d75e1d --- /dev/null +++ b/internal/controller/device/plan9/share/doc.go @@ -0,0 +1,33 @@ +//go:build windows + +// Package share manages the lifecycle of a single Plan9 share attached to a +// Hyper-V VM, from host-side name allocation through guest-side mounting. 
+// +// # Overview +// +// [Share] is the primary type, representing one Plan9 share attached (or to be +// attached) to the VM. It tracks its own lifecycle state via [State] and +// delegates guest mount management to [mount.Mount]. +// +// All operations on a [Share] are expected to be ordered by the caller. +// No locking is performed at this layer. +// +// # Share Lifecycle +// +// ┌──────────────────────┐ +// │ StateReserved │ ← add failure → StateRemoved (not retriable) +// └──────────┬───────────┘ +// │ share added to VM +// ▼ +// ┌──────────────────────┐ +// │ StateAdded │ +// └──────────┬───────────┘ +// (guest mount driven here) +// │ mount released; +// │ share removed from VM +// ▼ +// ┌──────────────────────┐ +// │ StateRemoved │ +// └──────────────────────┘ +// (terminal — entry removed from controller) +package share diff --git a/internal/controller/device/plan9/share/mocks/mock_share.go b/internal/controller/device/plan9/share/mocks/mock_share.go new file mode 100644 index 0000000000..be1380b013 --- /dev/null +++ b/internal/controller/device/plan9/share/mocks/mock_share.go @@ -0,0 +1,94 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: types.go +// +// Generated by this command: +// +// mockgen -source types.go -package mocks -destination mocks/mock_share.go +// + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" + gomock "go.uber.org/mock/gomock" +) + +// MockVMPlan9Adder is a mock of VMPlan9Adder interface. +type MockVMPlan9Adder struct { + ctrl *gomock.Controller + recorder *MockVMPlan9AdderMockRecorder + isgomock struct{} +} + +// MockVMPlan9AdderMockRecorder is the mock recorder for MockVMPlan9Adder. +type MockVMPlan9AdderMockRecorder struct { + mock *MockVMPlan9Adder +} + +// NewMockVMPlan9Adder creates a new mock instance. 
+func NewMockVMPlan9Adder(ctrl *gomock.Controller) *MockVMPlan9Adder { + mock := &MockVMPlan9Adder{ctrl: ctrl} + mock.recorder = &MockVMPlan9AdderMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockVMPlan9Adder) EXPECT() *MockVMPlan9AdderMockRecorder { + return m.recorder +} + +// AddPlan9 mocks base method. +func (m *MockVMPlan9Adder) AddPlan9(ctx context.Context, settings hcsschema.Plan9Share) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddPlan9", ctx, settings) + ret0, _ := ret[0].(error) + return ret0 +} + +// AddPlan9 indicates an expected call of AddPlan9. +func (mr *MockVMPlan9AdderMockRecorder) AddPlan9(ctx, settings any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddPlan9", reflect.TypeOf((*MockVMPlan9Adder)(nil).AddPlan9), ctx, settings) +} + +// MockVMPlan9Remover is a mock of VMPlan9Remover interface. +type MockVMPlan9Remover struct { + ctrl *gomock.Controller + recorder *MockVMPlan9RemoverMockRecorder + isgomock struct{} +} + +// MockVMPlan9RemoverMockRecorder is the mock recorder for MockVMPlan9Remover. +type MockVMPlan9RemoverMockRecorder struct { + mock *MockVMPlan9Remover +} + +// NewMockVMPlan9Remover creates a new mock instance. +func NewMockVMPlan9Remover(ctrl *gomock.Controller) *MockVMPlan9Remover { + mock := &MockVMPlan9Remover{ctrl: ctrl} + mock.recorder = &MockVMPlan9RemoverMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockVMPlan9Remover) EXPECT() *MockVMPlan9RemoverMockRecorder { + return m.recorder +} + +// RemovePlan9 mocks base method. +func (m *MockVMPlan9Remover) RemovePlan9(ctx context.Context, settings hcsschema.Plan9Share) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemovePlan9", ctx, settings) + ret0, _ := ret[0].(error) + return ret0 +} + +// RemovePlan9 indicates an expected call of RemovePlan9. 
+func (mr *MockVMPlan9RemoverMockRecorder) RemovePlan9(ctx, settings any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemovePlan9", reflect.TypeOf((*MockVMPlan9Remover)(nil).RemovePlan9), ctx, settings) +} diff --git a/internal/controller/device/plan9/share/share.go b/internal/controller/device/plan9/share/share.go new file mode 100644 index 0000000000..3f28c57692 --- /dev/null +++ b/internal/controller/device/plan9/share/share.go @@ -0,0 +1,244 @@ +//go:build windows + +package share + +import ( + "context" + "fmt" + + "github.com/sirupsen/logrus" + + "github.com/Microsoft/hcsshim/internal/controller/device/plan9/mount" + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" + "github.com/Microsoft/hcsshim/internal/log" + "github.com/Microsoft/hcsshim/internal/logfields" + "github.com/Microsoft/hcsshim/internal/vm/vmutils" +) + +// share-flag constants used in Plan9 HCS requests. +// +// These are marked private in the HCS schema. When public variants become +// available, we should replace these. +const ( + shareFlagsReadOnly int32 = 0x00000001 + shareFlagsLinuxMetadata int32 = 0x00000004 + shareFlagsRestrictFileAccess int32 = 0x00000080 +) + +// Share represents a Plan9 share attached to a Hyper-V VM. It tracks the +// share lifecycle and delegates guest mount management to [mount.Mount]. +// +// All operations on a [Share] are expected to be ordered by the caller. +// No locking is performed at this layer. +type Share struct { + // name is the HCS-level identifier for this share. + name string + + // config is the immutable host-side share configuration supplied at construction. + config Config + + // state tracks the current lifecycle position of this share. + state State + + // mount is the guest-side mount for this share. + // nil until ReserveMount is called. "mount != nil" serves as the ref indicator. 
+ mount *mount.Mount +} + +// NewReserved creates a new [Share] in the [StateReserved] state with the +// provided name and host-side share configuration. +func NewReserved(name string, config Config) *Share { + return &Share{ + name: name, + config: config, + state: StateReserved, + } +} + +// State returns the current lifecycle state of the share. +func (s *Share) State() State { + return s.state +} + +// Config returns the host-side share configuration. +func (s *Share) Config() Config { + return s.config +} + +// Name returns the HCS-level share name. +func (s *Share) Name() string { + return s.name +} + +// HostPath returns the host-side path of the share. +func (s *Share) HostPath() string { + return s.config.HostPath +} + +// GuestPath returns the guest-side mount path of the share. +// Valid only if a mount has been reserved on this share; otherwise returns an empty string. +func (s *Share) GuestPath() string { + if s.mount == nil { + return "" + } + return s.mount.GuestPath() +} + +// AddToVM adds the share to the VM's Plan9 provider. It is idempotent for an +// already-added share; on failure the share is moved into removed state and +// a new [Share] must be created to retry. +func (s *Share) AddToVM(ctx context.Context, vm VMPlan9Adder) error { + + // Drive the state machine. + switch s.state { + case StateReserved: + log.G(ctx).WithFields(logrus.Fields{ + logfields.HostPath: s.config.HostPath, + "shareName": s.name, + }).Debug("adding Plan9 share to VM") + + // Build the HCS flags from the share config. + flags := shareFlagsLinuxMetadata + if s.config.ReadOnly { + flags |= shareFlagsReadOnly + } + if s.config.Restrict { + flags |= shareFlagsRestrictFileAccess + } + + // Attempt to add the share to the VM. 
+ if err := vm.AddPlan9(ctx, hcsschema.Plan9Share{ + Name: s.name, + AccessName: s.name, + Path: s.config.HostPath, + Port: vmutils.Plan9Port, + Flags: flags, + AllowedFiles: s.config.AllowedNames, + }); err != nil { + // Since the share was never added, move directly to the terminal + // Removed state. No guest state was established, so there is nothing + // to clean up. + s.state = StateRemoved + return fmt.Errorf("add Plan9 share %s to VM: %w", s.name, err) + } + + // Move to added state after a successful add. + s.state = StateAdded + log.G(ctx).Debug("Plan9 share added to VM") + return nil + + case StateAdded: + // Already added — no-op. + return nil + + case StateRemoved: + // Re-adding a removed share is not supported. + return fmt.Errorf("share %s already removed", s.name) + default: + } + + return nil +} + +// RemoveFromVM removes the share from the VM. It is idempotent for a share +// that was never added or is already removed; a failed removal is retriable +// by calling RemoveFromVM again. +func (s *Share) RemoveFromVM(ctx context.Context, vm VMPlan9Remover) error { + ctx, _ = log.WithContext(ctx, logrus.WithField("shareName", s.name)) + + switch s.state { + case StateReserved: + // Share was never added — move directly to removed. + s.state = StateRemoved + + case StateAdded: + // If the mount is still active, skip removal. + if s.mount != nil { + return nil + } + + log.G(ctx).Debug("removing Plan9 share from VM") + + // Remove the share from the VM. + if err := vm.RemovePlan9(ctx, hcsschema.Plan9Share{ + Name: s.name, + AccessName: s.name, + Port: vmutils.Plan9Port, + }); err != nil { + // Leave the share in StateAdded so the caller can retry. + return fmt.Errorf("remove Plan9 share %s from VM: %w", s.name, err) + } + + s.state = StateRemoved + log.G(ctx).Debug("Plan9 share removed from VM") + + case StateRemoved: + // Already fully removed — no-op. + } + + return nil +} + +// ReserveMount reserves a slot for a guest mount on this share. 
If a mount +// already exists, it increments the reference count after verifying the config +// matches. +func (s *Share) ReserveMount(ctx context.Context, config mount.Config) (*mount.Mount, error) { + if s.state != StateReserved && s.state != StateAdded { + return nil, fmt.Errorf("cannot reserve mount on share in state %s", s.state) + } + + ctx, _ = log.WithContext(ctx, logrus.WithField("shareName", s.name)) + + // If a mount already exists for this share, bump its ref count. + if s.mount != nil { + if err := s.mount.Reserve(config); err != nil { + return nil, fmt.Errorf("reserve mount on share %s: %w", s.name, err) + } + + log.G(ctx).Trace("existing mount found for share, incrementing ref count") + return s.mount, nil + } + + // No existing mount — create one in the reserved state. + newMount := mount.NewReserved(s.name, config) + s.mount = newMount + + log.G(ctx).Trace("reserved new mount for share") + return newMount, nil +} + +// MountToGuest mounts the share inside the guest, returning the guest path. +// The mount must first be reserved via [Share.ReserveMount]. +func (s *Share) MountToGuest(ctx context.Context, guest mount.LinuxGuestPlan9Mounter) (string, error) { + if s.state != StateAdded { + return "", fmt.Errorf("cannot mount share in state %s, expected added", s.state) + } + + // Look up the pre-reserved mount for this share. + if s.mount == nil { + return "", fmt.Errorf("mount not reserved on share %s", s.name) + } + return s.mount.MountToGuest(ctx, guest) +} + +// UnmountFromGuest unmounts the share from the guest. When the mount's +// reference count reaches zero and it transitions to the unmounted state, +// the mount entry is removed from the share so a subsequent +// [Share.RemoveFromVM] call sees no active mount. +func (s *Share) UnmountFromGuest(ctx context.Context, guest mount.LinuxGuestPlan9Unmounter) error { + if s.mount == nil { + // No mount found — treat as a no-op to support retry by callers. 
+ return nil + } + + if err := s.mount.UnmountFromGuest(ctx, guest); err != nil { + return fmt.Errorf("unmount share %s from guest: %w", s.name, err) + } + + // If the mount reached the terminal unmounted state, remove it from the share + // so that RemoveFromVM correctly sees no active mount. + if s.mount.State() == mount.StateUnmounted { + s.mount = nil + } + return nil +} diff --git a/internal/controller/device/plan9/share/share_test.go b/internal/controller/device/plan9/share/share_test.go new file mode 100644 index 0000000000..3dda1cdcf7 --- /dev/null +++ b/internal/controller/device/plan9/share/share_test.go @@ -0,0 +1,431 @@ +//go:build windows && lcow + +package share + +import ( + "context" + "errors" + "testing" + + "go.uber.org/mock/gomock" + + "github.com/Microsoft/hcsshim/internal/controller/device/plan9/mount" + mountmocks "github.com/Microsoft/hcsshim/internal/controller/device/plan9/mount/mocks" + "github.com/Microsoft/hcsshim/internal/controller/device/plan9/share/mocks" +) + +var ( + errVMAdd = errors.New("VM add failed") + errVMRemove = errors.New("VM remove failed") +) + +func newTestConfig() Config { + return Config{ + HostPath: "/host/path", + ReadOnly: false, + Restrict: false, + } +} + +// ───────────────────────────────────────────────────────────────────────────── +// NewReserved +// ───────────────────────────────────────────────────────────────────────────── + +// TestNewReserved_InitialState verifies that a freshly created share starts in +// StateReserved with the correct name, host path, and an empty guest path +// (no mount has been reserved yet). 
+func TestNewReserved_InitialState(t *testing.T) { + s := NewReserved("share0", newTestConfig()) + if s.State() != StateReserved { + t.Errorf("expected StateReserved, got %v", s.State()) + } + if s.Name() != "share0" { + t.Errorf("expected name %q, got %q", "share0", s.Name()) + } + if s.HostPath() != "/host/path" { + t.Errorf("expected host path %q, got %q", "/host/path", s.HostPath()) + } + if s.GuestPath() != "" { + t.Errorf("expected empty guest path before mount, got %q", s.GuestPath()) + } +} + +// ───────────────────────────────────────────────────────────────────────────── +// AddToVM +// ───────────────────────────────────────────────────────────────────────────── + +// TestAddToVM_HappyPath verifies that a StateReserved share transitions to +// StateAdded after a successful VM AddPlan9 call. +func TestAddToVM_HappyPath(t *testing.T) { + ctrl := gomock.NewController(t) + vm := mocks.NewMockVMPlan9Adder(ctrl) + + s := NewReserved("share0", newTestConfig()) + vm.EXPECT().AddPlan9(gomock.Any(), gomock.Any()).Return(nil) + + if err := s.AddToVM(context.Background(), vm); err != nil { + t.Fatalf("unexpected error: %v", err) + } + if s.State() != StateAdded { + t.Errorf("expected StateAdded, got %v", s.State()) + } +} + +// TestAddToVM_VMFails_TransitionsToRemoved verifies that when the VM AddPlan9 +// call fails, the share transitions directly to StateRemoved. 
+func TestAddToVM_VMFails_TransitionsToRemoved(t *testing.T) { + ctrl := gomock.NewController(t) + vm := mocks.NewMockVMPlan9Adder(ctrl) + + s := NewReserved("share0", newTestConfig()) + vm.EXPECT().AddPlan9(gomock.Any(), gomock.Any()).Return(errVMAdd) + + err := s.AddToVM(context.Background(), vm) + if err == nil { + t.Fatal("expected error") + } + if s.State() != StateRemoved { + t.Errorf("expected StateRemoved after VM add failure, got %v", s.State()) + } +} + +// TestAddToVM_AlreadyAdded_Idempotent verifies that calling AddToVM a second +// time on a StateAdded share is a no-op that does not issue another VM call. +func TestAddToVM_AlreadyAdded_Idempotent(t *testing.T) { + ctrl := gomock.NewController(t) + vm := mocks.NewMockVMPlan9Adder(ctrl) + + s := NewReserved("share0", newTestConfig()) + // VM call must happen exactly once. + vm.EXPECT().AddPlan9(gomock.Any(), gomock.Any()).Return(nil).Times(1) + + _ = s.AddToVM(context.Background(), vm) + + // Second call must be a no-op. + if err := s.AddToVM(context.Background(), vm); err != nil { + t.Fatalf("second AddToVM returned unexpected error: %v", err) + } + if s.State() != StateAdded { + t.Errorf("expected StateAdded, got %v", s.State()) + } +} + +// TestAddToVM_OnRemovedShare_Errors verifies that calling AddToVM on a share in +// the terminal StateRemoved returns an error without issuing any VM call. +func TestAddToVM_OnRemovedShare_Errors(t *testing.T) { + ctrl := gomock.NewController(t) + vm := mocks.NewMockVMPlan9Adder(ctrl) + + s := NewReserved("share0", newTestConfig()) + vm.EXPECT().AddPlan9(gomock.Any(), gomock.Any()).Return(errVMAdd) + _ = s.AddToVM(context.Background(), vm) + + // Share is now StateRemoved — AddToVM must return an error. 
+ err := s.AddToVM(context.Background(), vm) + if err == nil { + t.Fatal("expected error when adding a removed share") + } +} + +// ───────────────────────────────────────────────────────────────────────────── +// RemoveFromVM +// ───────────────────────────────────────────────────────────────────────────── + +// TestRemoveFromVM_OnReserved_TransitionsToRemoved verifies that removing a +// share that was never added to the VM (still StateReserved) transitions +// directly to StateRemoved without issuing a VM RemovePlan9 call. +func TestRemoveFromVM_OnReserved_TransitionsToRemoved(t *testing.T) { + ctrl := gomock.NewController(t) + vm := mocks.NewMockVMPlan9Remover(ctrl) + + s := NewReserved("share0", newTestConfig()) + + // Share was never added — RemovePlan9 must NOT be called. + if err := s.RemoveFromVM(context.Background(), vm); err != nil { + t.Fatalf("unexpected error: %v", err) + } + if s.State() != StateRemoved { + t.Errorf("expected StateRemoved, got %v", s.State()) + } +} + +// TestRemoveFromVM_HappyPath verifies the normal AddToVM → RemoveFromVM flow: +// the VM RemovePlan9 call is issued and the share transitions to StateRemoved. +func TestRemoveFromVM_HappyPath(t *testing.T) { + ctrl := gomock.NewController(t) + vmAdd := mocks.NewMockVMPlan9Adder(ctrl) + vmRemove := mocks.NewMockVMPlan9Remover(ctrl) + + s := NewReserved("share0", newTestConfig()) + vmAdd.EXPECT().AddPlan9(gomock.Any(), gomock.Any()).Return(nil) + _ = s.AddToVM(context.Background(), vmAdd) + + vmRemove.EXPECT().RemovePlan9(gomock.Any(), gomock.Any()).Return(nil) + if err := s.RemoveFromVM(context.Background(), vmRemove); err != nil { + t.Fatalf("unexpected error: %v", err) + } + if s.State() != StateRemoved { + t.Errorf("expected StateRemoved, got %v", s.State()) + } +} + +// TestRemoveFromVM_VMFails_StaysInAdded verifies that a failed VM RemovePlan9 +// call leaves the share in StateAdded so the caller can retry. 
+func TestRemoveFromVM_VMFails_StaysInAdded(t *testing.T) { + ctrl := gomock.NewController(t) + vmAdd := mocks.NewMockVMPlan9Adder(ctrl) + vmRemove := mocks.NewMockVMPlan9Remover(ctrl) + + s := NewReserved("share0", newTestConfig()) + vmAdd.EXPECT().AddPlan9(gomock.Any(), gomock.Any()).Return(nil) + _ = s.AddToVM(context.Background(), vmAdd) + + vmRemove.EXPECT().RemovePlan9(gomock.Any(), gomock.Any()).Return(errVMRemove) + err := s.RemoveFromVM(context.Background(), vmRemove) + if err == nil { + t.Fatal("expected error") + } + if s.State() != StateAdded { + t.Errorf("expected StateAdded after failed remove, got %v", s.State()) + } +} + +// TestRemoveFromVM_VMFails_ThenRetry verifies that after a failed VM remove +// (share stays StateAdded), a retry succeeds and transitions to StateRemoved. +func TestRemoveFromVM_VMFails_ThenRetry(t *testing.T) { + ctrl := gomock.NewController(t) + vmAdd := mocks.NewMockVMPlan9Adder(ctrl) + vmRemove := mocks.NewMockVMPlan9Remover(ctrl) + + s := NewReserved("share0", newTestConfig()) + vmAdd.EXPECT().AddPlan9(gomock.Any(), gomock.Any()).Return(nil) + _ = s.AddToVM(context.Background(), vmAdd) + + vmRemove.EXPECT().RemovePlan9(gomock.Any(), gomock.Any()).Return(errVMRemove) + _ = s.RemoveFromVM(context.Background(), vmRemove) + + vmRemove.EXPECT().RemovePlan9(gomock.Any(), gomock.Any()).Return(nil) + if err := s.RemoveFromVM(context.Background(), vmRemove); err != nil { + t.Fatalf("retry RemoveFromVM failed: %v", err) + } + if s.State() != StateRemoved { + t.Errorf("expected StateRemoved after retry, got %v", s.State()) + } +} + +// TestRemoveFromVM_AlreadyRemoved_NoOp verifies that calling RemoveFromVM on a +// share already in the terminal StateRemoved is a no-op with no error and no +// duplicate VM call. 
+func TestRemoveFromVM_AlreadyRemoved_NoOp(t *testing.T) { + ctrl := gomock.NewController(t) + vmAdd := mocks.NewMockVMPlan9Adder(ctrl) + vmRemove := mocks.NewMockVMPlan9Remover(ctrl) + + s := NewReserved("share0", newTestConfig()) + vmAdd.EXPECT().AddPlan9(gomock.Any(), gomock.Any()).Return(nil) + _ = s.AddToVM(context.Background(), vmAdd) + + vmRemove.EXPECT().RemovePlan9(gomock.Any(), gomock.Any()).Return(nil).Times(1) + _ = s.RemoveFromVM(context.Background(), vmRemove) + + // Second remove on StateRemoved must be a no-op with no error. + if err := s.RemoveFromVM(context.Background(), vmRemove); err != nil { + t.Fatalf("unexpected error on second remove: %v", err) + } +} + +// TestRemoveFromVM_WithActiveMountSkipsRemove verifies that RemoveFromVM on a +// StateAdded share with an active mount reservation does not issue the VM +// RemovePlan9 call and keeps the share in StateAdded. +func TestRemoveFromVM_WithActiveMountSkipsRemove(t *testing.T) { + ctrl := gomock.NewController(t) + vmAdd := mocks.NewMockVMPlan9Adder(ctrl) + vmRemove := mocks.NewMockVMPlan9Remover(ctrl) + + s := NewReserved("share0", newTestConfig()) + vmAdd.EXPECT().AddPlan9(gomock.Any(), gomock.Any()).Return(nil) + _ = s.AddToVM(context.Background(), vmAdd) + + // Reserve a mount to simulate an active mount (mount != nil). + _, _ = s.ReserveMount(context.Background(), mount.Config{}) + + // RemovePlan9 must NOT be called while a mount is active. 
+ if err := s.RemoveFromVM(context.Background(), vmRemove); err != nil { + t.Fatalf("unexpected error: %v", err) + } + if s.State() != StateAdded { + t.Errorf("expected StateAdded (still held by mount), got %v", s.State()) + } +} + +// ───────────────────────────────────────────────────────────────────────────── +// ReserveMount +// ───────────────────────────────────────────────────────────────────────────── + +// TestReserveMount_CreatesNewMount verifies that calling ReserveMount on a +// share without an existing mount creates a new mount in StateReserved. +func TestReserveMount_CreatesNewMount(t *testing.T) { + s := NewReserved("share0", newTestConfig()) + + m, err := s.ReserveMount(context.Background(), mount.Config{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if m == nil { + t.Fatal("expected non-nil mount") + } + if m.State() != mount.StateReserved { + t.Errorf("expected mount StateReserved, got %v", m.State()) + } +} + +// TestReserveMount_SameConfig_ReturnsSameMount verifies that a second +// ReserveMount call with the same config returns the same mount object +// (bumping its refCount) instead of creating a new one. +func TestReserveMount_SameConfig_ReturnsSameMount(t *testing.T) { + s := NewReserved("share0", newTestConfig()) + + m1, _ := s.ReserveMount(context.Background(), mount.Config{ReadOnly: false}) + m2, err := s.ReserveMount(context.Background(), mount.Config{ReadOnly: false}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if m1 != m2 { + t.Error("expected same mount object returned for same config") + } +} + +// TestReserveMount_DifferentConfig_Errors verifies that reserving a mount with +// a different config than the existing mount returns an error. 
+func TestReserveMount_DifferentConfig_Errors(t *testing.T) { + s := NewReserved("share0", newTestConfig()) + _, _ = s.ReserveMount(context.Background(), mount.Config{ReadOnly: false}) + + _, err := s.ReserveMount(context.Background(), mount.Config{ReadOnly: true}) + if err == nil { + t.Fatal("expected error when reserving mount with different config") + } +} + +// TestReserveMount_OnRemovedShare_Errors verifies that calling ReserveMount on +// a share that has reached the terminal StateRemoved returns an error. +func TestReserveMount_OnRemovedShare_Errors(t *testing.T) { + ctrl := gomock.NewController(t) + vm := mocks.NewMockVMPlan9Adder(ctrl) + + s := NewReserved("share0", newTestConfig()) + vm.EXPECT().AddPlan9(gomock.Any(), gomock.Any()).Return(errVMAdd) + _ = s.AddToVM(context.Background(), vm) + + _, err := s.ReserveMount(context.Background(), mount.Config{}) + if err == nil { + t.Fatal("expected error when reserving mount on removed share") + } +} + +// ───────────────────────────────────────────────────────────────────────────── +// MountToGuest +// ───────────────────────────────────────────────────────────────────────────── + +// TestMountToGuest_RequiresStateAdded verifies that MountToGuest fails when +// the share is still in StateReserved (not yet added to the VM). +func TestMountToGuest_RequiresStateAdded(t *testing.T) { + ctrl := gomock.NewController(t) + guestMount := mountmocks.NewMockLinuxGuestPlan9Mounter(ctrl) + + s := NewReserved("share0", newTestConfig()) + _, _ = s.ReserveMount(context.Background(), mount.Config{}) + + // Share is in StateReserved — mount must fail. + _, err := s.MountToGuest(context.Background(), guestMount) + if err == nil { + t.Fatal("expected error when mounting share not yet added to VM") + } +} + +// TestMountToGuest_RequiresReservedMount verifies that MountToGuest fails when +// no mount has been reserved on the share via ReserveMount. 
+func TestMountToGuest_RequiresReservedMount(t *testing.T) { + ctrl := gomock.NewController(t) + vmAdd := mocks.NewMockVMPlan9Adder(ctrl) + guestMount := mountmocks.NewMockLinuxGuestPlan9Mounter(ctrl) + + s := NewReserved("share0", newTestConfig()) + vmAdd.EXPECT().AddPlan9(gomock.Any(), gomock.Any()).Return(nil) + _ = s.AddToVM(context.Background(), vmAdd) + + // No mount reserved — must return error. + _, err := s.MountToGuest(context.Background(), guestMount) + if err == nil { + t.Fatal("expected error when no mount is reserved") + } +} + +// TestMountToGuest_HappyPath verifies a full ReserveMount → AddToVM → +// MountToGuest flow: the guest AddLCOWMappedDirectory call succeeds and +// MountToGuest returns a non-empty guest path. +func TestMountToGuest_HappyPath(t *testing.T) { + ctrl := gomock.NewController(t) + vmAdd := mocks.NewMockVMPlan9Adder(ctrl) + guestMount := mountmocks.NewMockLinuxGuestPlan9Mounter(ctrl) + + s := NewReserved("share0", newTestConfig()) + vmAdd.EXPECT().AddPlan9(gomock.Any(), gomock.Any()).Return(nil) + _ = s.AddToVM(context.Background(), vmAdd) + _, _ = s.ReserveMount(context.Background(), mount.Config{}) + + guestMount.EXPECT().AddLCOWMappedDirectory(gomock.Any(), gomock.Any()).Return(nil) + guestPath, err := s.MountToGuest(context.Background(), guestMount) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if guestPath == "" { + t.Error("expected non-empty guest path") + } +} + +// ───────────────────────────────────────────────────────────────────────────── +// UnmountFromGuest +// ───────────────────────────────────────────────────────────────────────────── + +// TestUnmountFromGuest_HappyPath verifies a full mount → unmount cycle: after +// a successful guest RemoveLCOWMappedDirectory call the share's internal mount +// entry is cleared (GuestPath returns empty). 
+func TestUnmountFromGuest_HappyPath(t *testing.T) { + ctrl := gomock.NewController(t) + vmAdd := mocks.NewMockVMPlan9Adder(ctrl) + guestMount := mountmocks.NewMockLinuxGuestPlan9Mounter(ctrl) + guestUnmount := mountmocks.NewMockLinuxGuestPlan9Unmounter(ctrl) + + s := NewReserved("share0", newTestConfig()) + vmAdd.EXPECT().AddPlan9(gomock.Any(), gomock.Any()).Return(nil) + _ = s.AddToVM(context.Background(), vmAdd) + _, _ = s.ReserveMount(context.Background(), mount.Config{}) + + guestMount.EXPECT().AddLCOWMappedDirectory(gomock.Any(), gomock.Any()).Return(nil) + _, _ = s.MountToGuest(context.Background(), guestMount) + + guestUnmount.EXPECT().RemoveLCOWMappedDirectory(gomock.Any(), gomock.Any()).Return(nil) + if err := s.UnmountFromGuest(context.Background(), guestUnmount); err != nil { + t.Fatalf("unexpected error: %v", err) + } + + // After unmount the share's internal mount entry should be cleared. + if s.GuestPath() != "" { + t.Errorf("expected empty GuestPath after unmount, got %q", s.GuestPath()) + } +} + +// TestUnmountFromGuest_NoMount_IsNoOp verifies that calling UnmountFromGuest on +// a share with no reserved mount is a no-op that returns no error. +func TestUnmountFromGuest_NoMount_IsNoOp(t *testing.T) { + ctrl := gomock.NewController(t) + guestUnmount := mountmocks.NewMockLinuxGuestPlan9Unmounter(ctrl) + + s := NewReserved("share0", newTestConfig()) + + // No mount reserved — must be a no-op. + if err := s.UnmountFromGuest(context.Background(), guestUnmount); err != nil { + t.Fatalf("unexpected error: %v", err) + } +} diff --git a/internal/controller/device/plan9/share/state.go b/internal/controller/device/plan9/share/state.go new file mode 100644 index 0000000000..b9096c3f4e --- /dev/null +++ b/internal/controller/device/plan9/share/state.go @@ -0,0 +1,54 @@ +//go:build windows + +package share + +// State represents the current lifecycle state of a Plan9 share. 
+// +// The normal progression is: +// +// StateReserved → StateAdded → StateRemoved +// +// Add failure from StateReserved transitions directly to the +// terminal StateRemoved state; create a new [Share] to retry. +// A VM removal failure from StateAdded leaves the share in StateAdded +// so the caller can retry only the removal step. +// +// Full state-transition table: +// +// Current State │ Trigger │ Next State +// ───────────────┼───────────────────────────────────────┼────────────────── +// StateReserved │ add succeeds │ StateAdded +// StateReserved │ add fails │ StateRemoved +// StateAdded │ mount still active │ StateAdded (no-op) +// StateAdded │ removal succeeds │ StateRemoved +// StateAdded │ removal fails │ StateAdded +// StateRemoved │ (terminal — no further transitions) │ — +type State int + +const ( + // StateReserved is the initial state; the share name has been allocated but + // the share has not yet been added to the VM. + StateReserved State = iota + + // StateAdded means the share has been successfully added to the VM. + // The guest mount is driven from this state. + StateAdded + + // StateRemoved means the share has been fully removed from the VM. + // This is a terminal state. + StateRemoved +) + +// String returns a human-readable name for the [State]. +func (s State) String() string { + switch s { + case StateReserved: + return "Reserved" + case StateAdded: + return "Added" + case StateRemoved: + return "Removed" + default: + return "Unknown" + } +} diff --git a/internal/controller/device/plan9/share/types.go b/internal/controller/device/plan9/share/types.go new file mode 100644 index 0000000000..c8830fa78a --- /dev/null +++ b/internal/controller/device/plan9/share/types.go @@ -0,0 +1,42 @@ +//go:build windows + +package share + +import ( + "context" + "slices" + + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" +) + +// Config describes the host-side Plan9 share to add to the VM. 
+type Config struct { + // HostPath is the path on the host to share into the VM. + HostPath string + // ReadOnly specifies whether the share should be read-only. + ReadOnly bool + // Restrict enables single-file mapping mode for the share. + Restrict bool + // AllowedNames is the list of file names allowed when Restrict is true. + AllowedNames []string +} + +// Equals reports whether two share Config values describe the same share parameters. +func (c Config) Equals(other Config) bool { + return c.HostPath == other.HostPath && + c.ReadOnly == other.ReadOnly && + c.Restrict == other.Restrict && + slices.Equal(c.AllowedNames, other.AllowedNames) +} + +// VMPlan9Adder adds a Plan9 share to a Utility VM. +type VMPlan9Adder interface { + // AddPlan9 adds a Plan9 share to a running Utility VM. + AddPlan9(ctx context.Context, settings hcsschema.Plan9Share) error +} + +// VMPlan9Remover removes a Plan9 share from a Utility VM. +type VMPlan9Remover interface { + // RemovePlan9 removes a Plan9 share from a running Utility VM. + RemovePlan9(ctx context.Context, settings hcsschema.Plan9Share) error +} diff --git a/internal/controller/device/plan9/state.go b/internal/controller/device/plan9/state.go deleted file mode 100644 index edd8212bf8..0000000000 --- a/internal/controller/device/plan9/state.go +++ /dev/null @@ -1,55 +0,0 @@ -//go:build windows && !wcow - -package plan9 - -// shareState represents the current state of a Plan9 share's lifecycle. -// -// The normal progression is: -// -// sharePending → shareAdded → shareRemoved -// -// If AddPlan9 fails, the owning goroutine moves the share to -// shareInvalid and records the error in [shareEntry.stateErr]. Other goroutines -// waiting on the same entry observe the invalid state and receive the original error. -// The entry is removed from the map immediately after the transition. 
-// -// Full state-transition table: -// -// Current State │ Trigger │ Next State -// ───────────────┼───────────────────────────┼───────────────────────────── -// sharePending │ AddPlan9 succeeds │ shareAdded -// sharePending │ AddPlan9 fails │ shareInvalid -// shareAdded │ RemovePlan9 succeeds │ shareRemoved -// shareRemoved │ (terminal — no transitions)│ — -// shareInvalid │ entry removed from map │ — -type shareState int - -const ( - // sharePending is the initial state; AddPlan9 has not yet completed. - sharePending shareState = iota - - // shareAdded means AddPlan9 succeeded; the share is live on the VM. - shareAdded - - // shareRemoved means RemovePlan9 succeeded. This is a terminal state. - shareRemoved - - // shareInvalid means AddPlan9 failed. - shareInvalid -) - -// String returns a human-readable name for the [shareState]. -func (s shareState) String() string { - switch s { - case sharePending: - return "Pending" - case shareAdded: - return "Added" - case shareRemoved: - return "Removed" - case shareInvalid: - return "Invalid" - default: - return "Unknown" - } -} diff --git a/internal/controller/device/plan9/types.go b/internal/controller/device/plan9/types.go index 5828774bc0..ff9a2f20c5 100644 --- a/internal/controller/device/plan9/types.go +++ b/internal/controller/device/plan9/types.go @@ -1,85 +1,30 @@ -//go:build windows && !wcow +//go:build windows package plan9 import ( - "context" - "sync" - - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" + "github.com/Microsoft/hcsshim/internal/controller/device/plan9/mount" + "github.com/Microsoft/hcsshim/internal/controller/device/plan9/share" ) -// AddOptions holds the configuration required to add a Plan9 share to the VM. -type AddOptions struct { - // HostPath is the path on the host to share into the VM. - HostPath string - - // ReadOnly indicates whether the share should be mounted read-only. - ReadOnly bool - - // Restrict enables single-file mapping mode for the share. 
- Restrict bool - - // AllowedNames is the list of file names allowed when Restrict is true. - AllowedNames []string -} - -// vmPlan9Manager manages adding and removing Plan9 shares on the host VM. -// Implemented by [vmmanager.UtilityVM]. -type vmPlan9Manager interface { - // AddPlan9 adds a plan 9 share to a running Utility VM. - AddPlan9(ctx context.Context, settings hcsschema.Plan9Share) error - - // RemovePlan9 removes a plan 9 share from a running Utility VM. - RemovePlan9(ctx context.Context, settings hcsschema.Plan9Share) error -} - -// ============================================================================== -// INTERNAL DATA STRUCTURES -// Types below this line are unexported and used for state tracking. -// ============================================================================== - -// shareEntry records one Plan9 share's full lifecycle state and reference count. -type shareEntry struct { - // mu serializes state transitions. - mu sync.Mutex - - // opts is the immutable share parameters used to match duplicate add requests. - opts *AddOptions +// reservation links a caller-supplied reservation ID to a Plan9 share host +// path. Access must be guarded by Controller.mu. +type reservation struct { + // hostPath is the key into Controller.sharesByHostPath for the share. + hostPath string - // name is the HCS-level identifier for this share, generated at allocation time. + // name is the share name corresponding to the hostPath. name string +} - // refCount is the number of active callers sharing this entry. - // Access must be guarded by [Manager.mu]. - refCount uint - - // state tracks the forward-only lifecycle position of this share. - // Access must be guarded by mu. - state shareState - - // stateErr records the error that caused a transition to [shareInvalid]. - // Waiters that find the entry in the invalid state return this error so - // that every caller sees the original failure reason. 
- stateErr error +// vmPlan9 combines the VM-side Plan9 add and remove operations. +type vmPlan9 interface { + share.VMPlan9Adder + share.VMPlan9Remover } -// optionsMatch reports whether two [AddOptions] values describe the same share. -// AllowedNames is compared in order. -func optionsMatch(a, b *AddOptions) bool { - if a == nil || b == nil { - return a == b - } - if a.HostPath != b.HostPath || a.ReadOnly != b.ReadOnly || a.Restrict != b.Restrict { - return false - } - if len(a.AllowedNames) != len(b.AllowedNames) { - return false - } - for i := range a.AllowedNames { - if a.AllowedNames[i] != b.AllowedNames[i] { - return false - } - } - return true +// guestPlan9 combines all guest-side Plan9 operations for LCOW guests. +type guestPlan9 interface { + mount.LinuxGuestPlan9Mounter + mount.LinuxGuestPlan9Unmounter } From 1a87085c4a40038f05a73b539d4f23a0c8a6780f Mon Sep 17 00:00:00 2001 From: Harsh Rawat Date: Tue, 7 Apr 2026 08:55:31 +0530 Subject: [PATCH 4/7] review 2 Signed-off-by: Harsh Rawat --- .../controller/device/plan9/share/share.go | 16 ++------ internal/hcs/schema2/plan9_share.go | 11 +----- internal/hcs/schema2/plan9_share_flags.go | 38 +++++++++++++++++++ internal/uvm/plan9.go | 17 ++------- 4 files changed, 46 insertions(+), 36 deletions(-) create mode 100644 internal/hcs/schema2/plan9_share_flags.go diff --git a/internal/controller/device/plan9/share/share.go b/internal/controller/device/plan9/share/share.go index 3f28c57692..2b46e799f7 100644 --- a/internal/controller/device/plan9/share/share.go +++ b/internal/controller/device/plan9/share/share.go @@ -15,16 +15,6 @@ import ( "github.com/Microsoft/hcsshim/internal/vm/vmutils" ) -// share-flag constants used in Plan9 HCS requests. -// -// These are marked private in the HCS schema. When public variants become -// available, we should replace these. 
-const ( - shareFlagsReadOnly int32 = 0x00000001 - shareFlagsLinuxMetadata int32 = 0x00000004 - shareFlagsRestrictFileAccess int32 = 0x00000080 -) - // Share represents a Plan9 share attached to a Hyper-V VM. It tracks the // share lifecycle and delegates guest mount management to [mount.Mount]. // @@ -98,12 +88,12 @@ func (s *Share) AddToVM(ctx context.Context, vm VMPlan9Adder) error { }).Debug("adding Plan9 share to VM") // Build the HCS flags from the share config. - flags := shareFlagsLinuxMetadata + flags := hcsschema.Plan9ShareFlagsLinuxMetadata if s.config.ReadOnly { - flags |= shareFlagsReadOnly + flags |= hcsschema.Plan9ShareFlagsReadOnly } if s.config.Restrict { - flags |= shareFlagsRestrictFileAccess + flags |= hcsschema.Plan9ShareFlagsRestrictFileAccess } // Attempt to add the share to the VM. diff --git a/internal/hcs/schema2/plan9_share.go b/internal/hcs/schema2/plan9_share.go index 41f8fdea02..ee7e68bc41 100644 --- a/internal/hcs/schema2/plan9_share.go +++ b/internal/hcs/schema2/plan9_share.go @@ -19,16 +19,7 @@ type Plan9Share struct { Port int32 `json:"Port,omitempty"` - // Flags are marked private. 
Until they are exported correctly - // - // ReadOnly 0x00000001 - // LinuxMetadata 0x00000004 - // CaseSensitive 0x00000008 - Flags int32 `json:"Flags,omitempty"` - - ReadOnly bool `json:"ReadOnly,omitempty"` - - UseShareRootIdentity bool `json:"UseShareRootIdentity,omitempty"` + Flags Plan9ShareFlags `json:"Flags,omitempty"` AllowedFiles []string `json:"AllowedFiles,omitempty"` } diff --git a/internal/hcs/schema2/plan9_share_flags.go b/internal/hcs/schema2/plan9_share_flags.go new file mode 100644 index 0000000000..17f75ee993 --- /dev/null +++ b/internal/hcs/schema2/plan9_share_flags.go @@ -0,0 +1,38 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Plan9ShareFlags is a bitfield controlling Plan9 share behavior. +type Plan9ShareFlags int32 + +const ( + // Plan9ShareFlagsNone indicates no flags are set. + Plan9ShareFlagsNone Plan9ShareFlags = 0x00000000 + + // Plan9ShareFlagsReadOnly makes the share read-only. + Plan9ShareFlagsReadOnly Plan9ShareFlags = 0x00000001 + + // Plan9ShareFlagsLinuxMetadata enables writing Linux metadata. + Plan9ShareFlagsLinuxMetadata Plan9ShareFlags = 0x00000004 + + // Plan9ShareFlagsCaseSensitive creates directories in case-sensitive mode. + Plan9ShareFlagsCaseSensitive Plan9ShareFlags = 0x00000008 + + // Plan9ShareFlagsUseShareRootIdentity uses the identity of the share root + // when opening. + Plan9ShareFlagsUseShareRootIdentity Plan9ShareFlags = 0x00000010 + + // Plan9ShareFlagsRestrictFileAccess enables single-file mapping mode. + Plan9ShareFlagsRestrictFileAccess Plan9ShareFlags = 0x00000080 + + // Plan9ShareFlagsUnlimitedConnections allows multiple connections to this + // share. 
+ Plan9ShareFlagsUnlimitedConnections Plan9ShareFlags = 0x00000100 +) diff --git a/internal/uvm/plan9.go b/internal/uvm/plan9.go index 88ce774a1d..555d12cecf 100644 --- a/internal/uvm/plan9.go +++ b/internal/uvm/plan9.go @@ -47,24 +47,15 @@ func (uvm *UtilityVM) AddPlan9(ctx context.Context, hostPath string, uvmPath str return nil, fmt.Errorf("adding writable shares is denied: %w", hcs.ErrOperationDenied) } - // TODO: JTERRY75 - These are marked private in the schema. For now use them - // but when there are public variants we need to switch to them. - const ( - shareFlagsReadOnly int32 = 0x00000001 - shareFlagsLinuxMetadata int32 = 0x00000004 - shareFlagsCaseSensitive int32 = 0x00000008 - shareFlagsRestrictFileAccess int32 = 0x00000080 - ) - - // TODO: JTERRY75 - `shareFlagsCaseSensitive` only works if the Windows + // TODO: JTERRY75 - `Plan9ShareFlagsCaseSensitive` only works if the Windows // `hostPath` supports case sensitivity. We need to detect this case before // forwarding this flag in all cases. 
- flags := shareFlagsLinuxMetadata // | shareFlagsCaseSensitive + flags := hcsschema.Plan9ShareFlagsLinuxMetadata // | hcsschema.Plan9ShareFlagsCaseSensitive if readOnly { - flags |= shareFlagsReadOnly + flags |= hcsschema.Plan9ShareFlagsReadOnly } if restrict { - flags |= shareFlagsRestrictFileAccess + flags |= hcsschema.Plan9ShareFlagsRestrictFileAccess } uvm.m.Lock() From 9bd31b7cb786e3f1b5ad0221285cd4b306395061 Mon Sep 17 00:00:00 2001 From: Harsh Rawat Date: Wed, 8 Apr 2026 06:25:54 +0530 Subject: [PATCH 5/7] update build constraints in all the plan9 files Signed-off-by: Harsh Rawat --- internal/controller/device/plan9/controller.go | 2 +- internal/controller/device/plan9/doc.go | 2 +- internal/controller/device/plan9/mount/doc.go | 2 +- internal/controller/device/plan9/mount/mocks/mock_mount.go | 4 +++- internal/controller/device/plan9/mount/mount.go | 2 +- internal/controller/device/plan9/mount/state.go | 2 +- internal/controller/device/plan9/mount/types.go | 2 +- internal/controller/device/plan9/share/doc.go | 2 +- internal/controller/device/plan9/share/mocks/mock_share.go | 4 +++- internal/controller/device/plan9/share/share.go | 2 +- internal/controller/device/plan9/share/state.go | 2 +- internal/controller/device/plan9/share/types.go | 2 +- internal/controller/device/plan9/types.go | 2 +- 13 files changed, 17 insertions(+), 13 deletions(-) diff --git a/internal/controller/device/plan9/controller.go b/internal/controller/device/plan9/controller.go index 0d9269c282..9c3a4db9e5 100644 --- a/internal/controller/device/plan9/controller.go +++ b/internal/controller/device/plan9/controller.go @@ -1,4 +1,4 @@ -//go:build windows +//go:build windows && lcow package plan9 diff --git a/internal/controller/device/plan9/doc.go b/internal/controller/device/plan9/doc.go index 3463f4cff1..59c45339ec 100644 --- a/internal/controller/device/plan9/doc.go +++ b/internal/controller/device/plan9/doc.go @@ -1,4 +1,4 @@ -//go:build windows +//go:build windows && lcow // Package 
plan9 manages the full lifecycle of Plan9 share mappings on a // Hyper-V VM, from host-side name allocation through guest-side mounting. diff --git a/internal/controller/device/plan9/mount/doc.go b/internal/controller/device/plan9/mount/doc.go index 59ee8caf17..fcad32ee09 100644 --- a/internal/controller/device/plan9/mount/doc.go +++ b/internal/controller/device/plan9/mount/doc.go @@ -1,4 +1,4 @@ -//go:build windows +//go:build windows && lcow // Package mount manages the lifecycle of a single Plan9 guest-side mount // inside a Hyper-V guest VM, from the initial reservation through mounting diff --git a/internal/controller/device/plan9/mount/mocks/mock_mount.go b/internal/controller/device/plan9/mount/mocks/mock_mount.go index 44cd54f50a..c271853f9e 100644 --- a/internal/controller/device/plan9/mount/mocks/mock_mount.go +++ b/internal/controller/device/plan9/mount/mocks/mock_mount.go @@ -1,9 +1,11 @@ +//go:build windows && lcow + // Code generated by MockGen. DO NOT EDIT. // Source: types.go // // Generated by this command: // -// mockgen -source types.go -package mocks -destination mocks/mock_mount.go +// mockgen -build_flags=-tags=lcow -build_constraint=windows && lcow -source types.go -package mocks -destination mocks/mock_mount.go // // Package mocks is a generated GoMock package. 
diff --git a/internal/controller/device/plan9/mount/mount.go b/internal/controller/device/plan9/mount/mount.go index 18badfd72d..fb4962adef 100644 --- a/internal/controller/device/plan9/mount/mount.go +++ b/internal/controller/device/plan9/mount/mount.go @@ -1,4 +1,4 @@ -//go:build windows +//go:build windows && lcow package mount diff --git a/internal/controller/device/plan9/mount/state.go b/internal/controller/device/plan9/mount/state.go index af665a0c30..87dcb18bc1 100644 --- a/internal/controller/device/plan9/mount/state.go +++ b/internal/controller/device/plan9/mount/state.go @@ -1,4 +1,4 @@ -//go:build windows +//go:build windows && lcow package mount diff --git a/internal/controller/device/plan9/mount/types.go b/internal/controller/device/plan9/mount/types.go index 964f33fc0a..5f347b8c80 100644 --- a/internal/controller/device/plan9/mount/types.go +++ b/internal/controller/device/plan9/mount/types.go @@ -1,4 +1,4 @@ -//go:build windows +//go:build windows && lcow package mount diff --git a/internal/controller/device/plan9/share/doc.go b/internal/controller/device/plan9/share/doc.go index 3f64d75e1d..a429db2b9e 100644 --- a/internal/controller/device/plan9/share/doc.go +++ b/internal/controller/device/plan9/share/doc.go @@ -1,4 +1,4 @@ -//go:build windows +//go:build windows && lcow // Package share manages the lifecycle of a single Plan9 share attached to a // Hyper-V VM, from host-side name allocation through guest-side mounting. diff --git a/internal/controller/device/plan9/share/mocks/mock_share.go b/internal/controller/device/plan9/share/mocks/mock_share.go index be1380b013..02d4ae1ce8 100644 --- a/internal/controller/device/plan9/share/mocks/mock_share.go +++ b/internal/controller/device/plan9/share/mocks/mock_share.go @@ -1,9 +1,11 @@ +//go:build windows && lcow + // Code generated by MockGen. DO NOT EDIT. 
// Source: types.go // // Generated by this command: // -// mockgen -source types.go -package mocks -destination mocks/mock_share.go +// mockgen -build_flags=-tags=lcow -build_constraint=windows && lcow -source types.go -package mocks -destination mocks/mock_share.go // // Package mocks is a generated GoMock package. diff --git a/internal/controller/device/plan9/share/share.go b/internal/controller/device/plan9/share/share.go index 2b46e799f7..64e1ddea53 100644 --- a/internal/controller/device/plan9/share/share.go +++ b/internal/controller/device/plan9/share/share.go @@ -1,4 +1,4 @@ -//go:build windows +//go:build windows && lcow package share diff --git a/internal/controller/device/plan9/share/state.go b/internal/controller/device/plan9/share/state.go index b9096c3f4e..0c36d1dc57 100644 --- a/internal/controller/device/plan9/share/state.go +++ b/internal/controller/device/plan9/share/state.go @@ -1,4 +1,4 @@ -//go:build windows +//go:build windows && lcow package share diff --git a/internal/controller/device/plan9/share/types.go b/internal/controller/device/plan9/share/types.go index c8830fa78a..99b679128e 100644 --- a/internal/controller/device/plan9/share/types.go +++ b/internal/controller/device/plan9/share/types.go @@ -1,4 +1,4 @@ -//go:build windows +//go:build windows && lcow package share diff --git a/internal/controller/device/plan9/types.go b/internal/controller/device/plan9/types.go index ff9a2f20c5..4d81412c53 100644 --- a/internal/controller/device/plan9/types.go +++ b/internal/controller/device/plan9/types.go @@ -1,4 +1,4 @@ -//go:build windows +//go:build windows && lcow package plan9 From 2d9f9241a815fb217832ab447ac88bf8d97086c1 Mon Sep 17 00:00:00 2001 From: Harsh Rawat Date: Wed, 8 Apr 2026 06:52:22 +0530 Subject: [PATCH 6/7] refactor share state management to introduce StateInvalid for failed attempts Signed-off-by: Harsh Rawat --- .../controller/device/plan9/controller.go | 17 +-- .../device/plan9/controller_test.go | 133 +++++++++++++++++- 
internal/controller/device/plan9/mount/doc.go | 18 +-- .../controller/device/plan9/mount/mount.go | 25 ++-- .../device/plan9/mount/mount_test.go | 33 +++-- .../controller/device/plan9/mount/state.go | 20 ++- internal/controller/device/plan9/share/doc.go | 13 +- .../controller/device/plan9/share/share.go | 33 +++-- .../device/plan9/share/share_test.go | 89 ++++++++++-- .../controller/device/plan9/share/state.go | 19 ++- 10 files changed, 324 insertions(+), 76 deletions(-) diff --git a/internal/controller/device/plan9/controller.go b/internal/controller/device/plan9/controller.go index 9c3a4db9e5..348cc39de7 100644 --- a/internal/controller/device/plan9/controller.go +++ b/internal/controller/device/plan9/controller.go @@ -23,7 +23,7 @@ import ( // // 1. Obtain a reservation using Reserve(). // -// 2. Use the reservation in MapToGuest() to mount the share unto guest. +// 2. Use the reservation in MapToGuest() to mount the share into the guest. // // 3. Call UnmapFromGuest() to release the reservation and all resources. // @@ -171,8 +171,8 @@ func (c *Controller) MapToGuest(ctx context.Context, id guid.GUID) (string, erro // Validate if the host path has an associated share. // This should be reserved by the Reserve() call. - existingShare := c.sharesByHostPath[res.hostPath] - if existingShare == nil { + existingShare, ok := c.sharesByHostPath[res.hostPath] + if !ok { return "", fmt.Errorf("share for host path %s not found", res.hostPath) } @@ -210,14 +210,9 @@ func (c *Controller) UnmapFromGuest(ctx context.Context, id guid.GUID) error { // Validate that the share exists before proceeding with teardown. // This should be reserved by the Reserve() call. - existingShare := c.sharesByHostPath[res.hostPath] - if existingShare == nil { - // Share is gone — a sibling reservation already cleaned it up. - // Example: A and B reserve the same path; A's AddToVM fails - // (share→StateRemoved) and A's UnmapFromGuest deletes the map entry. 
- // B has nothing left to clean up, so just drop the reservation. - delete(c.reservations, id) - return nil + existingShare, ok := c.sharesByHostPath[res.hostPath] + if !ok { + return fmt.Errorf("share for host path %s not found", res.hostPath) } log.G(ctx).WithField(logfields.HostPath, existingShare.HostPath()).Debug("unmapping Plan9 share from guest") diff --git a/internal/controller/device/plan9/controller_test.go b/internal/controller/device/plan9/controller_test.go index a55f50a7a9..532f3e7bbe 100644 --- a/internal/controller/device/plan9/controller_test.go +++ b/internal/controller/device/plan9/controller_test.go @@ -274,7 +274,7 @@ func TestMapToGuest_GuestMountFails_RetryMapToGuest_Fails(t *testing.T) { // Forward retry: MapToGuest again with the same reservation. // AddToVM is idempotent (share already in StateAdded), but MountToGuest - // fails because the mount is in the terminal StateUnmounted. + // fails because the mount is in StateInvalid. _, err = tc.c.MapToGuest(tc.ctx, id) if err == nil { t.Fatal("expected error on retry MapToGuest after terminal mount failure") @@ -525,3 +525,134 @@ func TestFullLifecycle_ReuseAfterRelease(t *testing.T) { t.Error("expected a new reservation ID after re-reserving a released path") } } + +// TestUnmapFromGuest_AddToVMFails_MultipleReservations_AllDrain verifies that +// when two callers reserve the same host path and AddToVM fails, the share +// stays in the controller's map (in StateInvalid) until both callers call +// UnmapFromGuest to drain their mount reservations. Only after the last ref +// is drained does the share transition to StateRemoved and get cleaned up. +func TestUnmapFromGuest_AddToVMFails_MultipleReservations_AllDrain(t *testing.T) { + t.Parallel() + tc := newTestController(t, false) + + // Two callers reserve the same host path. 
+ id1, _, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + id2, _, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + + // First caller attempts MapToGuest — AddToVM fails. + tc.vmAdd.EXPECT().AddPlan9(gomock.Any(), gomock.Any()).Return(errVMAdd) + _, err := tc.c.MapToGuest(tc.ctx, id1) + if err == nil { + t.Fatal("expected error when VM add fails") + } + + // First caller's UnmapFromGuest: decrements mount ref but share stays + // (mount ref > 0 → share stays in StateInvalid). + if err := tc.c.UnmapFromGuest(tc.ctx, id1); err != nil { + t.Fatalf("first UnmapFromGuest: %v", err) + } + + // Share must still be in the map — second caller hasn't drained yet. + if len(tc.c.sharesByHostPath) != 1 { + t.Errorf("expected share to remain in map after first unmap, got %d shares", len(tc.c.sharesByHostPath)) + } + if len(tc.c.reservations) != 1 { + t.Errorf("expected 1 remaining reservation, got %d", len(tc.c.reservations)) + } + + // Second caller's UnmapFromGuest: drains last mount ref → share transitions + // to StateRemoved → controller cleans up the share entry. + if err := tc.c.UnmapFromGuest(tc.ctx, id2); err != nil { + t.Fatalf("second UnmapFromGuest: %v", err) + } + + // Everything should be cleaned up. + if len(tc.c.reservations) != 0 { + t.Errorf("expected 0 reservations after all unmaps, got %d", len(tc.c.reservations)) + } + if len(tc.c.sharesByHostPath) != 0 { + t.Errorf("expected 0 shares after all unmaps, got %d", len(tc.c.sharesByHostPath)) + } +} + +// TestUnmapFromGuest_GuestMountFails_MultipleReservations_AllDrain verifies +// that when two callers reserve the same host path and MapToGuest fails at the +// guest mount stage (AddToVM succeeds, AddLCOWMappedDirectory fails), the share +// and mount stay in the controller's maps until all callers have called +// UnmapFromGuest to drain their mount reservations. 
+func TestUnmapFromGuest_GuestMountFails_MultipleReservations_AllDrain(t *testing.T) { + t.Parallel() + tc := newTestController(t, false) + + // Two callers reserve the same host path. + id1, _, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + id2, _, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + + // First caller attempts MapToGuest — AddToVM succeeds, guest mount fails. + tc.vmAdd.EXPECT().AddPlan9(gomock.Any(), gomock.Any()).Return(nil) + tc.guestMount.EXPECT().AddLCOWMappedDirectory(gomock.Any(), gomock.Any()).Return(errMount) + _, err := tc.c.MapToGuest(tc.ctx, id1) + if err == nil { + t.Fatal("expected error when guest mount fails") + } + + // First caller's UnmapFromGuest: decrements mount ref but share stays + // because second caller still holds a reservation. + // VM remove should NOT be called yet — second caller hasn't drained. + tc.vmRemove.EXPECT().RemovePlan9(gomock.Any(), gomock.Any()).Return(nil).MaxTimes(0) + if err := tc.c.UnmapFromGuest(tc.ctx, id1); err != nil { + t.Fatalf("first UnmapFromGuest: %v", err) + } + + // Share must still be in the map — second caller hasn't drained yet. + if len(tc.c.sharesByHostPath) != 1 { + t.Errorf("expected share to remain in map after first unmap, got %d shares", len(tc.c.sharesByHostPath)) + } + if len(tc.c.reservations) != 1 { + t.Errorf("expected 1 remaining reservation, got %d", len(tc.c.reservations)) + } + + // Second caller's UnmapFromGuest: drains last mount ref → share transitions + // to StateRemoved → controller cleans up the share entry. + tc.vmRemove.EXPECT().RemovePlan9(gomock.Any(), gomock.Any()).Return(nil) + if err := tc.c.UnmapFromGuest(tc.ctx, id2); err != nil { + t.Fatalf("second UnmapFromGuest: %v", err) + } + + // Everything should be cleaned up. 
+ if len(tc.c.reservations) != 0 { + t.Errorf("expected 0 reservations after all unmaps, got %d", len(tc.c.reservations)) + } + if len(tc.c.sharesByHostPath) != 0 { + t.Errorf("expected 0 shares after all unmaps, got %d", len(tc.c.sharesByHostPath)) + } +} + +// TestUnmapFromGuest_AddToVMFails_SingleReservation_Drains verifies that when +// a single caller reserves a host path and AddToVM fails, UnmapFromGuest +// correctly drains the mount and transitions the share to StateRemoved. +func TestUnmapFromGuest_AddToVMFails_SingleReservation_Drains(t *testing.T) { + t.Parallel() + tc := newTestController(t, false) + + id, _, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + + // MapToGuest fails at AddToVM. + tc.vmAdd.EXPECT().AddPlan9(gomock.Any(), gomock.Any()).Return(errVMAdd) + _, err := tc.c.MapToGuest(tc.ctx, id) + if err == nil { + t.Fatal("expected error when VM add fails") + } + + // UnmapFromGuest drains the mount and removes the share. + if err := tc.c.UnmapFromGuest(tc.ctx, id); err != nil { + t.Fatalf("UnmapFromGuest: %v", err) + } + + if len(tc.c.reservations) != 0 { + t.Errorf("expected 0 reservations, got %d", len(tc.c.reservations)) + } + if len(tc.c.sharesByHostPath) != 0 { + t.Errorf("expected 0 shares, got %d", len(tc.c.sharesByHostPath)) + } +} diff --git a/internal/controller/device/plan9/mount/doc.go b/internal/controller/device/plan9/mount/doc.go index fcad32ee09..7f65477320 100644 --- a/internal/controller/device/plan9/mount/doc.go +++ b/internal/controller/device/plan9/mount/doc.go @@ -16,15 +16,15 @@ // # Mount Lifecycle // // ┌──────────────────────┐ -// │ StateReserved │ ← mount failure → StateUnmounted (not retriable) -// └──────────┬───────────┘ -// │ guest mount succeeds -// ▼ -// ┌──────────────────────┐ -// │ StateMounted │ -// └──────────┬───────────┘ -// │ reference count → 0; -// │ guest unmount succeeds +// │ StateReserved │ ← mount failure → StateInvalid +// └──────────┬───────────┘ │ +// │ guest 
mount succeeds │ all refs drained +// ▼ ▼ +// ┌──────────────────────┐ ┌──────────────────────┐ +// │ StateMounted │ │ StateUnmounted │ +// └──────────┬───────────┘ └──────────────────────┘ +// │ reference count → 0; (terminal — entry +// │ guest unmount succeeds removed from share) // ▼ // ┌──────────────────────┐ // │ StateUnmounted │ diff --git a/internal/controller/device/plan9/mount/mount.go b/internal/controller/device/plan9/mount/mount.go index fb4962adef..8d028571c5 100644 --- a/internal/controller/device/plan9/mount/mount.go +++ b/internal/controller/device/plan9/mount/mount.go @@ -100,8 +100,9 @@ func (m *Mount) MountToGuest(ctx context.Context, guest LinuxGuestPlan9Mounter) Port: vmutils.Plan9Port, ReadOnly: m.config.ReadOnly, }); err != nil { - // Move to unmounted since no guest state was established from Reserved. - m.state = StateUnmounted + // Move to invalid since no guest state was established from Reserved, + // but outstanding reservations may still need to be drained. + m.state = StateInvalid return "", fmt.Errorf("add LCOW mapped directory share=%s: %w", m.shareName, err) } @@ -117,10 +118,9 @@ func (m *Mount) MountToGuest(ctx context.Context, guest LinuxGuestPlan9Mounter) // existing guest path directly. return m.guestPath, nil - case StateUnmounted: + default: return "", fmt.Errorf("cannot mount a share in state %s", m.state) } - return "", nil } // UnmountFromGuest decrements the reference count and, when it reaches zero, @@ -160,10 +160,17 @@ func (m *Mount) UnmountFromGuest(ctx context.Context, guest LinuxGuestPlan9Unmou m.refCount-- return nil - case StateUnmounted: - // Already in the terminal state — nothing to do. This can happen when - // MountToGuest failed (it transitions StateReserved → StateUnmounted), - // and the caller subsequently calls UnmapFromGuest to clean up. + case StateInvalid: + // The guest mount failed; no guest-side state to tear down. 
+ // Decrement the ref count and transition to the terminal state + // once all reservations are drained. + m.refCount-- + if m.refCount == 0 { + m.state = StateUnmounted + } + return nil + + default: + return fmt.Errorf("cannot unmount a share in state %s", m.state) } - return nil } diff --git a/internal/controller/device/plan9/mount/mount_test.go b/internal/controller/device/plan9/mount/mount_test.go index 8341287bce..d94b3a385a 100644 --- a/internal/controller/device/plan9/mount/mount_test.go +++ b/internal/controller/device/plan9/mount/mount_test.go @@ -118,10 +118,10 @@ func TestMountToGuest_HappyPath(t *testing.T) { } } -// TestMountToGuest_GuestFails_TransitionsToUnmounted verifies that when the +// TestMountToGuest_GuestFails_TransitionsToInvalid verifies that when the // guest AddLCOWMappedDirectory call fails, the mount transitions directly from -// StateReserved to StateUnmounted (the terminal state). -func TestMountToGuest_GuestFails_TransitionsToUnmounted(t *testing.T) { +// StateReserved to StateInvalid so outstanding reservations can be drained. +func TestMountToGuest_GuestFails_TransitionsToInvalid(t *testing.T) { ctrl := gomock.NewController(t) guest := mocks.NewMockLinuxGuestPlan9Mounter(ctrl) @@ -133,8 +133,8 @@ func TestMountToGuest_GuestFails_TransitionsToUnmounted(t *testing.T) { if err == nil { t.Fatal("expected error") } - if m.State() != StateUnmounted { - t.Errorf("expected StateUnmounted after mount failure, got %v", m.State()) + if m.State() != StateInvalid { + t.Errorf("expected StateInvalid after mount failure, got %v", m.State()) } } @@ -165,9 +165,9 @@ func TestMountToGuest_AlreadyMounted_Idempotent(t *testing.T) { } } -// TestMountToGuest_OnUnmounted_Errors verifies that calling MountToGuest on a -// mount in the terminal StateUnmounted returns an error. 
-func TestMountToGuest_OnUnmounted_Errors(t *testing.T) { +// TestMountToGuest_OnInvalid_Errors verifies that calling MountToGuest on a +// mount in StateInvalid (from a prior mount failure) returns an error. +func TestMountToGuest_OnInvalid_Errors(t *testing.T) { ctrl := gomock.NewController(t) guest := mocks.NewMockLinuxGuestPlan9Mounter(ctrl) @@ -175,10 +175,10 @@ func TestMountToGuest_OnUnmounted_Errors(t *testing.T) { guest.EXPECT().AddLCOWMappedDirectory(gomock.Any(), gomock.Any()).Return(errGuestMount) _, _ = m.MountToGuest(context.Background(), guest) - // Try to mount again from StateUnmounted. + // Try to mount again from StateInvalid. _, err := m.MountToGuest(context.Background(), guest) if err == nil { - t.Fatal("expected error when mounting from StateUnmounted") + t.Fatal("expected error when mounting from StateInvalid") } } @@ -308,10 +308,9 @@ func TestUnmountFromGuest_OnReserved_NoGuestCall(t *testing.T) { } } -// TestUnmountFromGuest_OnUnmounted_NoOp verifies that calling UnmountFromGuest -// on a mount already in the terminal StateUnmounted is a no-op that returns -// no error and does not issue any guest call. -func TestUnmountFromGuest_OnUnmounted_NoOp(t *testing.T) { +// TestUnmountFromGuest_OnUnmounted_Errors verifies that calling UnmountFromGuest +// on a mount already in the terminal StateUnmounted returns an error. +func TestUnmountFromGuest_OnUnmounted_Errors(t *testing.T) { ctrl := gomock.NewController(t) guest := mocks.NewMockLinuxGuestPlan9Mounter(ctrl) guestUnmount := mocks.NewMockLinuxGuestPlan9Unmounter(ctrl) @@ -323,8 +322,8 @@ func TestUnmountFromGuest_OnUnmounted_NoOp(t *testing.T) { guestUnmount.EXPECT().RemoveLCOWMappedDirectory(gomock.Any(), gomock.Any()).Return(nil) _ = m.UnmountFromGuest(context.Background(), guestUnmount) - // Second unmount on StateUnmounted — must be a no-op with no error. 
- if err := m.UnmountFromGuest(context.Background(), guestUnmount); err != nil { - t.Fatalf("unexpected error on double unmount: %v", err) + // Second unmount on StateUnmounted — must return an error. + if err := m.UnmountFromGuest(context.Background(), guestUnmount); err == nil { + t.Fatal("expected error when unmounting an already-unmounted mount") } } diff --git a/internal/controller/device/plan9/mount/state.go b/internal/controller/device/plan9/mount/state.go index 87dcb18bc1..1c23abfa61 100644 --- a/internal/controller/device/plan9/mount/state.go +++ b/internal/controller/device/plan9/mount/state.go @@ -9,8 +9,11 @@ package mount // // StateReserved → StateMounted → StateUnmounted // -// Mount failure from StateReserved transitions directly to the terminal -// StateUnmounted state; the entry is then removed by the parent [share.Share]. +// Mount failure from StateReserved transitions to [StateInvalid]. +// In [StateInvalid] the guest mount was never established, but outstanding +// reservations may still exist. The mount remains in [StateInvalid] until +// all reservations have been drained via [Mount.UnmountFromGuest], at which +// point it transitions to [StateUnmounted]. // An unmount failure from StateMounted leaves the mount in StateMounted so // the caller can retry. 
// @@ -19,12 +22,14 @@ package mount // Current State │ Trigger │ Next State // ────────────────┼────────────────────────────────────────────┼────────────────────── // StateReserved │ guest mount succeeds │ StateMounted -// StateReserved │ guest mount fails │ StateUnmounted +// StateReserved │ guest mount fails │ StateInvalid // StateReserved │ UnmountFromGuest (refCount > 1) │ StateReserved (ref--) // StateReserved │ UnmountFromGuest (refCount == 1) │ StateUnmounted // StateMounted │ UnmountFromGuest (refCount > 1) │ StateMounted (ref--) // StateMounted │ UnmountFromGuest (refCount == 1) succeeds │ StateUnmounted // StateMounted │ UnmountFromGuest (refCount == 1) fails │ StateMounted +// StateInvalid │ UnmountFromGuest (refCount > 1) │ StateInvalid (ref--) +// StateInvalid │ UnmountFromGuest (refCount == 1) │ StateUnmounted // StateUnmounted │ UnmountFromGuest │ StateUnmounted (no-op) // StateUnmounted │ (terminal — entry removed from share) │ — type State int @@ -38,6 +43,13 @@ const ( // the guest. The guest path is valid from this state onward. StateMounted + // StateInvalid means the guest mount operation failed. No guest-side + // state was established, but outstanding reservations may still need + // to be drained via [Mount.UnmountFromGuest]. Once all reservations + // are released (refCount reaches zero), the mount transitions to + // [StateUnmounted]. + StateInvalid + // StateUnmounted means the guest has unmounted the share. This is a // terminal state; the entry is removed from the parent share. 
StateUnmounted @@ -50,6 +62,8 @@ func (s State) String() string { return "Reserved" case StateMounted: return "Mounted" + case StateInvalid: + return "Invalid" case StateUnmounted: return "Unmounted" default: diff --git a/internal/controller/device/plan9/share/doc.go b/internal/controller/device/plan9/share/doc.go index a429db2b9e..7853cdaa55 100644 --- a/internal/controller/device/plan9/share/doc.go +++ b/internal/controller/device/plan9/share/doc.go @@ -15,7 +15,7 @@ // # Share Lifecycle // // ┌──────────────────────┐ -// │ StateReserved │ ← add failure → StateRemoved (not retriable) +// │ StateReserved │ ← add failure → StateInvalid // └──────────┬───────────┘ // │ share added to VM // ▼ @@ -30,4 +30,15 @@ // │ StateRemoved │ // └──────────────────────┘ // (terminal — entry removed from controller) +// +// ┌──────────────────────┐ +// │ StateInvalid │ ← add failed; share never on VM +// └──────────┬───────────┘ +// │ all mount reservations drained +// │ via UnmountFromGuest + RemoveFromVM +// ▼ +// ┌──────────────────────┐ +// │ StateRemoved │ +// └──────────────────────┘ +// (terminal — entry removed from controller) package share diff --git a/internal/controller/device/plan9/share/share.go b/internal/controller/device/plan9/share/share.go index 64e1ddea53..51ec250c51 100644 --- a/internal/controller/device/plan9/share/share.go +++ b/internal/controller/device/plan9/share/share.go @@ -75,8 +75,9 @@ func (s *Share) GuestPath() string { } // AddToVM adds the share to the VM's Plan9 provider. It is idempotent for an -// already-added share; on failure the share is moved into removed state and -// a new [Share] must be created to retry. +// already-added share; on failure the share is moved into invalid state so +// that outstanding mount reservations can be drained before the share is +// fully removed. func (s *Share) AddToVM(ctx context.Context, vm VMPlan9Adder) error { // Drive the state machine. 
@@ -105,10 +106,10 @@ func (s *Share) AddToVM(ctx context.Context, vm VMPlan9Adder) error { Flags: flags, AllowedFiles: s.config.AllowedNames, }); err != nil { - // Since the share was never added, move directly to the terminal - // Removed state. No guest state was established, so there is nothing - // to clean up. - s.state = StateRemoved + // The share was never added to the VM. Transition to Invalid so + // that outstanding mount reservations can still be drained by + // callers via UnmountFromGuest before the share is fully removed. + s.state = StateInvalid return fmt.Errorf("add Plan9 share %s to VM: %w", s.name, err) } @@ -121,13 +122,18 @@ func (s *Share) AddToVM(ctx context.Context, vm VMPlan9Adder) error { // Already added — no-op. return nil + case StateInvalid: + // A previous add attempt failed. The caller must drain all mount + // reservations via UnmountFromGuest and then call RemoveFromVM to + // transition to StateRemoved. + return fmt.Errorf("share %s is in invalid state; drain mounts and remove", s.name) + case StateRemoved: // Re-adding a removed share is not supported. return fmt.Errorf("share %s already removed", s.name) default: + return fmt.Errorf("share %s in unknown state %d", s.name, s.state) } - - return nil } // RemoveFromVM removes the share from the VM. It is idempotent for a share @@ -162,8 +168,19 @@ func (s *Share) RemoveFromVM(ctx context.Context, vm VMPlan9Remover) error { s.state = StateRemoved log.G(ctx).Debug("Plan9 share removed from VM") + case StateInvalid: + // The share was never successfully added to the VM. Wait for all + // mount reservations to be drained before transitioning to Removed. + if s.mount != nil { + return nil + } + + s.state = StateRemoved + log.G(ctx).Debug("invalid Plan9 share transitioned to removed (all mounts drained)") + case StateRemoved: // Already fully removed — no-op. + // Controller needs to remove ref from its map. 
} return nil diff --git a/internal/controller/device/plan9/share/share_test.go b/internal/controller/device/plan9/share/share_test.go index 3dda1cdcf7..6c4e991e24 100644 --- a/internal/controller/device/plan9/share/share_test.go +++ b/internal/controller/device/plan9/share/share_test.go @@ -71,9 +71,10 @@ func TestAddToVM_HappyPath(t *testing.T) { } } -// TestAddToVM_VMFails_TransitionsToRemoved verifies that when the VM AddPlan9 -// call fails, the share transitions directly to StateRemoved. -func TestAddToVM_VMFails_TransitionsToRemoved(t *testing.T) { +// TestAddToVM_VMFails_TransitionsToInvalid verifies that when the VM AddPlan9 +// call fails, the share transitions to StateInvalid so that outstanding +// mount reservations can be drained before the share is fully removed. +func TestAddToVM_VMFails_TransitionsToInvalid(t *testing.T) { ctrl := gomock.NewController(t) vm := mocks.NewMockVMPlan9Adder(ctrl) @@ -84,8 +85,8 @@ func TestAddToVM_VMFails_TransitionsToRemoved(t *testing.T) { if err == nil { t.Fatal("expected error") } - if s.State() != StateRemoved { - t.Errorf("expected StateRemoved after VM add failure, got %v", s.State()) + if s.State() != StateInvalid { + t.Errorf("expected StateInvalid after VM add failure, got %v", s.State()) } } @@ -110,9 +111,9 @@ func TestAddToVM_AlreadyAdded_Idempotent(t *testing.T) { } } -// TestAddToVM_OnRemovedShare_Errors verifies that calling AddToVM on a share in -// the terminal StateRemoved returns an error without issuing any VM call. -func TestAddToVM_OnRemovedShare_Errors(t *testing.T) { +// TestAddToVM_OnInvalidShare_Errors verifies that calling AddToVM on a share in +// StateInvalid (previous add failed) returns an error without issuing any VM call. 
+func TestAddToVM_OnInvalidShare_Errors(t *testing.T) { ctrl := gomock.NewController(t) vm := mocks.NewMockVMPlan9Adder(ctrl) @@ -120,10 +121,10 @@ func TestAddToVM_OnRemovedShare_Errors(t *testing.T) { vm.EXPECT().AddPlan9(gomock.Any(), gomock.Any()).Return(errVMAdd) _ = s.AddToVM(context.Background(), vm) - // Share is now StateRemoved — AddToVM must return an error. + // Share is now StateInvalid — AddToVM must return an error. err := s.AddToVM(context.Background(), vm) if err == nil { - t.Fatal("expected error when adding a removed share") + t.Fatal("expected error when adding an invalid share") } } @@ -258,6 +259,66 @@ func TestRemoveFromVM_WithActiveMountSkipsRemove(t *testing.T) { } } +// TestRemoveFromVM_OnInvalid_WithActiveMount_NoOps verifies that RemoveFromVM +// on a StateInvalid share with an active mount reservation does not transition +// to StateRemoved and keeps the share in StateInvalid. +func TestRemoveFromVM_OnInvalid_WithActiveMount_NoOps(t *testing.T) { + ctrl := gomock.NewController(t) + vmAdd := mocks.NewMockVMPlan9Adder(ctrl) + vmRemove := mocks.NewMockVMPlan9Remover(ctrl) + + s := NewReserved("share0", newTestConfig()) + // Reserve a mount before AddToVM to simulate the controller's Reserve flow. + _, _ = s.ReserveMount(context.Background(), mount.Config{}) + + // AddToVM fails → StateInvalid, but mount is still active. + vmAdd.EXPECT().AddPlan9(gomock.Any(), gomock.Any()).Return(errVMAdd) + _ = s.AddToVM(context.Background(), vmAdd) + + if s.State() != StateInvalid { + t.Fatalf("expected StateInvalid, got %v", s.State()) + } + + // RemoveFromVM must not transition to Removed while mount is active. 
+ if err := s.RemoveFromVM(context.Background(), vmRemove); err != nil { + t.Fatalf("unexpected error: %v", err) + } + if s.State() != StateInvalid { + t.Errorf("expected StateInvalid (mount still active), got %v", s.State()) + } +} + +// TestRemoveFromVM_OnInvalid_NoMount_TransitionsToRemoved verifies that +// RemoveFromVM on a StateInvalid share with no active mount transitions to +// StateRemoved. +func TestRemoveFromVM_OnInvalid_NoMount_TransitionsToRemoved(t *testing.T) { + ctrl := gomock.NewController(t) + vmAdd := mocks.NewMockVMPlan9Adder(ctrl) + vmRemove := mocks.NewMockVMPlan9Remover(ctrl) + guestUnmount := mountmocks.NewMockLinuxGuestPlan9Unmounter(ctrl) + + s := NewReserved("share0", newTestConfig()) + // Reserve a mount, then drain it so mount becomes nil. + _, _ = s.ReserveMount(context.Background(), mount.Config{}) + + // AddToVM fails → StateInvalid. + vmAdd.EXPECT().AddPlan9(gomock.Any(), gomock.Any()).Return(errVMAdd) + _ = s.AddToVM(context.Background(), vmAdd) + + // Drain the mount via UnmountFromGuest. + if err := s.UnmountFromGuest(context.Background(), guestUnmount); err != nil { + t.Fatalf("unexpected error during unmount: %v", err) + } + + // Now RemoveFromVM should transition to Removed. + if err := s.RemoveFromVM(context.Background(), vmRemove); err != nil { + t.Fatalf("unexpected error: %v", err) + } + if s.State() != StateRemoved { + t.Errorf("expected StateRemoved after draining mounts, got %v", s.State()) + } +} + // ───────────────────────────────────────────────────────────────────────────── // ReserveMount // ───────────────────────────────────────────────────────────────────────────── @@ -307,9 +368,9 @@ func TestReserveMount_DifferentConfig_Errors(t *testing.T) { } } -// TestReserveMount_OnRemovedShare_Errors verifies that calling ReserveMount on -// a share that has reached the terminal StateRemoved returns an error. 
-func TestReserveMount_OnRemovedShare_Errors(t *testing.T) { +// TestReserveMount_OnInvalidShare_Errors verifies that calling ReserveMount on +// a share in StateInvalid (previous add failed) returns an error. +func TestReserveMount_OnInvalidShare_Errors(t *testing.T) { ctrl := gomock.NewController(t) vm := mocks.NewMockVMPlan9Adder(ctrl) @@ -319,7 +380,7 @@ func TestReserveMount_OnRemovedShare_Errors(t *testing.T) { _, err := s.ReserveMount(context.Background(), mount.Config{}) if err == nil { - t.Fatal("expected error when reserving mount on removed share") + t.Fatal("expected error when reserving mount on invalid share") } } diff --git a/internal/controller/device/plan9/share/state.go b/internal/controller/device/plan9/share/state.go index 0c36d1dc57..c5eaad8892 100644 --- a/internal/controller/device/plan9/share/state.go +++ b/internal/controller/device/plan9/share/state.go @@ -8,8 +8,11 @@ package share // // StateReserved → StateAdded → StateRemoved // -// Add failure from StateReserved transitions directly to the -// terminal StateRemoved state; create a new [Share] to retry. +// Add failure from StateReserved transitions to [StateInvalid]. +// In [StateInvalid] the share was never added to the VM, but outstanding +// mount reservations may still exist. The share remains in [StateInvalid] +// until all mount reservations have been drained via [Share.UnmountFromGuest], +// at which point [Share.RemoveFromVM] transitions it to [StateRemoved]. // A VM removal failure from StateAdded leaves the share in StateAdded // so the caller can retry only the removal step. 
// @@ -18,10 +21,12 @@ package share // Current State │ Trigger │ Next State // ───────────────┼───────────────────────────────────────┼────────────────── // StateReserved │ add succeeds │ StateAdded -// StateReserved │ add fails │ StateRemoved +// StateReserved │ add fails │ StateInvalid // StateAdded │ mount still active │ StateAdded (no-op) // StateAdded │ removal succeeds │ StateRemoved // StateAdded │ removal fails │ StateAdded +// StateInvalid │ RemoveFromVM (mount active) │ StateInvalid (no-op) +// StateInvalid │ RemoveFromVM (no mount) │ StateRemoved // StateRemoved │ (terminal — no further transitions) │ — type State int @@ -34,6 +39,12 @@ const ( // The guest mount is driven from this state. StateAdded + // StateInvalid means the VM-side add failed. The share was never + // established on the VM, but outstanding mount reservations may still + // need to be drained. Once all mounts are released, + // [Share.RemoveFromVM] transitions the share to [StateRemoved]. + StateInvalid + // StateRemoved means the share has been fully removed from the VM. // This is a terminal state. 
StateRemoved @@ -46,6 +57,8 @@ func (s State) String() string { return "Reserved" case StateAdded: return "Added" + case StateInvalid: + return "Invalid" case StateRemoved: return "Removed" default: From e0337b2cfe2139f94d30fc2fb207428e5fabf0a6 Mon Sep 17 00:00:00 2001 From: Harsh Rawat Date: Wed, 8 Apr 2026 08:58:38 +0530 Subject: [PATCH 7/7] refactor share management to remove GuestPath from Reserve API Signed-off-by: Harsh Rawat --- .../controller/device/plan9/controller.go | 22 ++---- .../device/plan9/controller_test.go | 75 ++++++++----------- .../controller/device/plan9/share/share.go | 9 --- .../device/plan9/share/share_test.go | 14 +--- 4 files changed, 44 insertions(+), 76 deletions(-) diff --git a/internal/controller/device/plan9/controller.go b/internal/controller/device/plan9/controller.go index 348cc39de7..75fab62769 100644 --- a/internal/controller/device/plan9/controller.go +++ b/internal/controller/device/plan9/controller.go @@ -78,13 +78,13 @@ func New(vm vmPlan9, linuxGuest guestPlan9, noWritableFileShares bool) *Controll // If an error is returned from this function, it is guaranteed that no // reservation mapping was made and no UnmapFromGuest() call is necessary to // clean up. -func (c *Controller) Reserve(ctx context.Context, shareConfig share.Config, mountConfig mount.Config) (guid.GUID, string, error) { +func (c *Controller) Reserve(ctx context.Context, shareConfig share.Config, mountConfig mount.Config) (guid.GUID, error) { c.mu.Lock() defer c.mu.Unlock() // Validate write-share policy before touching shared state. 
if !shareConfig.ReadOnly && c.noWritableFileShares { - return guid.GUID{}, "", fmt.Errorf("adding writable Plan9 shares is denied") + return guid.GUID{}, fmt.Errorf("adding writable Plan9 shares is denied") } ctx, _ = log.WithContext(ctx, logrus.WithField(logfields.HostPath, shareConfig.HostPath)) @@ -93,13 +93,13 @@ func (c *Controller) Reserve(ctx context.Context, shareConfig share.Config, moun // Generate a unique reservation ID. id, err := guid.NewV4() if err != nil { - return guid.GUID{}, "", fmt.Errorf("generate reservation ID: %w", err) + return guid.GUID{}, fmt.Errorf("generate reservation ID: %w", err) } // Check if the generated reservation ID already exists, which is extremely unlikely, // but we want to be certain before proceeding with share creation. if _, ok := c.reservations[id]; ok { - return guid.GUID{}, "", fmt.Errorf("reservation ID already exists: %s", id) + return guid.GUID{}, fmt.Errorf("reservation ID already exists: %s", id) } // Create the reservation entry. @@ -107,8 +107,6 @@ func (c *Controller) Reserve(ctx context.Context, shareConfig share.Config, moun hostPath: shareConfig.HostPath, } - var guestPath string - // Check whether this host path already has an allocated share. existingShare, ok := c.sharesByHostPath[shareConfig.HostPath] @@ -116,7 +114,7 @@ func (c *Controller) Reserve(ctx context.Context, shareConfig share.Config, moun if ok { // Verify the caller is requesting the same share configuration. if !existingShare.Config().Equals(shareConfig) { - return guid.GUID{}, "", fmt.Errorf("cannot reserve ref on share with different config") + return guid.GUID{}, fmt.Errorf("cannot reserve ref on share with different config") } // Set the share name. @@ -124,10 +122,8 @@ func (c *Controller) Reserve(ctx context.Context, shareConfig share.Config, moun // We have a share, now reserve a mount on it. 
 		if _, err = existingShare.ReserveMount(ctx, mountConfig); err != nil {
-			return guid.GUID{}, "", fmt.Errorf("reserve mount on share %s: %w", existingShare.Name(), err)
+			return guid.GUID{}, fmt.Errorf("reserve mount on share %s: %w", existingShare.Name(), err)
 		}
-
-		guestPath = existingShare.GuestPath()
 	}
 
 	// If we don't have an existing share, we need to create one and reserve a mount on it.
@@ -139,13 +135,11 @@
 		// Create the Share and Mount in the reserved states.
 		newShare := share.NewReserved(name, shareConfig)
 		if _, err = newShare.ReserveMount(ctx, mountConfig); err != nil {
-			return guid.GUID{}, "", fmt.Errorf("reserve mount on share %s: %w", name, err)
+			return guid.GUID{}, fmt.Errorf("reserve mount on share %s: %w", name, err)
 		}
 
 		c.sharesByHostPath[shareConfig.HostPath] = newShare
 		res.name = newShare.Name()
-
-		guestPath = newShare.GuestPath()
 	}
 
 	// Ensure our reservation is saved for all future operations.
@@ -153,7 +147,7 @@
 	log.G(ctx).WithField("reservation", id).Debug("Plan9 share reserved")
 
-	// Return the reserved guest path in addition to the reservation ID for caller convenience.
+	// Return only the reservation ID; the guest path is produced later by MapToGuest.
- return id, guestPath, nil + return id, nil } // MapToGuest adds the reserved share to the VM and mounts it inside the guest, diff --git a/internal/controller/device/plan9/controller_test.go b/internal/controller/device/plan9/controller_test.go index 532f3e7bbe..e1588cb65d 100644 --- a/internal/controller/device/plan9/controller_test.go +++ b/internal/controller/device/plan9/controller_test.go @@ -93,18 +93,15 @@ func newTestController(t *testing.T, noWritableFileShares bool) *testController // ───────────────────────────────────────────────────────────────────────────── // TestReserve_NewShare verifies that reserving a new host path creates a share -// entry, returns a non-empty guest path, and stores a unique reservation ID. +// entry and stores a unique reservation ID. func TestReserve_NewShare(t *testing.T) { t.Parallel() tc := newTestController(t, false) - id, guestPath, err := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + id, err := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) if err != nil { t.Fatalf("unexpected error: %v", err) } - if guestPath == "" { - t.Error("expected non-empty guest path") - } if len(tc.c.reservations) != 1 { t.Errorf("expected 1 reservation, got %d", len(tc.c.reservations)) } @@ -118,20 +115,17 @@ func TestReserve_NewShare(t *testing.T) { // TestReserve_SameHostPath_RefsExistingShare verifies that two reservations for // the same host path reuse the existing share (only one share entry) and return -// distinct reservation IDs but the same guest path. +// distinct reservation IDs. 
func TestReserve_SameHostPath_RefsExistingShare(t *testing.T) { t.Parallel() tc := newTestController(t, false) - id1, gp1, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) - id2, gp2, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + id1, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + id2, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) if id1 == id2 { t.Error("expected different reservation IDs for two callers on the same path") } - if gp1 != gp2 { - t.Errorf("expected same guest path for same host path, got %q vs %q", gp1, gp2) - } if len(tc.c.sharesByHostPath) != 1 { t.Errorf("expected 1 share for same host path, got %d", len(tc.c.sharesByHostPath)) } @@ -146,8 +140,8 @@ func TestReserve_DifferentHostPaths_CreatesSeparateShares(t *testing.T) { t.Parallel() tc := newTestController(t, false) - _, _, _ = tc.c.Reserve(tc.ctx, share.Config{HostPath: "/path/a"}, mount.Config{}) - _, _, _ = tc.c.Reserve(tc.ctx, share.Config{HostPath: "/path/b"}, mount.Config{}) + _, _ = tc.c.Reserve(tc.ctx, share.Config{HostPath: "/path/a"}, mount.Config{}) + _, _ = tc.c.Reserve(tc.ctx, share.Config{HostPath: "/path/b"}, mount.Config{}) if len(tc.c.sharesByHostPath) != 2 { t.Errorf("expected 2 shares, got %d", len(tc.c.sharesByHostPath)) @@ -161,10 +155,10 @@ func TestReserve_DifferentConfig_SameHostPath_Errors(t *testing.T) { t.Parallel() tc := newTestController(t, false) - _, _, _ = tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + _, _ = tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) // Same host path but read-only flag differs. 
- _, _, err := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path", ReadOnly: true}, mount.Config{}) + _, err := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path", ReadOnly: true}, mount.Config{}) if err == nil { t.Fatal("expected error when re-reserving same host path with different config") } @@ -177,7 +171,7 @@ func TestReserve_WritableDenied(t *testing.T) { t.Parallel() tc := newTestController(t, true /* noWritableFileShares */) - _, _, err := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path" /* writable */}, mount.Config{}) + _, err := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path" /* writable */}, mount.Config{}) if err == nil { t.Fatal("expected error when adding writable share with noWritableFileShares=true") } @@ -189,7 +183,7 @@ func TestReserve_ReadOnlyAllowedWhenWritableDenied(t *testing.T) { t.Parallel() tc := newTestController(t, true /* noWritableFileShares */) - _, _, err := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path", ReadOnly: true /* readOnly */}, mount.Config{ReadOnly: true}) + _, err := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path", ReadOnly: true /* readOnly */}, mount.Config{ReadOnly: true}) if err != nil { t.Fatalf("unexpected error for read-only share: %v", err) } @@ -201,8 +195,8 @@ func TestReserve_NameCounterIncrements(t *testing.T) { t.Parallel() tc := newTestController(t, false) - _, _, _ = tc.c.Reserve(tc.ctx, share.Config{HostPath: "/path/a"}, mount.Config{}) - _, _, _ = tc.c.Reserve(tc.ctx, share.Config{HostPath: "/path/b"}, mount.Config{}) + _, _ = tc.c.Reserve(tc.ctx, share.Config{HostPath: "/path/a"}, mount.Config{}) + _, _ = tc.c.Reserve(tc.ctx, share.Config{HostPath: "/path/b"}, mount.Config{}) if tc.c.nameCounter != 2 { t.Errorf("expected nameCounter=2, got %d", tc.c.nameCounter) @@ -215,12 +209,12 @@ func TestReserve_NameCounterIncrements(t *testing.T) { // TestMapToGuest_HappyPath verifies a normal Reserve → MapToGuest flow: the // share is added to the VM and mounted in 
the guest, returning a non-empty -// guest path that matches the one returned during Reserve. +// guest path. func TestMapToGuest_HappyPath(t *testing.T) { t.Parallel() tc := newTestController(t, false) - id, gp, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + id, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) tc.vmAdd.EXPECT().AddPlan9(gomock.Any(), gomock.Any()).Return(nil) tc.guestMount.EXPECT().AddLCOWMappedDirectory(gomock.Any(), gomock.Any()).Return(nil) @@ -232,9 +226,6 @@ func TestMapToGuest_HappyPath(t *testing.T) { if guestPath == "" { t.Error("expected non-empty guest path") } - if guestPath != gp { - t.Errorf("expected guest path %q from MapToGuest to match reservation guest path %q", guestPath, gp) - } } // TestMapToGuest_VMAddFails_Errors verifies that when the host-side AddPlan9 @@ -243,7 +234,7 @@ func TestMapToGuest_VMAddFails_Errors(t *testing.T) { t.Parallel() tc := newTestController(t, false) - id, _, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + id, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) tc.vmAdd.EXPECT().AddPlan9(gomock.Any(), gomock.Any()).Return(errVMAdd) @@ -261,7 +252,7 @@ func TestMapToGuest_GuestMountFails_RetryMapToGuest_Fails(t *testing.T) { t.Parallel() tc := newTestController(t, false) - id, _, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + id, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) // First MapToGuest: VM add succeeds, guest mount fails. 
tc.vmAdd.EXPECT().AddPlan9(gomock.Any(), gomock.Any()).Return(nil) @@ -294,7 +285,7 @@ func TestMapToGuest_GuestMountFails_UnmapFromGuest_CleansUp(t *testing.T) { t.Parallel() tc := newTestController(t, false) - id, _, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + id, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) // MapToGuest: VM add succeeds, guest mount fails. tc.vmAdd.EXPECT().AddPlan9(gomock.Any(), gomock.Any()).Return(nil) @@ -331,8 +322,8 @@ func TestMapToGuest_SharedPath_VMAddCalledOnce(t *testing.T) { t.Parallel() tc := newTestController(t, false) - id1, _, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) - id2, _, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + id1, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + id2, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) // AddPlan9 and AddLCOWMappedDirectory each called exactly once. 
tc.vmAdd.EXPECT().AddPlan9(gomock.Any(), gomock.Any()).Return(nil).Times(1) @@ -362,7 +353,7 @@ func TestUnmapFromGuest_HappyPath(t *testing.T) { t.Parallel() tc := newTestController(t, false) - id, _, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + id, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) tc.vmAdd.EXPECT().AddPlan9(gomock.Any(), gomock.Any()).Return(nil) tc.guestMount.EXPECT().AddLCOWMappedDirectory(gomock.Any(), gomock.Any()).Return(nil) @@ -389,7 +380,7 @@ func TestUnmapFromGuest_GuestUnmountFails_Retryable(t *testing.T) { t.Parallel() tc := newTestController(t, false) - id, _, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + id, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) tc.vmAdd.EXPECT().AddPlan9(gomock.Any(), gomock.Any()).Return(nil) tc.guestMount.EXPECT().AddLCOWMappedDirectory(gomock.Any(), gomock.Any()).Return(nil) @@ -420,7 +411,7 @@ func TestUnmapFromGuest_VMRemoveFails_Retryable(t *testing.T) { t.Parallel() tc := newTestController(t, false) - id, _, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + id, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) tc.vmAdd.EXPECT().AddPlan9(gomock.Any(), gomock.Any()).Return(nil) tc.guestMount.EXPECT().AddLCOWMappedDirectory(gomock.Any(), gomock.Any()).Return(nil) @@ -451,8 +442,8 @@ func TestUnmapFromGuest_RefCounting_VMRemoveOnLastRef(t *testing.T) { t.Parallel() tc := newTestController(t, false) - id1, _, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) - id2, _, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + id1, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + id2, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) tc.vmAdd.EXPECT().AddPlan9(gomock.Any(), 
gomock.Any()).Return(nil).Times(1) tc.guestMount.EXPECT().AddLCOWMappedDirectory(gomock.Any(), gomock.Any()).Return(nil).Times(1) @@ -486,7 +477,7 @@ func TestUnmapFromGuest_WithoutMapToGuest_CleansUp(t *testing.T) { tc := newTestController(t, false) // Reserve but never MapToGuest — no VM or guest calls expected. - id, _, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + id, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) if err := tc.c.UnmapFromGuest(tc.ctx, id); err != nil { t.Fatalf("unexpected error: %v", err) @@ -508,7 +499,7 @@ func TestFullLifecycle_ReuseAfterRelease(t *testing.T) { tc := newTestController(t, false) // First full cycle. - id1, _, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + id1, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) tc.vmAdd.EXPECT().AddPlan9(gomock.Any(), gomock.Any()).Return(nil) tc.guestMount.EXPECT().AddLCOWMappedDirectory(gomock.Any(), gomock.Any()).Return(nil) _, _ = tc.c.MapToGuest(tc.ctx, id1) @@ -517,7 +508,7 @@ func TestFullLifecycle_ReuseAfterRelease(t *testing.T) { _ = tc.c.UnmapFromGuest(tc.ctx, id1) // Reserve the same path again — should start a fresh share. - id2, _, err := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + id2, err := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) if err != nil { t.Fatalf("re-reserve after release: %v", err) } @@ -536,8 +527,8 @@ func TestUnmapFromGuest_AddToVMFails_MultipleReservations_AllDrain(t *testing.T) tc := newTestController(t, false) // Two callers reserve the same host path. 
- id1, _, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) - id2, _, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + id1, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + id2, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) // First caller attempts MapToGuest — AddToVM fails. tc.vmAdd.EXPECT().AddPlan9(gomock.Any(), gomock.Any()).Return(errVMAdd) @@ -585,8 +576,8 @@ func TestUnmapFromGuest_GuestMountFails_MultipleReservations_AllDrain(t *testing tc := newTestController(t, false) // Two callers reserve the same host path. - id1, _, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) - id2, _, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + id1, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + id2, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) // First caller attempts MapToGuest — AddToVM succeeds, guest mount fails. tc.vmAdd.EXPECT().AddPlan9(gomock.Any(), gomock.Any()).Return(nil) @@ -635,7 +626,7 @@ func TestUnmapFromGuest_AddToVMFails_SingleReservation_Drains(t *testing.T) { t.Parallel() tc := newTestController(t, false) - id, _, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) + id, _ := tc.c.Reserve(tc.ctx, share.Config{HostPath: "/host/path"}, mount.Config{}) // MapToGuest fails at AddToVM. tc.vmAdd.EXPECT().AddPlan9(gomock.Any(), gomock.Any()).Return(errVMAdd) diff --git a/internal/controller/device/plan9/share/share.go b/internal/controller/device/plan9/share/share.go index 51ec250c51..7069a0dca2 100644 --- a/internal/controller/device/plan9/share/share.go +++ b/internal/controller/device/plan9/share/share.go @@ -65,15 +65,6 @@ func (s *Share) HostPath() string { return s.config.HostPath } -// GuestPath returns the guest-side mount path of the share. 
-// Valid only if a mount has been reserved on this share; otherwise returns an empty string. -func (s *Share) GuestPath() string { - if s.mount == nil { - return "" - } - return s.mount.GuestPath() -} - // AddToVM adds the share to the VM's Plan9 provider. It is idempotent for an // already-added share; on failure the share is moved into invalid state so // that outstanding mount reservations can be drained before the share is diff --git a/internal/controller/device/plan9/share/share_test.go b/internal/controller/device/plan9/share/share_test.go index 6c4e991e24..fb07cea2f2 100644 --- a/internal/controller/device/plan9/share/share_test.go +++ b/internal/controller/device/plan9/share/share_test.go @@ -32,8 +32,8 @@ func newTestConfig() Config { // ───────────────────────────────────────────────────────────────────────────── // TestNewReserved_InitialState verifies that a freshly created share starts in -// StateReserved with the correct name, host path, and an empty guest path -// (no mount has been reserved yet). +// StateReserved with the correct name and host path (no mount has been reserved +// yet). func TestNewReserved_InitialState(t *testing.T) { s := NewReserved("share0", newTestConfig()) if s.State() != StateReserved { @@ -45,9 +45,6 @@ func TestNewReserved_InitialState(t *testing.T) { if s.HostPath() != "/host/path" { t.Errorf("expected host path %q, got %q", "/host/path", s.HostPath()) } - if s.GuestPath() != "" { - t.Errorf("expected empty guest path before mount, got %q", s.GuestPath()) - } } // ───────────────────────────────────────────────────────────────────────────── @@ -451,7 +448,7 @@ func TestMountToGuest_HappyPath(t *testing.T) { // TestUnmountFromGuest_HappyPath verifies a full mount → unmount cycle: after // a successful guest RemoveLCOWMappedDirectory call the share's internal mount -// entry is cleared (GuestPath returns empty). +// entry is cleared. 
func TestUnmountFromGuest_HappyPath(t *testing.T) { ctrl := gomock.NewController(t) vmAdd := mocks.NewMockVMPlan9Adder(ctrl) @@ -470,11 +467,6 @@ func TestUnmountFromGuest_HappyPath(t *testing.T) { if err := s.UnmountFromGuest(context.Background(), guestUnmount); err != nil { t.Fatalf("unexpected error: %v", err) } - - // After unmount the share's internal mount entry should be cleared. - if s.GuestPath() != "" { - t.Errorf("expected empty GuestPath after unmount, got %q", s.GuestPath()) - } } // TestUnmountFromGuest_NoMount_IsNoOp verifies that calling UnmountFromGuest on