Low severity (2.3) · GHSA Advisory · Published May 14, 2026 · Updated May 14, 2026
CVE-2026-42186
Description
OpenBao is an open-source, identity-based secrets management system. Prior to version 2.5.3, when an initial namespace deletion fails partway through, subsequent retries do not properly remove all of the namespace's data before marking the namespace as deleted. As a result, outstanding leases may never be revoked, and unrelated storage entries may be left behind. This vulnerability is fixed in 2.5.3.
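The regression test in the patch below checks for the storage symptom by listing the namespace's raw storage prefix after deletion. A minimal operator-side version of that check might look like the following sketch, which assumes an authenticated client, a server running with the raw storage endpoint enabled (the test cluster below sets EnableRaw), and a placeholder namespace UUID (the "uuid" field returned when the namespace was created):

package main

import (
    "fmt"
    "log"

    "github.com/openbao/openbao/api/v2"
)

func main() {
    // Address and token are taken from the environment by the default config.
    client, err := api.NewClient(api.DefaultConfig())
    if err != nil {
        log.Fatal(err)
    }

    // Placeholder: the "uuid" field returned when the namespace was created.
    nsUUID := "00000000-0000-0000-0000-000000000000"

    // On an affected server, a namespace whose deletion was retried can be
    // marked deleted while entries remain under its raw storage prefix.
    resp, err := client.Logical().List(fmt.Sprintf("sys/raw/namespaces/%v", nsUUID))
    if err != nil {
        log.Fatal(err)
    }
    if resp == nil {
        fmt.Println("no leftover storage entries")
        return
    }
    fmt.Println("leftover entries:", resp.Data["keys"])
}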
Affected products
OpenBao: versions prior to 2.5.3 (fixed in 2.5.3)

Patches (1)
16d2e0506e2b4: Ensure lease revocation on namespace re-deletion (#2935)
3 files changed · +216 −2
changelog/2935.txt (+3 −0, added)

@@ -0,0 +1,3 @@
+```release-note:security
+core/namespaces: Ensure lease revocation on namespace re-deletion. GHSA-vv66-6rp4-wr4f.
+```
vault/external_tests/namespaces/namespace_test.go (+179 −0, added)

// Copyright (c) 2026 OpenBao a Series of LF Projects, LLC
// SPDX-License-Identifier: MPL-2.0

package namespaces

import (
    "fmt"
    "os"
    "sync"
    "testing"
    "time"

    "github.com/openbao/openbao/api/v2"
    "github.com/openbao/openbao/builtin/credential/userpass"
    logicalKv "github.com/openbao/openbao/builtin/logical/kv"
    vaulthttp "github.com/openbao/openbao/http"
    "github.com/openbao/openbao/sdk/v2/logical"
    "github.com/openbao/openbao/vault"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestNamespaceRepeatedDeletion(t *testing.T) {
    if os.Getenv("CI") != "" {
        t.Skip("Creation of 500 KV+userpass objects causes context cancellation timeouts in CI")
    }

    t.Parallel()

    coreConfig := &vault.CoreConfig{
        DisableCache: true,
        EnableRaw:    true,
        CredentialBackends: map[string]logical.Factory{
            "userpass": userpass.Factory,
        },
        LogicalBackends: map[string]logical.Factory{
            "kv-v2": logicalKv.Factory,
            "kv-v1": logicalKv.Factory,
            "kv":    logicalKv.Factory,
        },
    }

    cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
        HandlerFunc: vaulthttp.Handler,
        NumCores:    2,
    })

    cluster.Start()
    defer cluster.Cleanup()

    cores := cluster.Cores

    vault.TestWaitActive(t, cores[0].Core)

    client := cores[0].Client

    nsResp, err := client.Logical().Write("sys/namespaces/ns1", map[string]any{})
    require.NoError(t, err)
    require.NotNil(t, nsResp)
    require.Contains(t, nsResp.Data, "uuid")

    nsId := nsResp.Data["uuid"]

    ns1Client := client.WithNamespace("ns1")
    err = ns1Client.Sys().PutPolicy("admin", adminPolicy)
    require.NoError(t, err)

    var wg sync.WaitGroup
    for i := range 10 {
        wg.Go(func() {
            populateMounts(t, ns1Client, fmt.Sprintf("secret-%v", i), 25)
        })
        wg.Go(func() {
            populateAuth(t, ns1Client, fmt.Sprintf("userpass-%v", i), 25)
        })
    }

    wg.Wait()

    _, err = client.Logical().Delete("sys/namespaces/ns1")
    require.NoError(t, err)

    // This should effectively cancel namespace deletion.
    err = client.Sys().StepDown()
    require.NoError(t, err)

    vault.TestWaitActive(t, cores[1].Core)

    client = cores[1].Client

    resp, err := client.Logical().List("sys/namespaces")
    require.NoError(t, err)
    require.Contains(t, resp.Data, "keys")
    require.Contains(t, resp.Data["keys"], "ns1/")

    resp, err = client.Logical().Read("sys/namespaces/ns1")
    require.NoError(t, err)
    require.Contains(t, resp.Data, "tainted")
    require.True(t, resp.Data["tainted"].(bool))

    _, err = client.Logical().Delete("sys/namespaces/ns1")
    require.NoError(t, err)

    // Ensure namespace eventually is removed.
    require.EventuallyWithT(t, func(t *assert.CollectT) {
        resp, err := client.Logical().List("sys/namespaces")
        require.NoError(t, err)

        if resp == nil {
            return
        }

        if _, ok := resp.Data["keys"]; !ok {
            return
        }

        require.Empty(t, resp.Data["keys"], "did not expect any namespaces")
    }, 10*time.Second, 10*time.Millisecond)

    // Namespace storage should be empty.
    resp, err = client.Logical().List(fmt.Sprintf("sys/raw/namespaces/%v", nsId))
    require.NoError(t, err)
    require.Nil(t, resp)
}

func populateAuth(t *testing.T, client *api.Client, name string, users int) {
    err := client.Sys().EnableAuth(name, "userpass", name)
    require.NoError(t, err)

    var wg sync.WaitGroup
    for i := range users {
        wg.Go(func() {
            _, err := client.Logical().Write(fmt.Sprintf("auth/%v/users/admin-%v", name, i), map[string]any{
                "password":       fmt.Sprintf("secret-%v", i),
                "token_policies": "admin",
            })
            require.NoError(t, err)

            _, err = client.Logical().Write(fmt.Sprintf("auth/%v/login/admin-%v", name, i), map[string]any{
                "password": fmt.Sprintf("secret-%v", i),
            })
            require.NoError(t, err)
        })
    }

    wg.Wait()
}

func populateMounts(t *testing.T, client *api.Client, name string, entries int) {
    err := client.Sys().Mount(name, &api.MountInput{
        Type: "kv-v2",
    })
    require.NoError(t, err)

    // Wait for KVv2 migration to complete.
    require.EventuallyWithT(t, func(t *assert.CollectT) {
        resp, err := client.Logical().Read(fmt.Sprintf("%v/config", name))
        require.NoError(t, err)
        require.NotNil(t, resp)
        require.Contains(t, resp.Data, "cas_required")
    }, 10*time.Second, 10*time.Millisecond)

    var wg sync.WaitGroup
    for i := range entries {
        wg.Go(func() {
            _, err := client.KVv2(name).Put(t.Context(), fmt.Sprintf("entry-%v", i), map[string]any{
                "value": i,
            })
            require.NoError(t, err)
        })
    }
    wg.Wait()
}

const adminPolicy = `
path "*" {
    capabilities = ["create", "read", "update", "delete", "list", "scan", "sudo"]
}
`
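The test's key trick is the Sys().StepDown() call: it forces a leadership change mid-deletion, which cancels the in-flight cleanup and leaves the namespace tainted, so the retry on the second core exercises exactly the re-deletion path the patch fixes. The final sys/raw listing, possible because the cluster is built with EnableRaw, asserts that no storage entries survive.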
vault/namespace_store.go (+34 −2, modified)

@@ -53,10 +53,11 @@ const (
 	// overhead.
 	nsMaxWorkers = 2 + /* namespace and overhead */
 		1 + /* policies */
-		2 + /* auth + mount */
+		3 + /* reload + auth + mount */
 		1 + /* identity */
 		1 + /* quotas */
-		1 /* locked user entries */
+		1 + /* locked user entries */
+		1 /* final view clearing */
 )
 
 // NamespaceStore is used to provide durable storage of namespace. It is
@@ -1078,6 +1079,19 @@ func (ns *NamespaceStore) clearNamespaceResources(nsCtx context.Context, parent,
 		return err
 	}
 
+	if updateStorage {
+		// To clear auth+secret mounts, we first need to load that portion of the
+		// mount table that this namespace has. Otherwise, things like lease cleanup
+		// will not run if the mount was not already loaded.
+		nonTaintedNs := entry.Clone(false)
+		nonTaintedNs.Tainted = false
+		nonTaintedCtx := namespace.ContextWithNamespace(nsCtx, nonTaintedNs)
+
+		if err := ns.core.reloadNamespaceMounts(nonTaintedCtx, entry.UUID, false /* not yet deleted */); err != nil {
+			return fmt.Errorf("failed to reload namespace mounts: %w", err)
+		}
+	}
+
 	// clear auth mounts
 	ns.core.authLock.Lock()
 	authMountEntries, err := ns.core.auth.FindAllNamespaceMounts(nsCtx)
@@ -1131,6 +1145,24 @@
 	if _, err := ns.core.runLockedUserEntryUpdatesForNamespace(nsCtx, entry, true); err != nil {
 		return fmt.Errorf("failed to clean up locked user entries: %w", err)
 	}
+
+	// clear any remaining storage; while ideally this would not occur, it
+	// gives us now a signal if it did (debug entries) and additionally
+	// gives us a clear path to remediate.
+	//
+	// This is in contrast to the current method where storage entries would
+	// be silently left lying around.
+	view := ns.core.NamespaceView(entry)
+	if err := logical.ScanViewPaginated(nsCtx, view, ns.logger, logical.DefaultScanViewPageLimit, func(page int, index int, path string) (cont bool, err error) {
+		if err := view.Delete(nsCtx, path); err != nil {
+			return false, fmt.Errorf("failed removing entry: %w", err)
+		}
+
+		ns.logger.Debug("bug: removing entry remaining in namespace storage after all mounts were removed", "namespace", entry.Path, "path", path)
+		return true, nil
+	}); err != nil {
+		return fmt.Errorf("failed to clear namespace view: %w", err)
+	}
 	}
 
 	return nil
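The second half of the patch, the final sweep, can be read in isolation: walk everything left under the namespace's storage view and delete it, since at that point any surviving entry indicates a cleanup bug elsewhere. Below is a minimal sketch of that pattern written against only the sdk's generic logical.Storage List/Delete methods; the clearPrefix helper and its recursion are illustrative and are not the paginated ScanViewPaginated helper the patch actually uses.

package sweep

import (
    "context"
    "fmt"
    "strings"

    "github.com/openbao/openbao/sdk/v2/logical"
)

// clearPrefix deletes every entry under prefix, recursing into
// directory-style keys (those ending in "/"). The patch applies the same
// idea to the namespace's storage view after all per-resource cleanup runs.
func clearPrefix(ctx context.Context, s logical.Storage, prefix string) error {
    keys, err := s.List(ctx, prefix)
    if err != nil {
        return fmt.Errorf("failed listing %q: %w", prefix, err)
    }
    for _, key := range keys {
        full := prefix + key
        if strings.HasSuffix(key, "/") {
            // Directory-style key: recurse before moving on.
            if err := clearPrefix(ctx, s, full); err != nil {
                return err
            }
            continue
        }
        if err := s.Delete(ctx, full); err != nil {
            return fmt.Errorf("failed removing entry %q: %w", full, err)
        }
    }
    return nil
}

The production version additionally paginates the scan (DefaultScanViewPageLimit) and debug-logs every deleted path with a "bug:" prefix, turning leftover entries into a diagnosable signal rather than silently stranded storage.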
Vulnerability mechanics
Namespace deletion taints the namespace and then unwinds its resources: auth and secret mounts (with their leases), policies, identity data, quotas, and locked-user entries. If that first pass is interrupted, for instance by the leadership change the regression test forces with StepDown, the retry runs on a core that never loaded the tainted namespace's portion of the mount table. FindAllNamespaceMounts then finds nothing to clear, so per-mount cleanup such as lease revocation is silently skipped even though the namespace is ultimately marked deleted, stranding live leases and orphaned storage entries. The patch fixes both halves of the problem: on re-deletion it first reloads the namespace's mounts under a non-tainted context (reloadNamespaceMounts) so lease cleanup runs against every mount, and after all per-resource cleanup it sweeps the namespace's storage view, deleting and debug-logging any entry still present.
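A namespace caught mid-deletion is visible through the tainted flag on the namespace read endpoint, as the regression test above asserts. A minimal sketch of that check, assuming an authenticated client and a placeholder namespace path:

package main

import (
    "fmt"
    "log"

    "github.com/openbao/openbao/api/v2"
)

func main() {
    client, err := api.NewClient(api.DefaultConfig())
    if err != nil {
        log.Fatal(err)
    }

    // Placeholder namespace path; "tainted" is set once deletion has begun.
    resp, err := client.Logical().Read("sys/namespaces/ns1")
    if err != nil {
        log.Fatal(err)
    }
    if resp == nil {
        fmt.Println("namespace not found (fully deleted)")
        return
    }
    if tainted, ok := resp.Data["tainted"].(bool); ok && tainted {
        fmt.Println("namespace is tainted: deletion started but has not completed")
    }
}

On a patched server (2.5.3 or later), re-issuing the delete against such a tainted namespace completes the cleanup, including lease revocation and the final storage sweep.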