SpiceDB's WriteRelationships fails silently if payload is too big
Description
SpiceDB is an open source database system for creating and managing security-critical application permissions. In versions prior to 1.45.2, users who use the exclusion operator somewhere in their authorization schema; have configured their SpiceDB server such that --write-relationships-max-updates-per-call is bigger than 6500; and issue calls to WriteRelationships with a large enough number of updates that cause the payload to be bigger than what their datastore allows; will receive a successful response from their WriteRelationships call, when in reality that call failed, and receive incorrect permission check results, if those relationships had to be read to resolve the relation involving the exclusion. Version 1.45.2 contains a patch for the issue. As a workaround, set --write-relationships-max-updates-per-call to 1000.
Affected products
Patches (1)
1d0cd103a92cc — Add missing error checks for Postgres readwrite and add a test to ensure that an internal error is rewritten nicely
4 files changed · +78 −1
internal/datastore/common/errors.go+24 −0 modified@@ -43,6 +43,30 @@ func NewSerializationError(err error) error { return SerializationError{err} } +type WriteOverLimitError struct { + error +} + +func (err WriteOverLimitError) GRPCStatus() *status.Status { + return spiceerrors.WithCodeAndDetails( + err, + codes.Aborted, + spiceerrors.ForReason( + v1.ErrorReason_ERROR_REASON_TOO_MANY_UPDATES_IN_REQUEST, + map[string]string{}, + ), + ) +} + +func (err WriteOverLimitError) Unwrap() error { + return err.error +} + +// NewWriteOverLimitError creates a new WriteOverLimitError. +func NewWriteOverLimitError(err error) error { + return WriteOverLimitError{err} +} + // ReadOnlyTransactionError is returned when an otherwise read-write // transaction fails on writes with an error indicating that the datastore // is currently in a read-only mode.
internal/datastore/mysql/watch.go+1 −1 modified@@ -182,7 +182,7 @@ func (mds *Datastore) loadChanges( } } rows.Close() - if err = rows.Err(); err != nil { + if rows.Err() != nil { return }
internal/datastore/postgres/postgres_shared_test.go+42 −0 modified@@ -285,6 +285,16 @@ func testPostgresDatastore(t *testing.T, config postgresTestConfig) { WatchBufferLength(1), MigrationPhase(config.migrationPhase), )) + + t.Run("ExceedInsertQuerySizeTest", createDatastoreTest( + b, + ExceedInsertQuerySizeTest, + RevisionQuantization(0), + GCWindow(1*time.Millisecond), + GCInterval(veryLargeGCInterval), + WatchBufferLength(1), + MigrationPhase(config.migrationPhase), + )) }) } @@ -2030,4 +2040,36 @@ func ContinuousCheckpointTest(t *testing.T, ds datastore.Datastore) { } } +func ExceedInsertQuerySizeTest(t *testing.T, ds datastore.Datastore) { + require := require.New(t) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + _, err := ds.ReadWriteTx(ctx, func(ctx context.Context, rwt datastore.ReadWriteTransaction) error { + updates := make([]tuple.RelationshipUpdate, 0, 20_000) + for i := range 20_000 { + tpl := tuple.MustParse(fmt.Sprintf("resource:resource-%d#reader%d@user%d:user-%d", i, i, i, i)) + t := time.Now().Add(24 * time.Hour).Add(time.Duration(i) * time.Minute) + tpl.OptionalExpiration = &t + updates = append(updates, tuple.Touch(tpl)) + } + return rwt.WriteRelationships(ctx, updates) + }) + require.Error(err) + require.ErrorContains(err, "exceeds the maximum size supported by this datastore") + + headRev, err := ds.HeadRevision(ctx) + require.NoError(err) + iter, err := ds.SnapshotReader(headRev).QueryRelationships(context.Background(), datastore.RelationshipsFilter{ + OptionalResourceType: "resource", + }) + require.NoError(err) + count := 0 + for range iter { + count++ + } + require.Equal(0, count, "expected to have 0 relationships, but found %d", count) +} + const waitForChangesTimeout = 10 * time.Second
internal/datastore/postgres/readwrite.go+11 −0 modified@@ -293,6 +293,10 @@ func (rwt *pgReadWriteTXN) WriteRelationships(ctx context.Context, mutations []t } rows.Close() + if rows.Err() != nil { + return handleWriteError(rows.Err()) + } + // For each remaining TOUCH mutation, add a "DELETE" operation for the row such that if the caveat and/or // context has changed, the row will be deleted. For ones in which the caveat name and/or context did cause // the deletion (because of a change), the row will be re-inserted with the new caveat name and/or context. @@ -390,6 +394,9 @@ func (rwt *pgReadWriteTXN) WriteRelationships(ctx context.Context, mutations []t touchWriteHasValues = true } rows.Close() + if rows.Err() != nil { + return handleWriteError(rows.Err()) + } // If no INSERTs are necessary to update caveats, then nothing more to do. if !touchWriteHasValues { @@ -415,6 +422,10 @@ func handleWriteError(err error) error { return common.NewSerializationError(fmt.Errorf("unable to write relationships due to a serialization error: [%w]; this typically indicates that a number of write transactions are contending over the same relationships; either reduce the contention or scale this Postgres instance", err)) } + if err.Error() == "extended protocol limited to 65535 parameters" { + return common.NewWriteOverLimitError(fmt.Errorf("the specified write operation exceeds the maximum size supported by this datastore; please reduce the number of updates in the call")) + } + return fmt.Errorf(errUnableToWriteRelationships, err) }
Vulnerability mechanics
Generated by null/stub on May 9, 2026. Inputs: CWE entries + fix-commit diffs from this CVE's patches. Citations validated against bundle.
References
News mentions (4)
No linked articles in our index yet.