From a3e2e7e0315bc8d98bb51e32089aeb076e0c25a4 Mon Sep 17 00:00:00 2001 From: Leonard Lyubich Date: Mon, 7 Jul 2025 18:20:19 +0300 Subject: [PATCH 01/27] sn/object: Refactor broadcast on PUT Use one instruction to calculate the flag. Also, do it per-object and do not store in the struct field. In addition, evaluate only for non-local requests. Signed-off-by: Leonard Lyubich --- pkg/services/object/put/distibuted_test.go | 19 +++++++++---------- pkg/services/object/put/distributed.go | 18 ++++++------------ pkg/services/object/put/streamer.go | 10 +--------- 3 files changed, 16 insertions(+), 31 deletions(-) diff --git a/pkg/services/object/put/distibuted_test.go b/pkg/services/object/put/distibuted_test.go index 679c0c4d3e..a607f3c360 100644 --- a/pkg/services/object/put/distibuted_test.go +++ b/pkg/services/object/put/distibuted_test.go @@ -103,7 +103,7 @@ func TestIterateNodesForObject(t *testing.T) { } var handlerMtx sync.Mutex var handlerCalls []nodeDesc - err := iter.iterateNodesForObject(objID, func(node nodeDesc) error { + err := iter.iterateNodesForObject(objID, false, func(node nodeDesc) error { handlerMtx.Lock() handlerCalls = append(handlerCalls, node) handlerMtx.Unlock() @@ -195,7 +195,7 @@ func TestIterateNodesForObject(t *testing.T) { } var handlerMtx sync.Mutex var handlerCalls [][]byte - err := iter.iterateNodesForObject(objID, func(node nodeDesc) error { + err := iter.iterateNodesForObject(objID, false, func(node nodeDesc) error { handlerMtx.Lock() handlerCalls = append(handlerCalls, node.info.PublicKey()) handlerMtx.Unlock() @@ -232,11 +232,10 @@ func TestIterateNodesForObject(t *testing.T) { cnrNodes: cnrNodes, primCounts: []uint{1, 1, 1}, }, - broadcast: true, } var handlerMtx sync.Mutex var handlerCalls [][]byte - err := iter.iterateNodesForObject(objID, func(node nodeDesc) error { + err := iter.iterateNodesForObject(objID, true, func(node nodeDesc) error { handlerMtx.Lock() handlerCalls = append(handlerCalls, node.info.PublicKey()) 
handlerMtx.Unlock() @@ -269,7 +268,7 @@ func TestIterateNodesForObject(t *testing.T) { sortErr: errors.New("any sort error"), }, } - err := iter.iterateNodesForObject(objID, func(nodeDesc) error { + err := iter.iterateNodesForObject(objID, false, func(nodeDesc) error { t.Fatal("must not be called") return nil }) @@ -298,7 +297,7 @@ func TestIterateNodesForObject(t *testing.T) { } var handlerMtx sync.Mutex var handlerCalls [][]byte - err := iter.iterateNodesForObject(objID, func(node nodeDesc) error { + err := iter.iterateNodesForObject(objID, false, func(node nodeDesc) error { handlerMtx.Lock() handlerCalls = append(handlerCalls, node.info.PublicKey()) handlerMtx.Unlock() @@ -342,7 +341,7 @@ func TestIterateNodesForObject(t *testing.T) { } var handlerMtx sync.Mutex var handlerCalls [][]byte - err := iter.iterateNodesForObject(objID, func(node nodeDesc) error { + err := iter.iterateNodesForObject(objID, false, func(node nodeDesc) error { handlerMtx.Lock() handlerCalls = append(handlerCalls, node.info.PublicKey()) handlerMtx.Unlock() @@ -380,7 +379,7 @@ func TestIterateNodesForObject(t *testing.T) { } var handlerMtx sync.Mutex var handlerCalls [][]byte - err := iter.iterateNodesForObject(objID, func(node nodeDesc) error { + err := iter.iterateNodesForObject(objID, false, func(node nodeDesc) error { handlerMtx.Lock() handlerCalls = append(handlerCalls, node.info.PublicKey()) handlerMtx.Unlock() @@ -421,7 +420,7 @@ func TestIterateNodesForObject(t *testing.T) { } var handlerMtx sync.Mutex var handlerCalls [][]byte - err := iter.iterateNodesForObject(objID, func(node nodeDesc) error { + err := iter.iterateNodesForObject(objID, false, func(node nodeDesc) error { handlerMtx.Lock() handlerCalls = append(handlerCalls, node.info.PublicKey()) handlerMtx.Unlock() @@ -461,7 +460,7 @@ func TestIterateNodesForObject(t *testing.T) { blockCh := make(chan struct{}) returnCh := make(chan struct{}) go func() { - err := iter.iterateNodesForObject(objID, func(node nodeDesc) error { + 
err := iter.iterateNodesForObject(objID, false, func(node nodeDesc) error { <-blockCh return nil }) diff --git a/pkg/services/object/put/distributed.go b/pkg/services/object/put/distributed.go index be377f46ae..8756f8c236 100644 --- a/pkg/services/object/put/distributed.go +++ b/pkg/services/object/put/distributed.go @@ -131,12 +131,8 @@ func (t *distributedTarget) Close() (oid.ID, error) { t.obj.SetPayload(t.encodedObject.b[t.encodedObject.pldOff:]) - tombOrLink := t.obj.Type() == objectSDK.TypeLink || t.obj.Type() == objectSDK.TypeTombstone - - if !t.placementIterator.broadcast && len(t.obj.Children()) > 0 || tombOrLink { - // enabling extra broadcast for linking and tomb objects - t.placementIterator.broadcast = true - } + typ := t.obj.Type() + tombOrLink := typ == objectSDK.TypeLink || typ == objectSDK.TypeTombstone // v2 split link object and tombstone validations are expensive routines // and are useless if the node does not belong to the container, since @@ -165,7 +161,8 @@ func (t *distributedTarget) Close() (oid.ID, error) { err = errIncompletePut{singleErr: fmt.Errorf("%w (last node error: %w)", errNotEnoughNodes{required: 1}, err)} } } else { - err = t.placementIterator.iterateNodesForObject(id, t.sendObject) + broadcast := tombOrLink || typ == objectSDK.TypeLock || len(t.obj.Children()) > 0 + err = t.placementIterator.iterateNodesForObject(id, broadcast, t.sendObject) } if err != nil { return oid.ID{}, err @@ -380,12 +377,9 @@ type placementIterator struct { // when non-zero, this setting simplifies the object's storage policy // requirements to a fixed number of object replicas to be retained linearReplNum uint - // whether to perform additional best-effort of sending the object replica to - // all reserve nodes of the container - broadcast bool } -func (x placementIterator) iterateNodesForObject(obj oid.ID, f func(nodeDesc) error) error { +func (x placementIterator) iterateNodesForObject(obj oid.ID, broadcast bool, f func(nodeDesc) error) error { var 
replCounts []uint var l = x.log.With(zap.Stringer("oid", obj)) nodeLists, err := x.containerNodes.SortForObject(obj) @@ -514,7 +508,7 @@ func (x placementIterator) iterateNodesForObject(obj oid.ID, f func(nodeDesc) er wg.Wait() } } - if !x.broadcast { + if !broadcast { return nil } // TODO: since main part of the operation has already been completed, and diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go index a30f8b0044..5f898b4124 100644 --- a/pkg/services/object/put/streamer.go +++ b/pkg/services/object/put/streamer.go @@ -10,7 +10,6 @@ import ( "github.com/nspcc-dev/neofs-node/pkg/services/object/util" "github.com/nspcc-dev/neofs-sdk-go/container" neofsecdsa "github.com/nspcc-dev/neofs-sdk-go/crypto/ecdsa" - "github.com/nspcc-dev/neofs-sdk-go/object" "github.com/nspcc-dev/neofs-sdk-go/user" ) @@ -199,12 +198,6 @@ func (p *Streamer) newCommonTarget(prm *PutInitPrm) internal.Target { } } - // enable additional container broadcast on non-local operation - // if object has TOMBSTONE or LOCK type. 
- typ := prm.hdr.Type() - localOnly := prm.common.LocalOnly() - withBroadcast := !localOnly && (typ == object.TypeTombstone || typ == object.TypeLock) - return &distributedTarget{ opCtx: p.ctx, fsState: p.networkState, @@ -216,7 +209,6 @@ func (p *Streamer) newCommonTarget(prm *PutInitPrm) internal.Target { remotePool: p.remotePool, containerNodes: prm.containerNodes, linearReplNum: uint(prm.copiesNumber), - broadcast: withBroadcast, }, localStorage: p.localStore, keyStorage: p.keyStorage, @@ -230,7 +222,7 @@ func (p *Streamer) newCommonTarget(prm *PutInitPrm) internal.Target { cnrClient: p.cfg.cnrClient, metainfoConsistencyAttr: metaAttribute(prm.cnr), metaSigner: prm.localSignerRFC6979, - localOnly: localOnly, + localOnly: prm.common.LocalOnly(), } } From 464adb55a8b6a5c4ff3cadba86b53e77dfd1fbbb Mon Sep 17 00:00:00 2001 From: Leonard Lyubich Date: Tue, 8 Jul 2025 12:28:08 +0300 Subject: [PATCH 02/27] sn/object: Separate code for ready object saving Will be useful for EC policies in #3420. 
Signed-off-by: Leonard Lyubich --- pkg/services/object/put/distributed.go | 76 +++++++++++++++----------- 1 file changed, 44 insertions(+), 32 deletions(-) diff --git a/pkg/services/object/put/distributed.go b/pkg/services/object/put/distributed.go index 8756f8c236..22fd0b9a96 100644 --- a/pkg/services/object/put/distributed.go +++ b/pkg/services/object/put/distributed.go @@ -32,7 +32,6 @@ type distributedTarget struct { placementIterator placementIterator obj *objectSDK.Object - objMeta object.ContentMeta networkMagicNumber uint32 fsState netmapcore.StateDetailed @@ -138,34 +137,47 @@ func (t *distributedTarget) Close() (oid.ID, error) { // and are useless if the node does not belong to the container, since // another node is responsible for the validation and may decline it, // does not matter what this node thinks about it + var objMeta object.ContentMeta if !tombOrLink || t.localNodeInContainer { var err error - if t.objMeta, err = t.fmt.ValidateContent(t.obj); err != nil { + if objMeta, err = t.fmt.ValidateContent(t.obj); err != nil { return oid.ID{}, fmt.Errorf("(%T) could not validate payload content: %w", t, err) } } + err := t.saveObject(*t.obj, objMeta, t.encodedObject) + if err != nil { + return oid.ID{}, err + } + + return t.obj.GetID(), nil +} + +func (t *distributedTarget) saveObject(obj objectSDK.Object, objMeta object.ContentMeta, encObj encodedObject) error { if t.localNodeInContainer && t.metainfoConsistencyAttr != "" { - t.objSharedMeta = t.encodeCurrentObjectMetadata() + t.objSharedMeta = t.encodeObjectMetadata(obj) } - id := t.obj.GetID() + id := obj.GetID() var err error if t.localOnly { - var l = t.placementIterator.log.With(zap.Stringer("oid", t.obj.GetID())) + var l = t.placementIterator.log.With(zap.Stringer("oid", id)) - err = t.writeObjectLocally() + err = t.writeObjectLocally(obj, objMeta, encObj) if err != nil { err = fmt.Errorf("write object locally: %w", err) svcutil.LogServiceError(l, "PUT", nil, err) err = 
errIncompletePut{singleErr: fmt.Errorf("%w (last node error: %w)", errNotEnoughNodes{required: 1}, err)} } } else { - broadcast := tombOrLink || typ == objectSDK.TypeLock || len(t.obj.Children()) > 0 - err = t.placementIterator.iterateNodesForObject(id, broadcast, t.sendObject) + typ := obj.Type() + broadcast := typ == objectSDK.TypeTombstone || typ == objectSDK.TypeLink || typ == objectSDK.TypeLock || len(obj.Children()) > 0 + err = t.placementIterator.iterateNodesForObject(id, broadcast, func(node nodeDesc) error { + return t.sendObject(obj, objMeta, encObj, node) + }) } if err != nil { - return oid.ID{}, err + return err } if t.localNodeInContainer && t.metainfoConsistencyAttr != "" { @@ -173,7 +185,7 @@ func (t *distributedTarget) Close() (oid.ID, error) { defer t.metaMtx.RUnlock() if len(t.collectedSignatures) == 0 { - return oid.ID{}, fmt.Errorf("skip metadata chain submit for %s object: no signatures were collected", id) + return fmt.Errorf("skip metadata chain submit for %s object: no signatures were collected", id) } var await bool @@ -184,10 +196,10 @@ func (t *distributedTarget) Close() (oid.ID, error) { case "optimistic": await = false default: - return id, nil + return nil } - addr := object.AddressOf(t.obj) + addr := object.AddressOf(&obj) var objAccepted chan struct{} if await { objAccepted = make(chan struct{}, 1) @@ -199,14 +211,14 @@ func (t *distributedTarget) Close() (oid.ID, error) { if await { t.metaSvc.UnsubscribeFromObject(addr) } - return oid.ID{}, fmt.Errorf("failed to submit %s object meta information: %w", addr, err) + return fmt.Errorf("failed to submit %s object meta information: %w", addr, err) } if await { select { case <-t.opCtx.Done(): t.metaSvc.UnsubscribeFromObject(addr) - return oid.ID{}, fmt.Errorf("interrupted awaiting for %s object meta information: %w", addr, t.opCtx.Err()) + return fmt.Errorf("interrupted awaiting for %s object meta information: %w", addr, t.opCtx.Err()) case <-objAccepted: } } @@ -214,38 +226,38 @@ func (t 
*distributedTarget) Close() (oid.ID, error) { t.placementIterator.log.Debug("submitted object meta information", zap.Stringer("addr", addr)) } - return id, nil + return nil } -func (t *distributedTarget) encodeCurrentObjectMetadata() []byte { +func (t *distributedTarget) encodeObjectMetadata(obj objectSDK.Object) []byte { currBlock := t.fsState.CurrentBlock() currEpochDuration := t.fsState.CurrentEpochDuration() expectedVUB := (uint64(currBlock)/currEpochDuration + 2) * currEpochDuration - firstObj := t.obj.GetFirstID() - if t.obj.HasParent() && firstObj.IsZero() { + firstObj := obj.GetFirstID() + if obj.HasParent() && firstObj.IsZero() { // object itself is the first one - firstObj = t.obj.GetID() + firstObj = obj.GetID() } var deletedObjs []oid.ID var lockedObjs []oid.ID - typ := t.obj.Type() + typ := obj.Type() switch typ { case objectSDK.TypeTombstone: - deletedObjs = append(deletedObjs, t.obj.AssociatedObject()) + deletedObjs = append(deletedObjs, obj.AssociatedObject()) case objectSDK.TypeLock: - lockedObjs = append(lockedObjs, t.obj.AssociatedObject()) + lockedObjs = append(lockedObjs, obj.AssociatedObject()) default: } - return object.EncodeReplicationMetaInfo(t.obj.GetContainerID(), t.obj.GetID(), firstObj, t.obj.GetPreviousID(), - t.obj.PayloadSize(), typ, deletedObjs, lockedObjs, expectedVUB, t.networkMagicNumber) + return object.EncodeReplicationMetaInfo(obj.GetContainerID(), obj.GetID(), firstObj, obj.GetPreviousID(), + obj.PayloadSize(), typ, deletedObjs, lockedObjs, expectedVUB, t.networkMagicNumber) } -func (t *distributedTarget) sendObject(node nodeDesc) error { +func (t *distributedTarget) sendObject(obj objectSDK.Object, objMeta object.ContentMeta, encObj encodedObject, node nodeDesc) error { if node.local { - if err := t.writeObjectLocally(); err != nil { + if err := t.writeObjectLocally(obj, objMeta, encObj); err != nil { return fmt.Errorf("write object locally: %w", err) } return nil @@ -257,13 +269,13 @@ func (t *distributedTarget) 
sendObject(node nodeDesc) error { var sigsRaw []byte var err error - if t.encodedObject.hdrOff > 0 { - sigsRaw, err = t.transport.SendReplicationRequestToNode(t.opCtx, t.encodedObject.b, node.info) + if encObj.hdrOff > 0 { + sigsRaw, err = t.transport.SendReplicationRequestToNode(t.opCtx, encObj.b, node.info) if err != nil { err = fmt.Errorf("replicate object to remote node (key=%x): %w", node.info.PublicKey(), err) } } else { - err = putObjectToNode(t.opCtx, node.info, t.obj, t.keyStorage, t.clientConstructor, t.commonPrm) + err = putObjectToNode(t.opCtx, node.info, &obj, t.keyStorage, t.clientConstructor, t.commonPrm) } if err != nil { return fmt.Errorf("could not close object stream: %w", err) @@ -272,7 +284,7 @@ func (t *distributedTarget) sendObject(node nodeDesc) error { if t.localNodeInContainer && t.metainfoConsistencyAttr != "" { // These should technically be errors, but we don't have // a complete implementation now, so errors are substituted with logs. - var l = t.placementIterator.log.With(zap.Stringer("oid", t.obj.GetID()), + var l = t.placementIterator.log.With(zap.Stringer("oid", obj.GetID()), zap.String("node", network.StringifyGroup(node.info.AddressGroup()))) sigs, err := decodeSignatures(sigsRaw) @@ -303,8 +315,8 @@ func (t *distributedTarget) sendObject(node nodeDesc) error { return nil } -func (t *distributedTarget) writeObjectLocally() error { - if err := putObjectLocally(t.localStorage, t.obj, t.objMeta, &t.encodedObject); err != nil { +func (t *distributedTarget) writeObjectLocally(obj objectSDK.Object, objMeta object.ContentMeta, encObj encodedObject) error { + if err := putObjectLocally(t.localStorage, &obj, objMeta, &encObj); err != nil { return err } From 7628001ccfeeb4d2039f8f369e995b361018099b Mon Sep 17 00:00:00 2001 From: Leonard Lyubich Date: Tue, 15 Jul 2025 16:51:52 +0300 Subject: [PATCH 03/27] sn/object: Replace stateless method with standalone function So it can be reused. 
Signed-off-by: Leonard Lyubich --- pkg/services/object/put/distributed.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/services/object/put/distributed.go b/pkg/services/object/put/distributed.go index 22fd0b9a96..52a0a58c32 100644 --- a/pkg/services/object/put/distributed.go +++ b/pkg/services/object/put/distributed.go @@ -478,7 +478,7 @@ func (x placementIterator) iterateNodesForObject(obj oid.ID, broadcast bool, f f continue } if nr.desc.local = x.neoFSNet.IsLocalNodePublicKey(pk); !nr.desc.local { - nr.desc.info, nr.convertErr = x.convertNodeInfo(nodeLists[listInd][j]) + nr.desc.info, nr.convertErr = convertNodeInfo(nodeLists[listInd][j]) } processedNodesMtx.Lock() nodeResults[pks] = nr @@ -538,7 +538,7 @@ broadcast: continue } if nr.desc.local = x.neoFSNet.IsLocalNodePublicKey(pk); !nr.desc.local { - nr.desc.info, nr.convertErr = x.convertNodeInfo(nodeLists[i][j]) + nr.desc.info, nr.convertErr = convertNodeInfo(nodeLists[i][j]) } processedNodesMtx.Lock() nodeResults[pks] = nr @@ -568,7 +568,7 @@ broadcast: return nil } -func (x placementIterator) convertNodeInfo(nodeInfo netmap.NodeInfo) (client.NodeInfo, error) { +func convertNodeInfo(nodeInfo netmap.NodeInfo) (client.NodeInfo, error) { var res client.NodeInfo var endpoints network.AddressGroup if err := endpoints.FromIterator(network.NodeEndpointsIterator(nodeInfo)); err != nil { From b798c9a440a462c31729f06673fb9b4d90720401 Mon Sep 17 00:00:00 2001 From: Leonard Lyubich Date: Tue, 5 Aug 2025 15:13:15 +0300 Subject: [PATCH 04/27] sn/object: Initial support for erasure coding policies by PUT server This extends PUT server logic to comply with EC policies - independently or together with REP ones. This logic will remain inactive until it becomes possible to create containers with EC policies. This works for small objects only: encoding and distribution will be the same for big objects, but SNs will deny them without proper adaptation. Closes #3419. Refs #3420. 
Signed-off-by: Leonard Lyubich --- cmd/neofs-node/object.go | 2 + go.mod | 4 +- go.sum | 9 +- internal/ec/ec.go | 81 +++++++ internal/ec/ec_test.go | 76 ++++++ internal/ec/object_test.go | 174 ++++++++++++++ internal/ec/objects.go | 75 ++++++ internal/object/attributes.go | 80 +++++++ internal/object/attributes_test.go | 174 ++++++++++++++ internal/slices/index.go | 17 ++ internal/slices/index_test.go | 24 ++ internal/slices/slices.go | 38 +++ internal/slices/slices_test.go | 40 ++++ pkg/services/object/put/distibuted_test.go | 98 +------- pkg/services/object/put/distributed.go | 75 ++++-- pkg/services/object/put/ec.go | 135 +++++++++++ pkg/services/object/put/prm.go | 3 + pkg/services/object/put/service.go | 8 + pkg/services/object/put/service_test.go | 264 ++++++++++++++++++++- pkg/services/object/put/streamer.go | 47 +++- pkg/services/object/put/util.go | 19 ++ 21 files changed, 1321 insertions(+), 122 deletions(-) create mode 100644 internal/ec/ec.go create mode 100644 internal/ec/ec_test.go create mode 100644 internal/ec/object_test.go create mode 100644 internal/ec/objects.go create mode 100644 internal/object/attributes.go create mode 100644 internal/object/attributes_test.go create mode 100644 internal/slices/index.go create mode 100644 internal/slices/index_test.go create mode 100644 internal/slices/slices.go create mode 100644 internal/slices/slices_test.go create mode 100644 pkg/services/object/put/ec.go create mode 100644 pkg/services/object/put/util.go diff --git a/cmd/neofs-node/object.go b/cmd/neofs-node/object.go index f1e9ab4c0c..472e022cc0 100644 --- a/cmd/neofs-node/object.go +++ b/cmd/neofs-node/object.go @@ -10,6 +10,7 @@ import ( "github.com/google/uuid" lru "github.com/hashicorp/golang-lru/v2" + iec "github.com/nspcc-dev/neofs-node/internal/ec" coreclient "github.com/nspcc-dev/neofs-node/pkg/core/client" containercore "github.com/nspcc-dev/neofs-node/pkg/core/container" "github.com/nspcc-dev/neofs-node/pkg/core/netmap" @@ -804,6 +805,7 @@ 
type containerNodesSorter struct { func (x *containerNodesSorter) Unsorted() [][]netmapsdk.NodeInfo { return x.policy.nodeSets } func (x *containerNodesSorter) PrimaryCounts() []uint { return x.policy.repCounts } +func (x *containerNodesSorter) ECRules() []iec.Rule { return nil } func (x *containerNodesSorter) SortForObject(obj oid.ID) ([][]netmapsdk.NodeInfo, error) { cacheKey := objectNodesCacheKey{epoch: x.curEpoch} cacheKey.addr.SetContainer(x.cnrID) diff --git a/go.mod b/go.mod index d666d4e75f..f2392af354 100644 --- a/go.mod +++ b/go.mod @@ -13,10 +13,12 @@ require ( github.com/google/uuid v1.6.0 github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/klauspost/compress v1.17.11 + github.com/klauspost/reedsolomon v1.12.4 github.com/mitchellh/go-homedir v1.1.0 github.com/mitchellh/mapstructure v1.5.0 github.com/mr-tron/base58 v1.2.0 github.com/multiformats/go-multiaddr v0.12.2 + github.com/mxschmitt/golang-combinations v1.2.0 github.com/nspcc-dev/hrw/v2 v2.0.3 github.com/nspcc-dev/locode-db v0.6.0 github.com/nspcc-dev/neo-go v0.111.0 @@ -62,7 +64,7 @@ require ( github.com/holiman/uint256 v1.3.2 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/ipfs/go-cid v0.4.1 // indirect - github.com/klauspost/cpuid/v2 v2.2.7 // indirect + github.com/klauspost/cpuid/v2 v2.2.11 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mattn/go-runewidth v0.0.15 // indirect github.com/minio/sha256-simd v1.0.1 // indirect diff --git a/go.sum b/go.sum index 8d68b5140c..1d389f75be 100644 --- a/go.sum +++ b/go.sum @@ -121,8 +121,10 @@ github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNU github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= 
-github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM= -github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/cpuid/v2 v2.2.11 h1:0OwqZRYI2rFrjS4kvkDnqJkKHdHaRnCm68/DY4OxRzU= +github.com/klauspost/cpuid/v2 v2.2.11/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= +github.com/klauspost/reedsolomon v1.12.4 h1:5aDr3ZGoJbgu/8+j45KtUJxzYm8k08JGtB9Wx1VQ4OA= +github.com/klauspost/reedsolomon v1.12.4/go.mod h1:d3CzOMOt0JXGIFZm1StgkyF14EYr3xneR2rNWo7NcMU= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -185,6 +187,8 @@ github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/n github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mxschmitt/golang-combinations v1.2.0 h1:V5E7MncIK8Yr1SL/SpdqMuSquFsfoIs5auI7Y3n8z14= +github.com/mxschmitt/golang-combinations v1.2.0/go.mod h1:RCm5eR03B+JrBOMRDLsKZWShluXdrHu+qwhPEJ0miBM= github.com/nspcc-dev/bbolt v0.0.0-20250612101626-5df2544a4a22 h1:M5Nmg1iCnbZngzIBDIlMr9vW+okFfcSMBvBlXG8r+14= github.com/nspcc-dev/bbolt v0.0.0-20250612101626-5df2544a4a22/go.mod h1:AsD+OCi/qPN1giOX1aiLAha3o1U8rAz65bvN4j0sRuk= github.com/nspcc-dev/dbft v0.4.0 h1:4/atD4GrrMEtrYBDiZPrPzdKZ6ws7PR/cg0M4DEdVeI= @@ -358,7 +362,6 @@ golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= diff --git a/internal/ec/ec.go b/internal/ec/ec.go new file mode 100644 index 0000000000..3a7a4bf5b8 --- /dev/null +++ b/internal/ec/ec.go @@ -0,0 +1,81 @@ +package ec + +import ( + "fmt" + "slices" + "strconv" + + "github.com/klauspost/reedsolomon" + "golang.org/x/exp/constraints" +) + +// Erasure coding attributes. +const ( + AttributePrefix = "__NEOFS__EC_" + AttributeRuleIdx = AttributePrefix + "RULE_IDX" + AttributePartIdx = AttributePrefix + "PART_IDX" +) + +// Rule represents erasure coding rule for object payload's encoding and placement. +type Rule struct { + DataPartNum uint8 + ParityPartNum uint8 +} + +// String implements [fmt.Stringer]. +func (x Rule) String() string { + return strconv.FormatUint(uint64(x.DataPartNum), 10) + "/" + strconv.FormatUint(uint64(x.ParityPartNum), 10) +} + +// Encode encodes given data according to specified EC rule and returns coded +// parts. First [Rule.DataPartNum] elements are data parts, other +// [Rule.ParityPartNum] ones are parity blocks. +// +// All parts are the same length. If data len is not divisible by +// [Rule.DataPartNum], last data part is aligned with zeros. +// +// If data is empty, all parts are nil. +func Encode(rule Rule, data []byte) ([][]byte, error) { + if len(data) == 0 { + return make([][]byte, rule.DataPartNum+rule.ParityPartNum), nil + } + + // TODO: Explore reedsolomon.Option for performance improvement. 
https://github.com/nspcc-dev/neofs-node/issues/3501 + enc, err := reedsolomon.New(int(rule.DataPartNum), int(rule.ParityPartNum)) + if err != nil { // should never happen with correct rule + return nil, fmt.Errorf("init Reed-Solomon encoder: %w", err) + } + + parts, err := enc.Split(data) + if err != nil { + return nil, fmt.Errorf("split data: %w", err) + } + + if err := enc.Encode(parts); err != nil { + return nil, fmt.Errorf("calculate Reed-Solomon parity: %w", err) + } + + return parts, nil +} + +// Decode decodes source data of known len from EC parts obtained by applying +// specified rule. +func Decode[LT constraints.Unsigned](rule Rule, dataLen LT, parts [][]byte) ([]byte, error) { + // TODO: Explore reedsolomon.Option for performance improvement. https://github.com/nspcc-dev/neofs-node/issues/3501 + dec, err := reedsolomon.New(int(rule.DataPartNum), int(rule.ParityPartNum)) + if err != nil { // should never happen with correct rule + return nil, fmt.Errorf("init Reed-Solomon decoder: %w", err) + } + + required := make([]bool, rule.DataPartNum+rule.ParityPartNum) + for i := range rule.DataPartNum { + required[i] = true + } + + if err := dec.ReconstructSome(parts, required); err != nil { + return nil, fmt.Errorf("restore Reed-Solomon: %w", err) + } + + // TODO: last part may be shorter, do not overallocate buffer. 
+ return slices.Concat(parts[:rule.DataPartNum]...)[:dataLen], nil +} diff --git a/internal/ec/ec_test.go b/internal/ec/ec_test.go new file mode 100644 index 0000000000..2e37013e2e --- /dev/null +++ b/internal/ec/ec_test.go @@ -0,0 +1,76 @@ +package ec_test + +import ( + "testing" + + "github.com/klauspost/reedsolomon" + iec "github.com/nspcc-dev/neofs-node/internal/ec" + islices "github.com/nspcc-dev/neofs-node/internal/slices" + "github.com/nspcc-dev/neofs-node/internal/testutil" + "github.com/stretchr/testify/require" +) + +func TestRule_String(t *testing.T) { + r := iec.Rule{ + DataPartNum: 12, + ParityPartNum: 23, + } + require.Equal(t, "12/23", r.String()) +} + +func testEncode(t *testing.T, rule iec.Rule, data []byte) { + ln := uint(len(data)) + + parts, err := iec.Encode(rule, data) + require.NoError(t, err) + + res, err := iec.Decode(rule, ln, parts) + require.NoError(t, err) + require.Equal(t, data, res) + + for lostCount := 1; lostCount <= int(rule.ParityPartNum); lostCount++ { + for _, lostIdxs := range islices.IndexCombos(len(parts), lostCount) { + res, err := iec.Decode(rule, ln, islices.NilTwoDimSliceElements(parts, lostIdxs)) + require.NoError(t, err) + require.Equal(t, data, res) + } + } + + for _, lostIdxs := range islices.IndexCombos(len(parts), int(rule.ParityPartNum)+1) { + _, err := iec.Decode(rule, ln, islices.NilTwoDimSliceElements(parts, lostIdxs)) + require.ErrorContains(t, err, "restore Reed-Solomon") + require.ErrorIs(t, err, reedsolomon.ErrTooFewShards) + } +} + +func TestEncode(t *testing.T) { + rules := []iec.Rule{ + {DataPartNum: 3, ParityPartNum: 1}, + {DataPartNum: 12, ParityPartNum: 4}, + } + + data := testutil.RandByteSlice(4 << 10) + + t.Run("empty", func(t *testing.T) { + for _, rule := range rules { + t.Run(rule.String(), func(t *testing.T) { + test := func(t *testing.T, data []byte) { + res, err := iec.Encode(rule, []byte{}) + require.NoError(t, err) + + total := int(rule.DataPartNum + rule.ParityPartNum) + require.Len(t, 
res, total) + require.EqualValues(t, total, islices.CountNilsInTwoDimSlice(res)) + } + test(t, nil) + test(t, []byte{}) + }) + } + }) + + for _, rule := range rules { + t.Run(rule.String(), func(t *testing.T) { + testEncode(t, rule, data) + }) + } +} diff --git a/internal/ec/object_test.go b/internal/ec/object_test.go new file mode 100644 index 0000000000..4110c5f040 --- /dev/null +++ b/internal/ec/object_test.go @@ -0,0 +1,174 @@ +package ec_test + +import ( + "crypto/sha256" + "math/rand/v2" + "testing" + + iec "github.com/nspcc-dev/neofs-node/internal/ec" + "github.com/nspcc-dev/neofs-node/internal/testutil" + "github.com/nspcc-dev/neofs-sdk-go/checksum" + cidtest "github.com/nspcc-dev/neofs-sdk-go/container/id/test" + neofscryptotest "github.com/nspcc-dev/neofs-sdk-go/crypto/test" + "github.com/nspcc-dev/neofs-sdk-go/object" + sessiontest "github.com/nspcc-dev/neofs-sdk-go/session/test" + usertest "github.com/nspcc-dev/neofs-sdk-go/user/test" + "github.com/nspcc-dev/neofs-sdk-go/version" + "github.com/nspcc-dev/tzhash/tz" + "github.com/stretchr/testify/require" +) + +func TestGetPartInfo(t *testing.T) { + var obj object.Object + otherAttr := object.NewAttribute("any_attribute", "val") + + obj.SetAttributes(otherAttr) + + t.Run("missing", func(t *testing.T) { + pi, err := iec.GetPartInfo(obj) + require.NoError(t, err) + require.EqualValues(t, -1, pi.RuleIndex) + }) + + t.Run("failure", func(t *testing.T) { + for _, tc := range []struct { + name string + attrs map[string]string + assertErr func(t *testing.T, err error) + }{ + {name: "non-int rule index", + attrs: map[string]string{"__NEOFS__EC_RULE_IDX": "not_an_int", "__NEOFS__EC_PART_IDX": "456"}, + assertErr: func(t *testing.T, err error) { + require.ErrorContains(t, err, "invalid index attribute __NEOFS__EC_RULE_IDX: ") + require.ErrorContains(t, err, "invalid syntax") + }, + }, + {name: "negative rule index", + attrs: map[string]string{"__NEOFS__EC_RULE_IDX": "-123", "__NEOFS__EC_PART_IDX": "456"}, + 
assertErr: func(t *testing.T, err error) { + require.EqualError(t, err, "invalid index attribute __NEOFS__EC_RULE_IDX: negative value -123") + }, + }, + {name: "non-int part index", + attrs: map[string]string{"__NEOFS__EC_RULE_IDX": "123", "__NEOFS__EC_PART_IDX": "not_an_int"}, + assertErr: func(t *testing.T, err error) { + require.ErrorContains(t, err, "invalid index attribute __NEOFS__EC_PART_IDX: ") + require.ErrorContains(t, err, "invalid syntax") + }, + }, + {name: "negative part index", + attrs: map[string]string{"__NEOFS__EC_RULE_IDX": "123", "__NEOFS__EC_PART_IDX": "-456"}, + assertErr: func(t *testing.T, err error) { + require.EqualError(t, err, "invalid index attribute __NEOFS__EC_PART_IDX: negative value -456") + }, + }, + {name: "rule index without part index", + attrs: map[string]string{"__NEOFS__EC_RULE_IDX": "123"}, + assertErr: func(t *testing.T, err error) { + require.EqualError(t, err, "__NEOFS__EC_RULE_IDX attribute is set while __NEOFS__EC_PART_IDX is not") + }, + }, + {name: "part index without rule index", + attrs: map[string]string{"__NEOFS__EC_PART_IDX": "456"}, + assertErr: func(t *testing.T, err error) { + require.EqualError(t, err, "__NEOFS__EC_PART_IDX attribute is set while __NEOFS__EC_RULE_IDX is not") + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + attrs := make([]object.Attribute, 0, len(tc.attrs)/2) + for k, v := range tc.attrs { + attrs = append(attrs, object.NewAttribute(k, v)) + } + + obj.SetAttributes(append([]object.Attribute{otherAttr}, attrs...)...) 
+ + _, err := iec.GetPartInfo(obj) + tc.assertErr(t, err) + }) + } + }) + + obj.SetAttributes( + otherAttr, + object.NewAttribute("__NEOFS__EC_RULE_IDX", "123"), + object.NewAttribute("__NEOFS__EC_PART_IDX", "456"), + ) + + pi, err := iec.GetPartInfo(obj) + require.NoError(t, err) + require.Equal(t, iec.PartInfo{RuleIndex: 123, Index: 456}, pi) +} + +func TestFormObjectForECPart(t *testing.T) { + ver := version.Current() + st := sessiontest.Object() + signer := neofscryptotest.Signer() + + var parent object.Object + parent.SetVersion(&ver) + parent.SetContainerID(cidtest.ID()) + parent.SetOwner(usertest.ID()) + parent.SetCreationEpoch(rand.Uint64()) + parent.SetType(object.Type(rand.Int32())) + parent.SetSessionToken(&st) + require.NoError(t, parent.SetVerificationFields(signer)) + + partInfo := iec.PartInfo{RuleIndex: 123, Index: 456} + part := testutil.RandByteSlice(32) + + t.Run("signer failure", func(t *testing.T) { + signer := neofscryptotest.FailSigner(signer) + _, sigErr := signer.Sign(nil) + require.Error(t, sigErr) + + _, err := iec.FormObjectForECPart(signer, parent, part, partInfo) + require.ErrorContains(t, err, "set verification fields: could not set signature:") + require.ErrorContains(t, err, sigErr.Error()) + }) + + obj, err := iec.FormObjectForECPart(signer, parent, part, partInfo) + require.NoError(t, err) + + require.NoError(t, obj.VerifyID()) + require.True(t, obj.VerifySignature()) + + require.True(t, obj.HasParent()) + require.NotNil(t, obj.Parent()) + require.Equal(t, parent, *obj.Parent()) + + require.Equal(t, part, obj.Payload()) + require.EqualValues(t, len(part), obj.PayloadSize()) + + pcs, ok := obj.PayloadChecksum() + require.True(t, ok) + require.Equal(t, checksum.NewSHA256(sha256.Sum256(part)), pcs) + + require.Equal(t, parent.Version(), obj.Version()) + require.Equal(t, parent.GetContainerID(), obj.GetContainerID()) + require.Equal(t, parent.Owner(), obj.Owner()) + require.Equal(t, parent.CreationEpoch(), obj.CreationEpoch()) + 
require.Equal(t, object.TypeRegular, obj.Type())
+	require.Equal(t, parent.SessionToken(), obj.SessionToken())
+
+	_, ok = obj.PayloadHomomorphicHash()
+	require.False(t, ok)
+
+	require.Len(t, obj.Attributes(), 2)
+
+	pi, err := iec.GetPartInfo(obj)
+	require.NoError(t, err)
+	require.Equal(t, partInfo, pi)
+
+	t.Run("with homomorphic hash", func(t *testing.T) {
+		anyHash := checksum.NewTillichZemor([tz.Size]byte{1, 2, 3})
+		parent.SetPayloadHomomorphicHash(anyHash)
+
+		obj, err := iec.FormObjectForECPart(signer, parent, part, partInfo)
+		require.NoError(t, err)
+
+		phh, ok := obj.PayloadHomomorphicHash()
+		require.True(t, ok)
+		require.Equal(t, checksum.NewTillichZemor(tz.Sum(part)), phh)
+	})
+}
diff --git a/internal/ec/objects.go b/internal/ec/objects.go
new file mode 100644
index 0000000000..4dd4693fd4
--- /dev/null
+++ b/internal/ec/objects.go
@@ -0,0 +1,75 @@
+package ec
+
+import (
+	"fmt"
+
+	iobject "github.com/nspcc-dev/neofs-node/internal/object"
+	"github.com/nspcc-dev/neofs-sdk-go/checksum"
+	neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto"
+	"github.com/nspcc-dev/neofs-sdk-go/object"
+	"github.com/nspcc-dev/tzhash/tz"
+)
+
+// PartInfo groups information about single EC part produced according to some [Rule].
+type PartInfo struct {
+	// Index of EC rule in the container storage policy.
+	RuleIndex int
+	// Part index.
+	Index int
+}
+
+// GetPartInfo fetches EC part info from given object header. If one of
+// [AttributeRuleIdx] or [AttributePartIdx] attributes is set, the other must be
+// set too. If both are missing, GetPartInfo returns [PartInfo.RuleIndex] = -1
+// without error.
+func GetPartInfo(obj object.Object) (PartInfo, error) { + ruleIdx, err := iobject.GetIndexAttribute(obj, AttributeRuleIdx) + if err != nil { + return PartInfo{}, fmt.Errorf("invalid index attribute %s: %w", AttributeRuleIdx, err) + } + + partIdx, err := iobject.GetIndexAttribute(obj, AttributePartIdx) + if err != nil { + return PartInfo{}, fmt.Errorf("invalid index attribute %s: %w", AttributePartIdx, err) + } + + if ruleIdx < 0 { + if partIdx >= 0 { + return PartInfo{}, fmt.Errorf("%s attribute is set while %s is not", AttributePartIdx, AttributeRuleIdx) + } + } else if partIdx < 0 { + return PartInfo{}, fmt.Errorf("%s attribute is set while %s is not", AttributeRuleIdx, AttributePartIdx) + } + + return PartInfo{ + RuleIndex: ruleIdx, + Index: partIdx, + }, nil +} + +// FormObjectForECPart forms object for EC part produced from given parent object. +func FormObjectForECPart(signer neofscrypto.Signer, parent object.Object, part []byte, partInfo PartInfo) (object.Object, error) { + var obj object.Object + obj.SetVersion(parent.Version()) + obj.SetContainerID(parent.GetContainerID()) + obj.SetOwner(parent.Owner()) + obj.SetCreationEpoch(parent.CreationEpoch()) + obj.SetType(object.TypeRegular) + obj.SetSessionToken(parent.SessionToken()) + + obj.SetParent(&parent) + iobject.SetIntAttribute(&obj, AttributeRuleIdx, partInfo.RuleIndex) + iobject.SetIntAttribute(&obj, AttributePartIdx, partInfo.Index) + + obj.SetPayload(part) + obj.SetPayloadSize(uint64(len(part))) + if _, ok := parent.PayloadHomomorphicHash(); ok { + obj.SetPayloadHomomorphicHash(checksum.NewTillichZemor(tz.Sum(part))) + } + + if err := obj.SetVerificationFields(signer); err != nil { + return object.Object{}, fmt.Errorf("set verification fields: %w", err) + } + + return obj, nil +} diff --git a/internal/object/attributes.go b/internal/object/attributes.go new file mode 100644 index 0000000000..8e6109166f --- /dev/null +++ b/internal/object/attributes.go @@ -0,0 +1,80 @@ +package object + +import ( + 
"errors" + "fmt" + "strconv" + + "github.com/nspcc-dev/neofs-sdk-go/object" +) + +// ErrAttributeNotFound is returned when some object attribute not found. +var ErrAttributeNotFound = errors.New("attribute not found") + +// GetIndexAttribute looks up for specified index attribute in the given object +// header. Returns -1 if the attribute is missing. +// +// GetIndexAttribute ignores all attribute values except the first. +// +// Note that if attribute exists but negative, GetIndexAttribute returns error. +func GetIndexAttribute(hdr object.Object, attr string) (int, error) { + i, err := GetIntAttribute(hdr, attr) + if err != nil { + if errors.Is(err, ErrAttributeNotFound) { + return -1, nil + } + return 0, err + } + + if i < 0 { + return 0, fmt.Errorf("negative value %d", i) + } + + return i, nil +} + +// GetIntAttribute looks up for specified int attribute in the given object +// header. Returns [ErrAttributeNotFound] if the attribute is missing. +// +// GetIntAttribute ignores all attribute values except the first. +func GetIntAttribute(hdr object.Object, attr string) (int, error) { + if s := GetAttribute(hdr, attr); s != "" { + return strconv.Atoi(s) + } + return 0, ErrAttributeNotFound +} + +// GetAttribute looks up for specified attribute in the given object header. +// Returns empty string if the attribute is missing. +// +// GetIntAttribute ignores all attribute values except the first. +func GetAttribute(hdr object.Object, attr string) string { + attrs := hdr.Attributes() + for i := range attrs { + if attrs[i].Key() == attr { + return attrs[i].Value() + } + } + return "" +} + +// SetIntAttribute sets int value for the object attribute. If the attribute +// already exists, SetIntAttribute overwrites its value. +func SetIntAttribute(dst *object.Object, attr string, val int) { + SetAttribute(dst, attr, strconv.Itoa(val)) +} + +// SetAttribute sets value for the object attribute. If the attribute already +// exists, SetAttribute overwrites its value. 
+func SetAttribute(dst *object.Object, attr, val string) { + attrs := dst.Attributes() + for i := range attrs { + if attrs[i].Key() == attr { + attrs[i].SetValue(val) + dst.SetAttributes(attrs...) + return + } + } + + dst.SetAttributes(append(attrs, object.NewAttribute(attr, val))...) +} diff --git a/internal/object/attributes_test.go b/internal/object/attributes_test.go new file mode 100644 index 0000000000..88c6412dce --- /dev/null +++ b/internal/object/attributes_test.go @@ -0,0 +1,174 @@ +package object_test + +import ( + "strconv" + "testing" + + iobject "github.com/nspcc-dev/neofs-node/internal/object" + "github.com/nspcc-dev/neofs-sdk-go/object" + "github.com/stretchr/testify/require" +) + +func TestGetIndexAttribute(t *testing.T) { + var obj object.Object + const attr = "attr" + + t.Run("missing", func(t *testing.T) { + i, err := iobject.GetIndexAttribute(obj, attr) + require.NoError(t, err) + require.EqualValues(t, -1, i) + }) + + t.Run("not an integer", func(t *testing.T) { + obj.SetAttributes(object.NewAttribute(attr, "not_an_int")) + + _, err := iobject.GetIndexAttribute(obj, attr) + require.ErrorContains(t, err, "invalid syntax") + }) + + t.Run("negative", func(t *testing.T) { + obj.SetAttributes(object.NewAttribute(attr, "-123")) + + _, err := iobject.GetIndexAttribute(obj, attr) + require.EqualError(t, err, "negative value -123") + }) + + obj.SetAttributes(object.NewAttribute(attr, "1234567890")) + + i, err := iobject.GetIndexAttribute(obj, attr) + require.NoError(t, err) + require.EqualValues(t, 1234567890, i) + + t.Run("multiple", func(t *testing.T) { + for _, s := range []string{ + "not_an_int", + "-1", + "2", + } { + obj.SetAttributes( + object.NewAttribute(attr, "1"), + object.NewAttribute(attr, s), + ) + + i, err := iobject.GetIndexAttribute(obj, attr) + require.NoError(t, err) + require.EqualValues(t, 1, i) + } + }) +} + +func TestGetIntAttribute(t *testing.T) { + var obj object.Object + const attr = "attr" + + t.Run("missing", func(t 
*testing.T) { + _, err := iobject.GetIntAttribute(obj, attr) + require.ErrorIs(t, err, iobject.ErrAttributeNotFound) + }) + + t.Run("not an integer", func(t *testing.T) { + obj.SetAttributes(object.NewAttribute(attr, "not_an_int")) + + _, err := iobject.GetIntAttribute(obj, attr) + require.ErrorContains(t, err, "invalid syntax") + }) + + for _, tc := range []struct { + s string + i int + }{ + {s: "1234567890", i: 1234567890}, + {s: "0", i: 0}, + {s: "-1234567890", i: -1234567890}, + } { + obj.SetAttributes(object.NewAttribute(attr, tc.s)) + + i, err := iobject.GetIntAttribute(obj, attr) + require.NoError(t, err, tc.s) + require.EqualValues(t, tc.i, i) + } + + t.Run("multiple", func(t *testing.T) { + for _, s := range []string{ + "not_an_int", + "-1", + "2", + } { + obj.SetAttributes( + object.NewAttribute(attr, "1"), + object.NewAttribute(attr, s), + ) + + i, err := iobject.GetIntAttribute(obj, attr) + require.NoError(t, err) + require.EqualValues(t, 1, i) + } + }) +} + +func TestGetAttribute(t *testing.T) { + var obj object.Object + const attr = "attr" + + t.Run("missing", func(t *testing.T) { + require.Empty(t, iobject.GetAttribute(obj, attr)) + }) + + obj.SetAttributes(object.NewAttribute(attr, "val")) + require.Equal(t, "val", iobject.GetAttribute(obj, attr)) + + t.Run("multiple", func(t *testing.T) { + obj.SetAttributes( + object.NewAttribute(attr, "val1"), + object.NewAttribute(attr, "val2"), + ) + + require.Equal(t, "val1", iobject.GetAttribute(obj, attr)) + }) +} + +func TestSetIntAttribute(t *testing.T) { + var obj object.Object + const attr = "attr" + + obj.SetAttributes(object.NewAttribute(attr+"_other", "val")) + + check := func(t *testing.T, val int) { + iobject.SetIntAttribute(&obj, attr, val) + + attrs := obj.Attributes() + require.Len(t, attrs, 2) + require.Equal(t, attr, attrs[1].Key()) + require.Equal(t, strconv.Itoa(val), attrs[1].Value()) + + got, err := iobject.GetIntAttribute(obj, attr) + require.NoError(t, err, val) + require.EqualValues(t, 
val, got) + } + + check(t, 1234567890) + check(t, 0) + check(t, -1234567890) +} + +func TestSetAttribute(t *testing.T) { + var obj object.Object + const attr = "attr" + + obj.SetAttributes(object.NewAttribute(attr+"_other", "val")) + + check := func(t *testing.T, val string) { + iobject.SetAttribute(&obj, attr, val) + + attrs := obj.Attributes() + require.Len(t, attrs, 2) + require.Equal(t, attr, attrs[1].Key()) + require.Equal(t, val, attrs[1].Value()) + + got := iobject.GetAttribute(obj, attr) + require.Equal(t, val, got) + } + + check(t, "val1") + check(t, "val2") +} diff --git a/internal/slices/index.go b/internal/slices/index.go new file mode 100644 index 0000000000..69a0107e07 --- /dev/null +++ b/internal/slices/index.go @@ -0,0 +1,17 @@ +package slices + +import combinations "github.com/mxschmitt/golang-combinations" + +// IndexCombos returns all combinations of n indexes taken k. +func IndexCombos(n, k int) [][]int { + return combinations.Combinations(Indexes(n), k) +} + +// Indexes returns slices filled with n indexes. 
+func Indexes(n int) []int {
+	s := make([]int, n)
+	for i := range s {
+		s[i] = i
+	}
+	return s
+}
diff --git a/internal/slices/index_test.go b/internal/slices/index_test.go
new file mode 100644
index 0000000000..404bf751bb
--- /dev/null
+++ b/internal/slices/index_test.go
@@ -0,0 +1,24 @@
+package slices_test
+
+import (
+	"testing"
+
+	islices "github.com/nspcc-dev/neofs-node/internal/slices"
+	"github.com/stretchr/testify/require"
+)
+
+func TestIndexes(t *testing.T) {
+	require.Empty(t, islices.Indexes(0))
+	require.Equal(t, []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, islices.Indexes(10))
+}
+
+func TestIndexCombos(t *testing.T) {
+	require.ElementsMatch(t, islices.IndexCombos(4, 2), [][]int{
+		{0, 1},
+		{0, 2},
+		{0, 3},
+		{1, 2},
+		{1, 3},
+		{2, 3},
+	})
+}
diff --git a/internal/slices/slices.go b/internal/slices/slices.go
new file mode 100644
index 0000000000..1e81e9e22e
--- /dev/null
+++ b/internal/slices/slices.go
@@ -0,0 +1,38 @@
+package slices
+
+import "slices"
+
+// TwoDimSliceElementCount returns the total len of all inner slices of s.
+func TwoDimSliceElementCount[E any](s [][]E) int {
+	var n int
+	for i := range s {
+		n += len(s[i])
+	}
+	return n
+}
+
+// NilTwoDimSliceElements returns a clone of s with the given indexes nil-ed.
+func NilTwoDimSliceElements[T any](s [][]T, idxs []int) [][]T {
+	if s == nil {
+		return nil
+	}
+
+	c := make([][]T, len(s))
+	for i := range c {
+		if !slices.Contains(idxs, i) {
+			c[i] = slices.Clone(s[i])
+		}
+	}
+	return c
+}
+
+// CountNilsInTwoDimSlice counts nil elements of s.
+func CountNilsInTwoDimSlice[T any](s [][]T) int { + var n int + for i := range s { + if s[i] == nil { + n++ + } + } + return n +} diff --git a/internal/slices/slices_test.go b/internal/slices/slices_test.go new file mode 100644 index 0000000000..0cc125218b --- /dev/null +++ b/internal/slices/slices_test.go @@ -0,0 +1,40 @@ +package slices_test + +import ( + "testing" + + islices "github.com/nspcc-dev/neofs-node/internal/slices" + "github.com/stretchr/testify/require" +) + +func TestTwoDimElementCount(t *testing.T) { + require.Zero(t, islices.TwoDimSliceElementCount([][]int(nil))) + require.Zero(t, islices.TwoDimSliceElementCount(make([][]int, 10))) + require.EqualValues(t, 10, islices.TwoDimSliceElementCount([][]int{ + {1}, + {2, 3}, + {4, 5, 6}, + {7, 8, 9, 10}, + })) +} + +func TestNilTwoDimSliceElements(t *testing.T) { + require.Nil(t, islices.NilTwoDimSliceElements([][]int(nil), []int{1, 2, 3})) + require.Empty(t, islices.NilTwoDimSliceElements([][]int{}, []int{1, 2, 3})) + + excl := []int{1, 3} + res := islices.NilTwoDimSliceElements([][]int{ + {1}, + {2, 3}, + {4, 5, 6}, + {7, 8, 9, 10}, + }, excl) + + require.Equal(t, [][]int{ + {1}, + nil, + {4, 5, 6}, + nil, + }, res) + require.EqualValues(t, len(excl), islices.CountNilsInTwoDimSlice(res)) +} diff --git a/pkg/services/object/put/distibuted_test.go b/pkg/services/object/put/distibuted_test.go index a607f3c360..b731eafd3d 100644 --- a/pkg/services/object/put/distibuted_test.go +++ b/pkg/services/object/put/distibuted_test.go @@ -11,7 +11,6 @@ import ( cid "github.com/nspcc-dev/neofs-sdk-go/container/id" "github.com/nspcc-dev/neofs-sdk-go/netmap" - oid "github.com/nspcc-dev/neofs-sdk-go/object/id" oidtest "github.com/nspcc-dev/neofs-sdk-go/object/id/test" "github.com/stretchr/testify/require" "go.uber.org/zap" @@ -32,26 +31,6 @@ func allocNodes(n []uint) [][]netmap.NodeInfo { return res } -type testContainerNodes struct { - objID oid.ID - - sortErr error - cnrNodes [][]netmap.NodeInfo - - primCounts []uint 
-} - -func (x testContainerNodes) Unsorted() [][]netmap.NodeInfo { return x.cnrNodes } - -func (x testContainerNodes) SortForObject(obj oid.ID) ([][]netmap.NodeInfo, error) { - if x.objID != obj { - return nil, errors.New("[test] unexpected object ID") - } - return x.cnrNodes, x.sortErr -} - -func (x testContainerNodes) PrimaryCounts() []uint { return x.primCounts } - type testNetwork struct { localPubKey []byte } @@ -95,15 +74,10 @@ func TestIterateNodesForObject(t *testing.T) { localPubKey: cnrNodes[0][2].PublicKey(), }, remotePool: &rwp, - containerNodes: testContainerNodes{ - objID: objID, - cnrNodes: cnrNodes, - primCounts: []uint{2, 3, 2}, - }, } var handlerMtx sync.Mutex var handlerCalls []nodeDesc - err := iter.iterateNodesForObject(objID, false, func(node nodeDesc) error { + err := iter.iterateNodesForObject(objID, []uint{2, 3, 2}, cnrNodes, false, func(node nodeDesc) error { handlerMtx.Lock() handlerCalls = append(handlerCalls, node) handlerMtx.Unlock() @@ -183,19 +157,14 @@ func TestIterateNodesForObject(t *testing.T) { cnrNodes := allocNodes([]uint{3, 3, 2}) cnrNodes[1][1].SetPublicKey(cnrNodes[0][1].PublicKey()) iter := placementIterator{ - log: zap.NewNop(), - neoFSNet: new(testNetwork), - remotePool: new(testWorkerPool), - containerNodes: testContainerNodes{ - objID: objID, - cnrNodes: cnrNodes, - primCounts: []uint{2, 1, 2}, - }, + log: zap.NewNop(), + neoFSNet: new(testNetwork), + remotePool: new(testWorkerPool), linearReplNum: 4, } var handlerMtx sync.Mutex var handlerCalls [][]byte - err := iter.iterateNodesForObject(objID, false, func(node nodeDesc) error { + err := iter.iterateNodesForObject(objID, []uint{2, 1, 2}, cnrNodes, false, func(node nodeDesc) error { handlerMtx.Lock() handlerCalls = append(handlerCalls, node.info.PublicKey()) handlerMtx.Unlock() @@ -227,15 +196,10 @@ func TestIterateNodesForObject(t *testing.T) { log: zap.NewNop(), neoFSNet: new(testNetwork), remotePool: new(testWorkerPool), - containerNodes: testContainerNodes{ - 
objID: objID, - cnrNodes: cnrNodes, - primCounts: []uint{1, 1, 1}, - }, } var handlerMtx sync.Mutex var handlerCalls [][]byte - err := iter.iterateNodesForObject(objID, true, func(node nodeDesc) error { + err := iter.iterateNodesForObject(objID, []uint{1, 1, 1}, cnrNodes, true, func(node nodeDesc) error { handlerMtx.Lock() handlerCalls = append(handlerCalls, node.info.PublicKey()) handlerMtx.Unlock() @@ -259,21 +223,6 @@ func TestIterateNodesForObject(t *testing.T) { }, key) } }) - t.Run("sort nodes for object failure", func(t *testing.T) { - objID := oidtest.ID() - iter := placementIterator{ - log: zap.NewNop(), - containerNodes: testContainerNodes{ - objID: objID, - sortErr: errors.New("any sort error"), - }, - } - err := iter.iterateNodesForObject(objID, false, func(nodeDesc) error { - t.Fatal("must not be called") - return nil - }) - require.EqualError(t, err, "sort container nodes for the object: any sort error") - }) t.Run("worker pool failure", func(t *testing.T) { // nodes: [A B] [C D E] [F] // policy: [2 2 1] @@ -289,15 +238,10 @@ func TestIterateNodesForObject(t *testing.T) { log: zap.NewNop(), neoFSNet: new(testNetwork), remotePool: &wp, - containerNodes: testContainerNodes{ - objID: objID, - cnrNodes: cnrNodes, - primCounts: []uint{2, 2, 1}, - }, } var handlerMtx sync.Mutex var handlerCalls [][]byte - err := iter.iterateNodesForObject(objID, false, func(node nodeDesc) error { + err := iter.iterateNodesForObject(objID, []uint{2, 2, 1}, cnrNodes, false, func(node nodeDesc) error { handlerMtx.Lock() handlerCalls = append(handlerCalls, node.info.PublicKey()) handlerMtx.Unlock() @@ -333,15 +277,10 @@ func TestIterateNodesForObject(t *testing.T) { log: zap.NewNop(), neoFSNet: new(testNetwork), remotePool: &wp, - containerNodes: testContainerNodes{ - objID: objID, - cnrNodes: cnrNodes, - primCounts: []uint{2, 4, 1}, - }, } var handlerMtx sync.Mutex var handlerCalls [][]byte - err := iter.iterateNodesForObject(objID, false, func(node nodeDesc) error { + err := 
iter.iterateNodesForObject(objID, []uint{2, 4, 1}, cnrNodes, false, func(node nodeDesc) error { handlerMtx.Lock() handlerCalls = append(handlerCalls, node.info.PublicKey()) handlerMtx.Unlock() @@ -371,15 +310,10 @@ func TestIterateNodesForObject(t *testing.T) { log: zap.NewNop(), neoFSNet: new(testNetwork), remotePool: &wp, - containerNodes: testContainerNodes{ - objID: objID, - cnrNodes: cnrNodes, - primCounts: []uint{2, 2, 1}, - }, } var handlerMtx sync.Mutex var handlerCalls [][]byte - err := iter.iterateNodesForObject(objID, false, func(node nodeDesc) error { + err := iter.iterateNodesForObject(objID, []uint{2, 2, 1}, cnrNodes, false, func(node nodeDesc) error { handlerMtx.Lock() handlerCalls = append(handlerCalls, node.info.PublicKey()) handlerMtx.Unlock() @@ -412,15 +346,10 @@ func TestIterateNodesForObject(t *testing.T) { log: zap.NewNop(), neoFSNet: new(testNetwork), remotePool: &wp, - containerNodes: testContainerNodes{ - objID: objID, - cnrNodes: cnrNodes, - primCounts: []uint{2, 2, 1}, - }, } var handlerMtx sync.Mutex var handlerCalls [][]byte - err := iter.iterateNodesForObject(objID, false, func(node nodeDesc) error { + err := iter.iterateNodesForObject(objID, []uint{2, 2, 1}, cnrNodes, false, func(node nodeDesc) error { handlerMtx.Lock() handlerCalls = append(handlerCalls, node.info.PublicKey()) handlerMtx.Unlock() @@ -451,16 +380,11 @@ func TestIterateNodesForObject(t *testing.T) { err: errors.New("pool err"), nFail: 2, }, - containerNodes: testContainerNodes{ - objID: objID, - cnrNodes: cnrNodes, - primCounts: []uint{2, 3, 1}, - }, } blockCh := make(chan struct{}) returnCh := make(chan struct{}) go func() { - err := iter.iterateNodesForObject(objID, false, func(node nodeDesc) error { + err := iter.iterateNodesForObject(objID, []uint{2, 3, 1}, cnrNodes, false, func(node nodeDesc) error { <-blockCh return nil }) diff --git a/pkg/services/object/put/distributed.go b/pkg/services/object/put/distributed.go index 52a0a58c32..3c376663b0 100644 --- 
a/pkg/services/object/put/distributed.go +++ b/pkg/services/object/put/distributed.go @@ -11,6 +11,7 @@ import ( "sync" "sync/atomic" + iec "github.com/nspcc-dev/neofs-node/internal/ec" "github.com/nspcc-dev/neofs-node/pkg/core/client" netmapcore "github.com/nspcc-dev/neofs-node/pkg/core/netmap" "github.com/nspcc-dev/neofs-node/pkg/core/object" @@ -44,8 +45,10 @@ type distributedTarget struct { objSharedMeta []byte collectedSignatures [][]byte + containerNodes ContainerNodes localNodeInContainer bool localNodeSigner neofscrypto.Signer + sessionSigner neofscrypto.Signer // - object if localOnly // - replicate request if localNodeInContainer // - payload otherwise @@ -62,6 +65,10 @@ type distributedTarget struct { keyStorage *svcutil.KeyStorage localOnly bool + + // When object from request is an EC part, ecPart.RuleIndex is >= 0. + // Undefined when policy have no EC rules. + ecPart iec.PartInfo } type nodeDesc struct { @@ -125,7 +132,6 @@ func (t *distributedTarget) Close() (oid.ID, error) { defer func() { putPayload(t.encodedObject.b) t.encodedObject.b = nil - t.collectedSignatures = nil }() t.obj.SetPayload(t.encodedObject.b[t.encodedObject.pldOff:]) @@ -154,6 +160,51 @@ func (t *distributedTarget) Close() (oid.ID, error) { } func (t *distributedTarget) saveObject(obj objectSDK.Object, objMeta object.ContentMeta, encObj encodedObject) error { + if t.localOnly && t.sessionSigner == nil { + return t.distributeObject(obj, objMeta, encObj, nil) + } + + objNodeLists, err := t.containerNodes.SortForObject(t.obj.GetID()) + if err != nil { + return fmt.Errorf("sort container nodes by object ID: %w", err) + } + + // TODO: handle rules in parallel. 
https://github.com/nspcc-dev/neofs-node/issues/3503 + + repRules := t.containerNodes.PrimaryCounts() + if len(repRules) > 0 { + typ := obj.Type() + broadcast := typ == objectSDK.TypeTombstone || typ == objectSDK.TypeLink || typ == objectSDK.TypeLock || len(obj.Children()) > 0 + return t.distributeObject(obj, objMeta, encObj, func(obj objectSDK.Object, objMeta object.ContentMeta, encObj encodedObject) error { + return t.placementIterator.iterateNodesForObject(obj.GetID(), repRules, objNodeLists, broadcast, func(node nodeDesc) error { + return t.sendObject(obj, objMeta, encObj, node) + }) + }) + } + + if ecRules := t.containerNodes.ECRules(); len(ecRules) > 0 { + if t.ecPart.RuleIndex >= 0 { // already encoded EC part + total := int(ecRules[t.ecPart.RuleIndex].DataPartNum + ecRules[t.ecPart.RuleIndex].ParityPartNum) + nodes := objNodeLists[len(repRules)+t.ecPart.RuleIndex] + return t.saveECPart(obj, objMeta, encObj, t.ecPart.Index, total, nodes) + } + + if t.sessionSigner != nil { + if err := t.ecAndSaveObject(t.sessionSigner, obj, ecRules, objNodeLists[len(repRules):]); err != nil { + return err + } + } + } + + return nil +} + +func (t *distributedTarget) distributeObject(obj objectSDK.Object, objMeta object.ContentMeta, encObj encodedObject, + placementFn func(obj objectSDK.Object, objMeta object.ContentMeta, encObj encodedObject) error) error { + defer func() { + t.collectedSignatures = nil + }() + if t.localNodeInContainer && t.metainfoConsistencyAttr != "" { t.objSharedMeta = t.encodeObjectMetadata(obj) } @@ -170,11 +221,7 @@ func (t *distributedTarget) saveObject(obj objectSDK.Object, objMeta object.Cont err = errIncompletePut{singleErr: fmt.Errorf("%w (last node error: %w)", errNotEnoughNodes{required: 1}, err)} } } else { - typ := obj.Type() - broadcast := typ == objectSDK.TypeTombstone || typ == objectSDK.TypeLink || typ == objectSDK.TypeLock || len(obj.Children()) > 0 - err = t.placementIterator.iterateNodesForObject(id, broadcast, func(node nodeDesc) error 
{ - return t.sendObject(obj, objMeta, encObj, node) - }) + err = placementFn(obj, objMeta, encObj) } if err != nil { return err @@ -385,25 +432,17 @@ type placementIterator struct { neoFSNet NeoFSNetwork remotePool util.WorkerPool /* request-dependent */ - containerNodes ContainerNodes // when non-zero, this setting simplifies the object's storage policy // requirements to a fixed number of object replicas to be retained linearReplNum uint } -func (x placementIterator) iterateNodesForObject(obj oid.ID, broadcast bool, f func(nodeDesc) error) error { - var replCounts []uint +func (x placementIterator) iterateNodesForObject(obj oid.ID, replCounts []uint, nodeLists [][]netmap.NodeInfo, broadcast bool, f func(nodeDesc) error) error { var l = x.log.With(zap.Stringer("oid", obj)) - nodeLists, err := x.containerNodes.SortForObject(obj) - if err != nil { - return fmt.Errorf("sort container nodes for the object: %w", err) - } if x.linearReplNum > 0 { ns := slices.Concat(nodeLists...) nodeLists = [][]netmap.NodeInfo{ns} replCounts = []uint{x.linearReplNum} - } else { - replCounts = x.containerNodes.PrimaryCounts() } var processedNodesMtx sync.RWMutex var nextNodeGroupKeys []string @@ -447,7 +486,7 @@ func (x placementIterator) iterateNodesForObject(obj oid.ID, broadcast bool, f f // latency and volume of "unfinished" data to be garbage-collected. Also after // the failure of any of the nodes the ability to comply with the policy // requirements may be lost. 
- for i := range nodeLists { + for i := range replCounts { listInd := i for { replRem := replCounts[listInd] - nodesCounters[listInd].stored @@ -456,7 +495,7 @@ func (x placementIterator) iterateNodesForObject(obj oid.ID, broadcast bool, f f } listLen := uint(len(nodeLists[listInd])) if listLen-nodesCounters[listInd].processed < replRem { - err = errNotEnoughNodes{listIndex: listInd, required: replRem, left: listLen - nodesCounters[listInd].processed} + var err error = errNotEnoughNodes{listIndex: listInd, required: replRem, left: listLen - nodesCounters[listInd].processed} if e, _ := lastRespErr.Load().(error); e != nil { err = fmt.Errorf("%w (last node error: %w)", err, e) } @@ -492,7 +531,7 @@ func (x placementIterator) iterateNodesForObject(obj oid.ID, broadcast bool, f f l.Error("failed to decode network endpoints of the storage node from the network map, skip the node", zap.String("public key", netmap.StringifyPublicKey(nodeLists[listInd][j])), zap.Error(nr.convertErr)) if listLen-nodesCounters[listInd].processed-1 < replRem { // -1 includes current node failure - err = fmt.Errorf("%w (last node error: failed to decode network addresses: %w)", + err := fmt.Errorf("%w (last node error: failed to decode network addresses: %w)", errNotEnoughNodes{listIndex: listInd, required: replRem, left: listLen - nodesCounters[listInd].processed - 1}, nr.convertErr) return errIncompletePut{singleErr: err} diff --git a/pkg/services/object/put/ec.go b/pkg/services/object/put/ec.go new file mode 100644 index 0000000000..324a1722dd --- /dev/null +++ b/pkg/services/object/put/ec.go @@ -0,0 +1,135 @@ +package putsvc + +import ( + "fmt" + "math" + "slices" + + iec "github.com/nspcc-dev/neofs-node/internal/ec" + objectcore "github.com/nspcc-dev/neofs-node/pkg/core/object" + neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" + "github.com/nspcc-dev/neofs-sdk-go/netmap" + "github.com/nspcc-dev/neofs-sdk-go/object" + "go.uber.org/zap" +) + +func (t *distributedTarget) 
ecAndSaveObject(signer neofscrypto.Signer, obj object.Object, ecRules []iec.Rule, nodeLists [][]netmap.NodeInfo) error { + for i := range ecRules { + if slices.Contains(ecRules[:i], ecRules[i]) { // has already been processed, see below + continue + } + + payloadParts, err := iec.Encode(ecRules[i], obj.Payload()) + if err != nil { + return fmt.Errorf("split object payload into EC parts for rule #%d (%s): %w", i, ecRules[i], err) + } + + if err := t.applyECRule(signer, obj, i, payloadParts, nodeLists[i]); err != nil { + return fmt.Errorf("apply EC rule #%d (%s): %w", i, ecRules[i], err) + } + + for j := i + 1; j < len(ecRules); j++ { + if ecRules[i] != ecRules[j] { + continue + } + if err := t.applyECRule(signer, obj, j, payloadParts, nodeLists[j]); err != nil { + return fmt.Errorf("apply EC rule #%d (%s): %w", j, ecRules[j], err) + } + } + } + + return nil +} + +func (t *distributedTarget) applyECRule(signer neofscrypto.Signer, obj object.Object, ruleIdx int, payloadParts [][]byte, nodeList []netmap.NodeInfo) error { + for partIdx := range payloadParts { + // TODO: each part is handled independently, so this worth concurrent execution. https://github.com/nspcc-dev/neofs-node/issues/3504 + // Note that distributeTarget.distributeObject is not thread-safe. 
+ if err := t.formAndSaveObjectForECPart(signer, obj, ruleIdx, partIdx, payloadParts, nodeList); err != nil { + return fmt.Errorf("form and save object for part %d: %w", partIdx, err) + } + } + + return nil +} + +func (t *distributedTarget) formAndSaveObjectForECPart(signer neofscrypto.Signer, obj object.Object, ruleIdx, partIdx int, payloadParts [][]byte, nodeList []netmap.NodeInfo) error { + partObj, err := iec.FormObjectForECPart(signer, obj, payloadParts[partIdx], iec.PartInfo{ + RuleIndex: ruleIdx, + Index: partIdx, + }) + if err != nil { + return fmt.Errorf("form object for part: %w", err) + } + + var encObj encodedObject + // similar to pkg/services/object/put/distributed.go:95 + if t.localNodeInContainer { + payloadLen := partObj.PayloadSize() + if payloadLen > math.MaxInt { + return fmt.Errorf("too big payload of physically stored for this server %d > %d", payloadLen, math.MaxInt) + } + + hdr := partObj + hdr.SetPayload(nil) + + if t.localOnly { + encObj, err = encodeObjectWithoutPayload(hdr, int(payloadLen)) + } else { + encObj, err = encodeReplicateRequestWithoutPayload(t.localNodeSigner, hdr, int(payloadLen), t.metainfoConsistencyAttr != "") + } + if err != nil { + return fmt.Errorf("encode object into binary: %w", err) + } + + defer putPayload(encObj.b) + + encObj.b = append(encObj.b, partObj.Payload()...) 
+ } + + if err := t.saveECPart(partObj, objectcore.ContentMeta{}, encObj, partIdx, len(payloadParts), nodeList); err != nil { + return fmt.Errorf("save part object: %w", err) + } + + return nil +} + +func (t *distributedTarget) saveECPart(part object.Object, objMeta objectcore.ContentMeta, encObj encodedObject, idx, total int, nodeList []netmap.NodeInfo) error { + return t.distributeObject(part, objMeta, encObj, func(obj object.Object, objMeta objectcore.ContentMeta, encObj encodedObject) error { + return t.distributeECPart(obj, objMeta, encObj, idx, total, nodeList) + }) +} + +func (t *distributedTarget) distributeECPart(part object.Object, objMeta objectcore.ContentMeta, enc encodedObject, idx, total int, nodeList []netmap.NodeInfo) error { + var firstErr error + for { + err := t.saveECPartOnNode(part, objMeta, enc, nodeList[idx]) + if err == nil { + return nil + } + + na := slices.Collect(nodeList[idx].NetworkEndpoints()) + if firstErr == nil { + firstErr = fmt.Errorf("save on SN #%d (%s): %w", idx, na, err) + } else { + t.placementIterator.log.Info("failed to save EC part on reserve SN", zap.Error(err), zap.Strings("addresses", na)) + } + + if idx += total; idx >= len(nodeList) { + return errIncompletePut{singleErr: firstErr} + } + } +} + +func (t *distributedTarget) saveECPartOnNode(obj object.Object, objMeta objectcore.ContentMeta, enc encodedObject, node netmap.NodeInfo) error { + var n nodeDesc + n.local = t.placementIterator.neoFSNet.IsLocalNodePublicKey(node.PublicKey()) + if !n.local { + var err error + if n.info, err = convertNodeInfo(node); err != nil { + return fmt.Errorf("convert node info: %w", err) + } + } + + return t.sendObject(obj, objMeta, enc, n) +} diff --git a/pkg/services/object/put/prm.go b/pkg/services/object/put/prm.go index 106b1ed966..58761e3734 100644 --- a/pkg/services/object/put/prm.go +++ b/pkg/services/object/put/prm.go @@ -1,6 +1,7 @@ package putsvc import ( + iec "github.com/nspcc-dev/neofs-node/internal/ec" 
"github.com/nspcc-dev/neofs-node/pkg/core/client" "github.com/nspcc-dev/neofs-node/pkg/services/object/util" containerSDK "github.com/nspcc-dev/neofs-sdk-go/container" @@ -20,9 +21,11 @@ type PutInitPrm struct { relay func(client.NodeInfo, client.MultiAddressClient) error containerNodes ContainerNodes + ecPart iec.PartInfo localNodeInContainer bool localSignerRFC6979 neofscrypto.Signer localNodeSigner neofscrypto.Signer + sessionSigner neofscrypto.Signer } type PutChunkPrm struct { diff --git a/pkg/services/object/put/service.go b/pkg/services/object/put/service.go index b556a579ff..e29cb1f011 100644 --- a/pkg/services/object/put/service.go +++ b/pkg/services/object/put/service.go @@ -3,6 +3,7 @@ package putsvc import ( "context" + iec "github.com/nspcc-dev/neofs-node/internal/ec" "github.com/nspcc-dev/neofs-node/pkg/core/client" "github.com/nspcc-dev/neofs-node/pkg/core/container" "github.com/nspcc-dev/neofs-node/pkg/core/netmap" @@ -51,6 +52,8 @@ type ContainerNodes interface { // matching storage policy of the container. Nodes are identified by their // public keys and can be repeated in different sets. // + // First PrimaryCounts() sets are for replication, the rest are for ECRules(). + // // Unsorted callers do not change resulting slices and their elements. Unsorted() [][]netmapsdk.NodeInfo // SortForObject sorts container nodes for the referenced object's storage. @@ -63,6 +66,11 @@ type ContainerNodes interface { // - first N nodes of each L are primary data holders while others (if any) // are backup. PrimaryCounts() []uint + // ECRules returns list of erasure coding rules for all objects in the + // container. Same rule may repeat. + // + // ECRules callers do not change resulting slice. 
+ ECRules() []iec.Rule } // NeoFSNetwork provides access to the NeoFS network to get information diff --git a/pkg/services/object/put/service_test.go b/pkg/services/object/put/service_test.go index d912150be1..e3e72c9d79 100644 --- a/pkg/services/object/put/service_test.go +++ b/pkg/services/object/put/service_test.go @@ -2,16 +2,22 @@ package putsvc import ( "bytes" + "cmp" "context" "crypto/sha256" "errors" "fmt" "io" "slices" + "strconv" "sync" "testing" "github.com/google/uuid" + "github.com/klauspost/reedsolomon" + iec "github.com/nspcc-dev/neofs-node/internal/ec" + iobject "github.com/nspcc-dev/neofs-node/internal/object" + islices "github.com/nspcc-dev/neofs-node/internal/slices" "github.com/nspcc-dev/neofs-node/internal/testutil" clientcore "github.com/nspcc-dev/neofs-node/pkg/core/client" objutil "github.com/nspcc-dev/neofs-node/pkg/services/object/util" @@ -68,6 +74,37 @@ func Test_Slicing_REP3(t *testing.T) { } } +func Test_Slicing_EC(t *testing.T) { + rules := []iec.Rule{ + {DataPartNum: 2, ParityPartNum: 2}, + {DataPartNum: 3, ParityPartNum: 1}, + {DataPartNum: 6, ParityPartNum: 3}, + {DataPartNum: 12, ParityPartNum: 4}, + } + + for _, tc := range []struct { + name string + ln uint64 + skip string + }{ + {name: "no payload", ln: 0}, + {name: "1B", ln: 1}, + {name: "limit-1B", ln: maxObjectSize - 1}, + {name: "exactly limit", ln: maxObjectSize}, + {name: "limit+1b", ln: maxObjectSize + 1, skip: "https://github.com/nspcc-dev/neofs-node/issues/3500"}, + {name: "limitX2", ln: maxObjectSize * 2, skip: "https://github.com/nspcc-dev/neofs-node/issues/3500"}, + {name: "limitX4-1", ln: maxObjectSize + 4 - 1, skip: "https://github.com/nspcc-dev/neofs-node/issues/3500"}, + {name: "limitX5", ln: maxObjectSize * 5, skip: "https://github.com/nspcc-dev/neofs-node/issues/3500"}, + } { + t.Run(tc.name, func(t *testing.T) { + if tc.skip != "" { + t.Skip(tc.skip) + } + testSlicingECRules(t, tc.ln, rules) + }) + } +} + func testSlicingREP3(t *testing.T, ln uint64) { const 
repNodes = 3 const cnrReserveNodes = 2 @@ -146,6 +183,75 @@ func testSlicingREP3(t *testing.T, ln uint64) { } } +func testSlicingECRules(t *testing.T, ln uint64, rules []iec.Rule) { + maxRule := slices.MaxFunc(rules, func(a, b iec.Rule) int { + return cmp.Compare(a.DataPartNum+a.ParityPartNum, b.DataPartNum+b.ParityPartNum) + }) + + maxTotalParts := int(maxRule.DataPartNum + maxRule.ParityPartNum) + const cnrReserveNodes = 2 + const outCnrNodes = 2 + + cluster := newTestClusterForRepPolicy(t, uint(maxTotalParts), cnrReserveNodes, outCnrNodes) + for i := range cluster.nodeNetworks { + // TODO: add alternative to newTestClusterForRepPolicy for EC instead + cluster.nodeNetworks[i].cnrNodes.repCounts = nil + for range len(rules) - 1 { + cluster.nodeNetworks[i].cnrNodes.unsorted = append(cluster.nodeNetworks[i].cnrNodes.unsorted, cluster.nodeNetworks[i].cnrNodes.unsorted[0]) + cluster.nodeNetworks[i].cnrNodes.sorted = append(cluster.nodeNetworks[i].cnrNodes.sorted, cluster.nodeNetworks[i].cnrNodes.sorted[0]) + } + cluster.nodeNetworks[i].cnrNodes.ecRules = rules + } + + var srcObj object.Object + srcObj.SetContainerID(cidtest.ID()) + srcObj.SetOwner(usertest.ID()) + srcObj.SetAttributes( + object.NewAttribute("attr1", "val1"), + object.NewAttribute("attr2", "val2"), + ) + + var sessionToken session.Object + sessionToken.SetID(uuid.New()) + sessionToken.SetExp(1) + sessionToken.BindContainer(cidtest.ID()) + srcObj.SetPayload(testutil.RandByteSlice(ln)) + + testThroughNode := func(t *testing.T, idx int) { + sessionToken.SetAuthKey(cluster.nodeSessions[idx].signer.Public()) + require.NoError(t, sessionToken.Sign(usertest.User())) + + storeObjectWithSession(t, cluster.nodeServices[idx], srcObj, sessionToken) + + nodeObjLists := cluster.allStoredObjects() + + var restoredObj object.Object + if ln > maxObjectSize { + restoredObj = checkAndCutSplitECObject(t, ln, sessionToken, rules, nodeObjLists) + } else { + restoredObj = checkAndCutUnsplitECObject(t, rules, nodeObjLists) + 
} + + require.Zero(t, islices.TwoDimSliceElementCount(nodeObjLists)) + + assertObjectIntegrity(t, restoredObj) + require.Equal(t, sessionToken, *restoredObj.SessionToken()) + require.Equal(t, srcObj.GetContainerID(), restoredObj.GetContainerID()) + require.Equal(t, sessionToken.Issuer(), restoredObj.Owner()) + require.EqualValues(t, currentEpoch, restoredObj.CreationEpoch()) + require.Equal(t, object.TypeRegular, restoredObj.Type()) + require.Equal(t, srcObj.Attributes(), restoredObj.Attributes()) + require.False(t, restoredObj.HasParent()) + require.True(t, bytes.Equal(srcObj.Payload(), restoredObj.Payload())) + + cluster.resetAllStoredObjects() + } + + for i := range maxTotalParts + cnrReserveNodes + outCnrNodes { + testThroughNode(t, i) + } +} + func newTestClusterForRepPolicy(t *testing.T, repNodes, cnrReserveNodes, outCnrNodes uint) *testCluster { allNodes := allocNodes([]uint{repNodes + cnrReserveNodes + outCnrNodes})[0] cnrNodes := allNodes[:repNodes+cnrReserveNodes] @@ -166,7 +272,7 @@ func newTestClusterForRepPolicy(t *testing.T, repNodes, cnrReserveNodes, outCnrN for i := range allNodes { nodeKey := neofscryptotest.ECDSAPrivateKey() - nodeWorkerPool, err := ants.NewPool(10, ants.WithNonblocking(true)) + nodeWorkerPool, err := ants.NewPool(len(cnrNodes), ants.WithNonblocking(true)) require.NoError(t, err) cluster.nodeNetworks[i] = mockNetwork{ @@ -240,6 +346,7 @@ type mockContainerNodes struct { unsorted [][]netmap.NodeInfo sorted [][]netmap.NodeInfo repCounts []uint + ecRules []iec.Rule } func (x mockContainerNodes) Unsorted() [][]netmap.NodeInfo { @@ -254,6 +361,10 @@ func (x mockContainerNodes) PrimaryCounts() []uint { return x.repCounts } +func (x mockContainerNodes) ECRules() []iec.Rule { + return x.ecRules +} + type mockMaxSize uint64 func (x mockMaxSize) MaxObjectSize() uint64 { @@ -637,3 +748,154 @@ func assertObjectIntegrity(t *testing.T, obj object.Object) { require.Zero(t, obj.SplitID()) } + +func checkAndGetObjectFromECParts(t *testing.T, limit 
uint64, rule iec.Rule, parts []object.Object) object.Object { + require.Len(t, parts, int(rule.DataPartNum+rule.ParityPartNum)) + + for _, part := range parts { + assertObjectIntegrity(t, part) + require.LessOrEqual(t, part.PayloadSize(), limit) + } + + hdr := checkAndCutParentHeaderFromECPart(t, parts[0]) + + for i := 1; i < len(parts); i++ { + hdrI := checkAndCutParentHeaderFromECPart(t, parts[i]) + require.Equal(t, hdr, hdrI) + } + + payload := checkAndGetPayloadFromECParts(t, hdr.PayloadSize(), rule, parts) + + res := hdr + res.SetPayload(payload) + + return res +} + +func checkAndGetPayloadFromECParts(t *testing.T, ln uint64, rule iec.Rule, parts []object.Object) []byte { + var payloadParts [][]byte + for i := range parts { + payloadParts = append(payloadParts, parts[i].Payload()) + } + + if ln == 0 { + require.Negative(t, slices.IndexFunc(payloadParts, func(e []byte) bool { return len(e) > 0 })) + return nil + } + + enc, err := reedsolomon.New(int(rule.DataPartNum), int(rule.ParityPartNum)) + require.NoError(t, err) + + ok, err := enc.Verify(payloadParts) + require.NoError(t, err) + require.True(t, ok) + + required := make([]bool, rule.DataPartNum+rule.ParityPartNum) + for i := range rule.DataPartNum { + required[i] = true + } + + for lostCount := 1; lostCount <= int(rule.ParityPartNum); lostCount++ { + for _, lostIdxs := range islices.IndexCombos(len(payloadParts), lostCount) { + brokenParts := islices.NilTwoDimSliceElements(payloadParts, lostIdxs) + require.NoError(t, enc.Reconstruct(brokenParts)) + require.Equal(t, payloadParts, brokenParts) + + brokenParts = islices.NilTwoDimSliceElements(payloadParts, lostIdxs) + require.NoError(t, enc.ReconstructSome(brokenParts, required)) + require.Equal(t, payloadParts[:rule.DataPartNum], brokenParts[:rule.DataPartNum]) + } + } + + for _, lostIdxs := range islices.IndexCombos(len(payloadParts), int(rule.ParityPartNum)+1) { + require.Error(t, enc.Reconstruct(islices.NilTwoDimSliceElements(payloadParts, lostIdxs))) + 
require.Error(t, enc.ReconstructSome(islices.NilTwoDimSliceElements(payloadParts, lostIdxs), required)) + } + + payload := slices.Concat(payloadParts[:rule.DataPartNum]...) + + require.GreaterOrEqual(t, uint64(len(payload)), ln) + + require.False(t, slices.ContainsFunc(payload[ln:], func(b byte) bool { return b != 0 })) + + return payload[:ln] +} + +func checkAndCutParentHeaderFromECPart(t *testing.T, part object.Object) object.Object { + par := part.Parent() + require.NotNil(t, par) + + require.Equal(t, par.Version(), part.Version()) + require.Equal(t, par.GetContainerID(), part.GetContainerID()) + require.Equal(t, par.Owner(), part.Owner()) + require.Equal(t, par.CreationEpoch(), part.CreationEpoch()) + require.Equal(t, object.TypeRegular, part.Type()) + require.Equal(t, par.SessionToken(), part.SessionToken()) + + return *par +} + +func checkAndGetECPartInfo(t testing.TB, part object.Object) (int, int) { + ruleIdxAttr := iobject.GetAttribute(part, "__NEOFS__EC_RULE_IDX") + require.NotZero(t, ruleIdxAttr) + ruleIdx, err := strconv.Atoi(ruleIdxAttr) + require.NoError(t, err) + require.True(t, ruleIdx >= 0) + + partIdxAttr := iobject.GetAttribute(part, "__NEOFS__EC_PART_IDX") + require.NotZero(t, partIdxAttr) + partIdx, err := strconv.Atoi(partIdxAttr) + require.NoError(t, err) + require.True(t, partIdx >= 0) + + return ruleIdx, partIdx +} + +func checkAndCutSplitECObject(t *testing.T, ln uint64, sessionToken session.Object, rules []iec.Rule, nodeObjLists [][]object.Object) object.Object { + splitPartCount := splitMembersCount(maxObjectSize, ln) + + var expectedCount int + for i := range rules { + expectedCount += int(rules[i].DataPartNum+rules[i].ParityPartNum) * splitPartCount + } + + require.EqualValues(t, expectedCount, islices.TwoDimSliceElementCount(nodeObjLists)) + + var splitParts []object.Object + for range splitPartCount { + splitPart := checkAndCutUnsplitECObject(t, rules, nodeObjLists) + splitParts = append(splitParts, splitPart) + } + + restoredObj := 
assertSplitChain(t, maxObjectSize, ln, sessionToken, splitParts) + + return restoredObj +} + +func checkAndCutUnsplitECObject(t *testing.T, rules []iec.Rule, nodeObjLists [][]object.Object) object.Object { + ecParts := checkAndCutECPartsForRule(t, 0, rules[0], nodeObjLists) + restoredObj := checkAndGetObjectFromECParts(t, maxObjectSize, rules[0], ecParts) + + for i := 1; i < len(rules); i++ { + ecPartsI := checkAndCutECPartsForRule(t, i, rules[i], nodeObjLists) + restoredObjI := checkAndGetObjectFromECParts(t, maxObjectSize, rules[i], ecPartsI) + require.Equal(t, restoredObj, restoredObjI) + } + + return restoredObj +} + +func checkAndCutECPartsForRule(t *testing.T, ruleIdx int, rule iec.Rule, nodeObjLists [][]object.Object) []object.Object { + var parts []object.Object + + for i := range rule.DataPartNum + rule.ParityPartNum { + gotRuleIdx, partIdx := checkAndGetECPartInfo(t, nodeObjLists[i][0]) + require.EqualValues(t, ruleIdx, gotRuleIdx) + require.EqualValues(t, i, partIdx) + + parts = append(parts, nodeObjLists[i][0]) + nodeObjLists[i] = nodeObjLists[i][1:] + } + + return parts +} diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go index 5f898b4124..ea463213e7 100644 --- a/pkg/services/object/put/streamer.go +++ b/pkg/services/object/put/streamer.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" + iec "github.com/nspcc-dev/neofs-node/internal/ec" "github.com/nspcc-dev/neofs-node/pkg/core/client" "github.com/nspcc-dev/neofs-node/pkg/services/object/internal" "github.com/nspcc-dev/neofs-node/pkg/services/object/util" @@ -121,6 +122,8 @@ func (p *Streamer) initTarget(prm *PutInitPrm) error { } } + sessionSigner := user.NewAutoIDSigner(*sessionKey) + prm.sessionSigner = sessionSigner p.target = &validatingTarget{ fmt: p.fmtValidator, unpreparedObject: true, @@ -128,7 +131,7 @@ func (p *Streamer) initTarget(prm *PutInitPrm) error { p.ctx, p.maxPayloadSz, !homomorphicChecksumRequired, - user.NewAutoIDSigner(*sessionKey), + sessionSigner, 
sToken, p.networkState.CurrentEpoch(), p.newCommonTarget(prm), @@ -166,14 +169,32 @@ func (p *Streamer) preparePrm(prm *PutInitPrm) error { return fmt.Errorf("select storage nodes for the container: %w", err) } cnrNodes := prm.containerNodes.Unsorted() -nextSet: - for i := range cnrNodes { - for j := range cnrNodes[i] { - prm.localNodeInContainer = p.neoFSNet.IsLocalNodePublicKey(cnrNodes[i][j].PublicKey()) - if prm.localNodeInContainer { - break nextSet + ecRulesN := len(prm.containerNodes.ECRules()) + if ecRulesN > 0 { + ecPart, err := iec.GetPartInfo(*prm.hdr) + if err != nil { + return fmt.Errorf("get EC part info from object header: %w", err) + } + + repRulesN := len(prm.containerNodes.PrimaryCounts()) + if ecPart.Index >= 0 { + if ecPart.RuleIndex >= ecRulesN { + return fmt.Errorf("invalid EC part info in object header: EC rule idx=%d with %d rules in total", ecPart.RuleIndex, ecRulesN) + } + if prm.hdr.Signature() == nil { + return errors.New("unsigned EC part object") } + prm.localNodeInContainer = localNodeInSet(p.neoFSNet, cnrNodes[repRulesN+ecPart.RuleIndex]) + } else { + if repRulesN == 0 && prm.hdr.Signature() != nil { + return errors.New("missing EC part info in signed object") + } + prm.localNodeInContainer = localNodeInSets(p.neoFSNet, cnrNodes) } + + prm.ecPart = ecPart + } else { + prm.localNodeInContainer = localNodeInSets(p.neoFSNet, cnrNodes) } if !prm.localNodeInContainer && localOnly { return errors.New("local operation on the node not compliant with the container storage policy") @@ -204,11 +225,10 @@ func (p *Streamer) newCommonTarget(prm *PutInitPrm) internal.Target { networkMagicNumber: p.networkMagic, metaSvc: p.metaSvc, placementIterator: placementIterator{ - log: p.log, - neoFSNet: p.neoFSNet, - remotePool: p.remotePool, - containerNodes: prm.containerNodes, - linearReplNum: uint(prm.copiesNumber), + log: p.log, + neoFSNet: p.neoFSNet, + remotePool: p.remotePool, + linearReplNum: uint(prm.copiesNumber), }, localStorage: p.localStore, 
keyStorage: p.keyStorage, @@ -217,8 +237,11 @@ func (p *Streamer) newCommonTarget(prm *PutInitPrm) internal.Target { transport: p.transport, relay: relay, fmt: p.fmtValidator, + containerNodes: prm.containerNodes, + ecPart: prm.ecPart, localNodeInContainer: prm.localNodeInContainer, localNodeSigner: prm.localNodeSigner, + sessionSigner: prm.sessionSigner, cnrClient: p.cfg.cnrClient, metainfoConsistencyAttr: metaAttribute(prm.cnr), metaSigner: prm.localSignerRFC6979, diff --git a/pkg/services/object/put/util.go b/pkg/services/object/put/util.go new file mode 100644 index 0000000000..380bcf2c14 --- /dev/null +++ b/pkg/services/object/put/util.go @@ -0,0 +1,19 @@ +package putsvc + +import ( + "slices" + + "github.com/nspcc-dev/neofs-sdk-go/netmap" +) + +func localNodeInSets(n NeoFSNetwork, ss [][]netmap.NodeInfo) bool { + return slices.ContainsFunc(ss, func(s []netmap.NodeInfo) bool { + return localNodeInSet(n, s) + }) +} + +func localNodeInSet(n NeoFSNetwork, nodes []netmap.NodeInfo) bool { + return slices.ContainsFunc(nodes, func(node netmap.NodeInfo) bool { + return n.IsLocalNodePublicKey(node.PublicKey()) + }) +} From 24bf2ea494602ca72b37357b078428e3da30522a Mon Sep 17 00:00:00 2001 From: Leonard Lyubich Date: Wed, 6 Aug 2025 12:49:41 +0300 Subject: [PATCH 05/27] sn/object: Do not acquire meta lock for reading without concurrency On the one hand, unconditional locking seems safer. From the other one, such code could create a false impression that the set of signatures may change after being sent to the contract. However: 1. this cannot happen; 2. even if it did, it would be wrong behavior. Since lock is acquired for writing only now, `sync.RWMutex` is replaced with faster `sync.Mutex`. 
Signed-off-by: Leonard Lyubich --- pkg/services/object/put/distributed.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/pkg/services/object/put/distributed.go b/pkg/services/object/put/distributed.go index 3c376663b0..5ac89d4861 100644 --- a/pkg/services/object/put/distributed.go +++ b/pkg/services/object/put/distributed.go @@ -40,7 +40,7 @@ type distributedTarget struct { metainfoConsistencyAttr string metaSvc *meta.Meta - metaMtx sync.RWMutex + metaMtx sync.Mutex metaSigner neofscrypto.Signer objSharedMeta []byte collectedSignatures [][]byte @@ -228,9 +228,6 @@ func (t *distributedTarget) distributeObject(obj objectSDK.Object, objMeta objec } if t.localNodeInContainer && t.metainfoConsistencyAttr != "" { - t.metaMtx.RLock() - defer t.metaMtx.RUnlock() - if len(t.collectedSignatures) == 0 { return fmt.Errorf("skip metadata chain submit for %s object: no signatures were collected", id) } From 9fb9eea7c09e8ed7a1a1d08a8cbbd8c4b8429ee5 Mon Sep 17 00:00:00 2001 From: Leonard Lyubich Date: Wed, 6 Aug 2025 13:23:41 +0300 Subject: [PATCH 06/27] sn/object: Deduplicate copying struct fields Signed-off-by: Leonard Lyubich --- pkg/services/object/put/service.go | 6 ++---- pkg/services/object/put/streamer.go | 5 +---- 2 files changed, 3 insertions(+), 8 deletions(-) diff --git a/pkg/services/object/put/service.go b/pkg/services/object/put/service.go index e29cb1f011..6b7c48b5f6 100644 --- a/pkg/services/object/put/service.go +++ b/pkg/services/object/put/service.go @@ -148,10 +148,8 @@ func NewService(transport Transport, neoFSNet NeoFSNetwork, m *meta.Meta, opts . 
func (p *Service) Put(ctx context.Context) (*Streamer, error) { return &Streamer{ - cfg: p.cfg, - ctx: ctx, - transport: p.transport, - neoFSNet: p.neoFSNet, + Service: p, + ctx: ctx, }, nil } diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go index ea463213e7..8f5c91e8bf 100644 --- a/pkg/services/object/put/streamer.go +++ b/pkg/services/object/put/streamer.go @@ -15,7 +15,7 @@ import ( ) type Streamer struct { - *cfg + *Service ctx context.Context @@ -24,9 +24,6 @@ type Streamer struct { relay func(client.NodeInfo, client.MultiAddressClient) error maxPayloadSz uint64 // network config - - transport Transport - neoFSNet NeoFSNetwork } var errNotInit = errors.New("stream not initialized") From 3efe8b0d403f9bb723e2cbe995394ec59fcecf0e Mon Sep 17 00:00:00 2001 From: Leonard Lyubich Date: Wed, 6 Aug 2025 13:37:50 +0300 Subject: [PATCH 07/27] sn/object: De-struct `SendChunk()` parameters containing single field The parameter is one and required. Signed-off-by: Leonard Lyubich --- pkg/services/object/delete/util.go | 2 +- pkg/services/object/put/prm.go | 12 ------------ pkg/services/object/put/service_test.go | 6 ++---- pkg/services/object/put/streamer.go | 4 ++-- pkg/services/object/server.go | 2 +- 5 files changed, 6 insertions(+), 20 deletions(-) diff --git a/pkg/services/object/delete/util.go b/pkg/services/object/delete/util.go index a5be698cbc..32bdc6b986 100644 --- a/pkg/services/object/delete/util.go +++ b/pkg/services/object/delete/util.go @@ -24,7 +24,7 @@ func (w *putSvcWrapper) put(exec *execCtx) (*oid.ID, error) { return nil, err } - err = streamer.SendChunk(new(putsvc.PutChunkPrm).WithChunk(payload)) + err = streamer.SendChunk(payload) if err != nil { return nil, err } diff --git a/pkg/services/object/put/prm.go b/pkg/services/object/put/prm.go index 58761e3734..6e12ec42e8 100644 --- a/pkg/services/object/put/prm.go +++ b/pkg/services/object/put/prm.go @@ -28,10 +28,6 @@ type PutInitPrm struct { sessionSigner 
neofscrypto.Signer } -type PutChunkPrm struct { - chunk []byte -} - func (p *PutInitPrm) WithCommonPrm(v *util.CommonPrm) *PutInitPrm { if p != nil { p.common = v @@ -63,11 +59,3 @@ func (p *PutInitPrm) WithCopiesNumber(cn uint32) *PutInitPrm { return p } - -func (p *PutChunkPrm) WithChunk(v []byte) *PutChunkPrm { - if p != nil { - p.chunk = v - } - - return p -} diff --git a/pkg/services/object/put/service_test.go b/pkg/services/object/put/service_test.go index e3e72c9d79..0a4a2adb65 100644 --- a/pkg/services/object/put/service_test.go +++ b/pkg/services/object/put/service_test.go @@ -559,7 +559,7 @@ func (m *serviceClient) ForEachGRPCConn(context.Context, func(context.Context, * type testPayloadStream Streamer func (x *testPayloadStream) Write(p []byte) (int, error) { - if err := (*Streamer)(x).SendChunk(new(PutChunkPrm).WithChunk(p)); err != nil { + if err := (*Streamer)(x).SendChunk(p); err != nil { return 0, err } return len(p), nil @@ -614,9 +614,7 @@ func storeObjectWithSession(t *testing.T, svc *Service, obj object.Object, st se WithCommonPrm(commonPrm) require.NoError(t, stream.Init(ip)) - cp := new(PutChunkPrm). 
- WithChunk(obj.Payload()) - require.NoError(t, stream.SendChunk(cp)) + require.NoError(t, stream.SendChunk(obj.Payload())) _, err = stream.Close() require.NoError(t, err) diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go index 8f5c91e8bf..99b2cec3a9 100644 --- a/pkg/services/object/put/streamer.go +++ b/pkg/services/object/put/streamer.go @@ -246,12 +246,12 @@ func (p *Streamer) newCommonTarget(prm *PutInitPrm) internal.Target { } } -func (p *Streamer) SendChunk(prm *PutChunkPrm) error { +func (p *Streamer) SendChunk(chunk []byte) error { if p.target == nil { return errNotInit } - if _, err := p.target.Write(prm.chunk); err != nil { + if _, err := p.target.Write(chunk); err != nil { return fmt.Errorf("(%T) could not write payload chunk to target: %w", p, err) } diff --git a/pkg/services/object/server.go b/pkg/services/object/server.go index 7e8c7445a7..4abd3dd7cb 100644 --- a/pkg/services/object/server.go +++ b/pkg/services/object/server.go @@ -377,7 +377,7 @@ func (x *putStream) forwardRequest(req *protoobject.PutRequest) error { return putsvc.ErrWrongPayloadSize } } - if err := x.base.SendChunk(new(putsvc.PutChunkPrm).WithChunk(c)); err != nil { + if err := x.base.SendChunk(c); err != nil { return fmt.Errorf("could not send payload chunk: %w", err) } if !x.cacheReqs { From 334c62f7f57c9d3468cdd8c794a17a10cd0a3c18 Mon Sep 17 00:00:00 2001 From: Leonard Lyubich Date: Wed, 6 Aug 2025 13:49:46 +0300 Subject: [PATCH 08/27] sn/object: Split required and optional parameters of `Streamer.Init()` So that it is clear what must be specified and what is not. 
Signed-off-by: Leonard Lyubich --- pkg/services/object/delete/util.go | 6 +- pkg/services/object/put/prm.go | 28 +------- pkg/services/object/put/service_test.go | 12 ++-- pkg/services/object/put/streamer.go | 87 +++++++++++++------------ pkg/services/object/server.go | 10 ++- 5 files changed, 57 insertions(+), 86 deletions(-) diff --git a/pkg/services/object/delete/util.go b/pkg/services/object/delete/util.go index 32bdc6b986..60953c1127 100644 --- a/pkg/services/object/delete/util.go +++ b/pkg/services/object/delete/util.go @@ -15,11 +15,9 @@ func (w *putSvcWrapper) put(exec *execCtx) (*oid.ID, error) { payload := exec.tombstoneObj.Payload() - initPrm := new(putsvc.PutInitPrm). - WithCommonPrm(exec.commonParameters()). - WithObject(exec.tombstoneObj.CutPayload()) + opts := new(putsvc.PutInitOptions) - err = streamer.Init(initPrm) + err = streamer.Init(exec.tombstoneObj.CutPayload(), exec.commonParameters(), opts) if err != nil { return nil, err } diff --git a/pkg/services/object/put/prm.go b/pkg/services/object/put/prm.go index 6e12ec42e8..9658dd4e92 100644 --- a/pkg/services/object/put/prm.go +++ b/pkg/services/object/put/prm.go @@ -3,17 +3,11 @@ package putsvc import ( iec "github.com/nspcc-dev/neofs-node/internal/ec" "github.com/nspcc-dev/neofs-node/pkg/core/client" - "github.com/nspcc-dev/neofs-node/pkg/services/object/util" containerSDK "github.com/nspcc-dev/neofs-sdk-go/container" neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" - "github.com/nspcc-dev/neofs-sdk-go/object" ) -type PutInitPrm struct { - common *util.CommonPrm - - hdr *object.Object - +type PutInitOptions struct { cnr containerSDK.Container copiesNumber uint32 @@ -28,23 +22,7 @@ type PutInitPrm struct { sessionSigner neofscrypto.Signer } -func (p *PutInitPrm) WithCommonPrm(v *util.CommonPrm) *PutInitPrm { - if p != nil { - p.common = v - } - - return p -} - -func (p *PutInitPrm) WithObject(v *object.Object) *PutInitPrm { - if p != nil { - p.hdr = v - } - - return p -} - -func (p 
*PutInitPrm) WithRelay(f func(client.NodeInfo, client.MultiAddressClient) error) *PutInitPrm { +func (p *PutInitOptions) WithRelay(f func(client.NodeInfo, client.MultiAddressClient) error) *PutInitOptions { if p != nil { p.relay = f } @@ -52,7 +30,7 @@ func (p *PutInitPrm) WithRelay(f func(client.NodeInfo, client.MultiAddressClient return p } -func (p *PutInitPrm) WithCopiesNumber(cn uint32) *PutInitPrm { +func (p *PutInitOptions) WithCopiesNumber(cn uint32) *PutInitOptions { if p != nil { p.copiesNumber = cn } diff --git a/pkg/services/object/put/service_test.go b/pkg/services/object/put/service_test.go index 0a4a2adb65..a96fbacbdf 100644 --- a/pkg/services/object/put/service_test.go +++ b/pkg/services/object/put/service_test.go @@ -500,11 +500,9 @@ func (m *serviceClient) ObjectPutInit(ctx context.Context, hdr object.Object, _ panic(err) } - var ip PutInitPrm - ip.WithObject(hdr.CutPayload()) - ip.WithCommonPrm(commonPrm) + var opts PutInitOptions - if err := stream.Init(&ip); err != nil { + if err := stream.Init(hdr.CutPayload(), commonPrm, &opts); err != nil { return nil, err } @@ -609,10 +607,8 @@ func storeObjectWithSession(t *testing.T, svc *Service, obj object.Object, st se commonPrm, err := objutil.CommonPrmFromRequest(req) require.NoError(t, err) - ip := new(PutInitPrm). - WithObject(obj.CutPayload()). 
- WithCommonPrm(commonPrm) - require.NoError(t, stream.Init(ip)) + opts := new(PutInitOptions) + require.NoError(t, stream.Init(obj.CutPayload(), commonPrm, opts)) require.NoError(t, stream.SendChunk(obj.Payload())) diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go index 99b2cec3a9..57af8535f6 100644 --- a/pkg/services/object/put/streamer.go +++ b/pkg/services/object/put/streamer.go @@ -11,6 +11,7 @@ import ( "github.com/nspcc-dev/neofs-node/pkg/services/object/util" "github.com/nspcc-dev/neofs-sdk-go/container" neofsecdsa "github.com/nspcc-dev/neofs-sdk-go/crypto/ecdsa" + "github.com/nspcc-dev/neofs-sdk-go/object" "github.com/nspcc-dev/neofs-sdk-go/user" ) @@ -30,13 +31,13 @@ var errNotInit = errors.New("stream not initialized") var errInitRecall = errors.New("init recall") -func (p *Streamer) Init(prm *PutInitPrm) error { +func (p *Streamer) Init(hdr *object.Object, cp *util.CommonPrm, opts *PutInitOptions) error { // initialize destination target - if err := p.initTarget(prm); err != nil { + if err := p.initTarget(hdr, cp, opts); err != nil { return fmt.Errorf("(%T) could not initialize object target: %w", p, err) } - if err := p.target.WriteHeader(prm.hdr); err != nil { + if err := p.target.WriteHeader(hdr); err != nil { return fmt.Errorf("(%T) could not write header to target: %w", p, err) } return nil @@ -49,14 +50,14 @@ func (p *Streamer) MaxObjectSize() uint64 { return p.maxPayloadSz } -func (p *Streamer) initTarget(prm *PutInitPrm) error { +func (p *Streamer) initTarget(hdr *object.Object, cp *util.CommonPrm, opts *PutInitOptions) error { // prevent re-calling if p.target != nil { return errInitRecall } // prepare needed put parameters - if err := p.preparePrm(prm); err != nil { + if err := p.prepareOptions(hdr, cp, opts); err != nil { return fmt.Errorf("(%T) could not prepare put parameters: %w", p, err) } @@ -65,14 +66,14 @@ func (p *Streamer) initTarget(prm *PutInitPrm) error { return fmt.Errorf("(%T) could not 
obtain max object size parameter", p) } - homomorphicChecksumRequired := !prm.cnr.IsHomomorphicHashingDisabled() + homomorphicChecksumRequired := !opts.cnr.IsHomomorphicHashingDisabled() - if prm.hdr.Signature() != nil { - p.relay = prm.relay + if hdr.Signature() != nil { + p.relay = opts.relay // prepare untrusted-Put object target p.target = &validatingTarget{ - nextTarget: p.newCommonTarget(prm), + nextTarget: p.newCommonTarget(cp, opts), fmt: p.fmtValidator, maxPayloadSz: p.maxPayloadSz, @@ -83,7 +84,7 @@ func (p *Streamer) initTarget(prm *PutInitPrm) error { return nil } - sToken := prm.common.SessionToken() + sToken := cp.SessionToken() // prepare trusted-Put object target @@ -107,7 +108,7 @@ func (p *Streamer) initTarget(prm *PutInitPrm) error { // In case session token is missing, the line above returns the default key. // If it isn't owner key, replication attempts will fail, thus this check. if sToken == nil { - ownerObj := prm.hdr.Owner() + ownerObj := hdr.Owner() if ownerObj.IsZero() { return errors.New("missing object owner") } @@ -120,7 +121,7 @@ func (p *Streamer) initTarget(prm *PutInitPrm) error { } sessionSigner := user.NewAutoIDSigner(*sessionKey) - prm.sessionSigner = sessionSigner + opts.sessionSigner = sessionSigner p.target = &validatingTarget{ fmt: p.fmtValidator, unpreparedObject: true, @@ -131,7 +132,7 @@ func (p *Streamer) initTarget(prm *PutInitPrm) error { sessionSigner, sToken, p.networkState.CurrentEpoch(), - p.newCommonTarget(prm), + p.newCommonTarget(cp, opts), ), homomorphicChecksumRequired: homomorphicChecksumRequired, } @@ -139,9 +140,9 @@ func (p *Streamer) initTarget(prm *PutInitPrm) error { return nil } -func (p *Streamer) preparePrm(prm *PutInitPrm) error { - localOnly := prm.common.LocalOnly() - if localOnly && prm.copiesNumber > 1 { +func (p *Streamer) prepareOptions(hdr *object.Object, cp *util.CommonPrm, opts *PutInitOptions) error { + localOnly := cp.LocalOnly() + if localOnly && opts.copiesNumber > 1 { return 
errors.New("storage of multiple object replicas is requested for a local operation") } @@ -150,60 +151,60 @@ func (p *Streamer) preparePrm(prm *PutInitPrm) error { return fmt.Errorf("get local node's private key: %w", err) } - idCnr := prm.hdr.GetContainerID() + idCnr := hdr.GetContainerID() if idCnr.IsZero() { return errors.New("missing container ID") } // get container to store the object - prm.cnr, err = p.cnrSrc.Get(idCnr) + opts.cnr, err = p.cnrSrc.Get(idCnr) if err != nil { return fmt.Errorf("(%T) could not get container by ID: %w", p, err) } - prm.containerNodes, err = p.neoFSNet.GetContainerNodes(idCnr) + opts.containerNodes, err = p.neoFSNet.GetContainerNodes(idCnr) if err != nil { return fmt.Errorf("select storage nodes for the container: %w", err) } - cnrNodes := prm.containerNodes.Unsorted() - ecRulesN := len(prm.containerNodes.ECRules()) + cnrNodes := opts.containerNodes.Unsorted() + ecRulesN := len(opts.containerNodes.ECRules()) if ecRulesN > 0 { - ecPart, err := iec.GetPartInfo(*prm.hdr) + ecPart, err := iec.GetPartInfo(*hdr) if err != nil { return fmt.Errorf("get EC part info from object header: %w", err) } - repRulesN := len(prm.containerNodes.PrimaryCounts()) + repRulesN := len(opts.containerNodes.PrimaryCounts()) if ecPart.Index >= 0 { if ecPart.RuleIndex >= ecRulesN { return fmt.Errorf("invalid EC part info in object header: EC rule idx=%d with %d rules in total", ecPart.RuleIndex, ecRulesN) } - if prm.hdr.Signature() == nil { + if hdr.Signature() == nil { return errors.New("unsigned EC part object") } - prm.localNodeInContainer = localNodeInSet(p.neoFSNet, cnrNodes[repRulesN+ecPart.RuleIndex]) + opts.localNodeInContainer = localNodeInSet(p.neoFSNet, cnrNodes[repRulesN+ecPart.RuleIndex]) } else { - if repRulesN == 0 && prm.hdr.Signature() != nil { + if repRulesN == 0 && hdr.Signature() != nil { return errors.New("missing EC part info in signed object") } - prm.localNodeInContainer = localNodeInSets(p.neoFSNet, cnrNodes) + 
opts.localNodeInContainer = localNodeInSets(p.neoFSNet, cnrNodes) } - prm.ecPart = ecPart + opts.ecPart = ecPart } else { - prm.localNodeInContainer = localNodeInSets(p.neoFSNet, cnrNodes) + opts.localNodeInContainer = localNodeInSets(p.neoFSNet, cnrNodes) } - if !prm.localNodeInContainer && localOnly { + if !opts.localNodeInContainer && localOnly { return errors.New("local operation on the node not compliant with the container storage policy") } - prm.localNodeSigner = (*neofsecdsa.Signer)(localNodeKey) - prm.localSignerRFC6979 = (*neofsecdsa.SignerRFC6979)(localNodeKey) + opts.localNodeSigner = (*neofsecdsa.Signer)(localNodeKey) + opts.localSignerRFC6979 = (*neofsecdsa.SignerRFC6979)(localNodeKey) return nil } -func (p *Streamer) newCommonTarget(prm *PutInitPrm) internal.Target { +func (p *Streamer) newCommonTarget(cp *util.CommonPrm, opts *PutInitOptions) internal.Target { var relay func(nodeDesc) error if p.relay != nil { relay = func(node nodeDesc) error { @@ -225,24 +226,24 @@ func (p *Streamer) newCommonTarget(prm *PutInitPrm) internal.Target { log: p.log, neoFSNet: p.neoFSNet, remotePool: p.remotePool, - linearReplNum: uint(prm.copiesNumber), + linearReplNum: uint(opts.copiesNumber), }, localStorage: p.localStore, keyStorage: p.keyStorage, - commonPrm: prm.common, + commonPrm: cp, clientConstructor: p.clientConstructor, transport: p.transport, relay: relay, fmt: p.fmtValidator, - containerNodes: prm.containerNodes, - ecPart: prm.ecPart, - localNodeInContainer: prm.localNodeInContainer, - localNodeSigner: prm.localNodeSigner, - sessionSigner: prm.sessionSigner, + containerNodes: opts.containerNodes, + ecPart: opts.ecPart, + localNodeInContainer: opts.localNodeInContainer, + localNodeSigner: opts.localNodeSigner, + sessionSigner: opts.sessionSigner, cnrClient: p.cfg.cnrClient, - metainfoConsistencyAttr: metaAttribute(prm.cnr), - metaSigner: prm.localSignerRFC6979, - localOnly: prm.common.LocalOnly(), + metainfoConsistencyAttr: metaAttribute(opts.cnr), + 
metaSigner: opts.localSignerRFC6979, + localOnly: cp.LocalOnly(), } } diff --git a/pkg/services/object/server.go b/pkg/services/object/server.go index 4abd3dd7cb..8ae31a1d3c 100644 --- a/pkg/services/object/server.go +++ b/pkg/services/object/server.go @@ -348,12 +348,10 @@ func (x *putStream) forwardRequest(req *protoobject.PutRequest) error { return err } - var p putsvc.PutInitPrm - p.WithCommonPrm(cp) - p.WithObject(obj) - p.WithCopiesNumber(v.Init.CopiesNumber) - p.WithRelay(x.sendToRemoteNode) - if err = x.base.Init(&p); err != nil { + var opts putsvc.PutInitOptions + opts.WithCopiesNumber(v.Init.CopiesNumber) + opts.WithRelay(x.sendToRemoteNode) + if err = x.base.Init(obj, cp, &opts); err != nil { return fmt.Errorf("could not init object put stream: %w", err) } From 88e3efe4735e8220da9a4d2ec662ef6e2910967f Mon Sep 17 00:00:00 2001 From: Leonard Lyubich Date: Wed, 6 Aug 2025 14:03:25 +0300 Subject: [PATCH 09/27] sn/object: Accept optional `Streamer.Init()` parameters by value Method did not check pointer for nil. Anyway, it is never nil. 
Signed-off-by: Leonard Lyubich --- pkg/services/object/delete/util.go | 2 +- pkg/services/object/put/service_test.go | 4 ++-- pkg/services/object/put/streamer.go | 8 ++++---- pkg/services/object/server.go | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/pkg/services/object/delete/util.go b/pkg/services/object/delete/util.go index 60953c1127..239ea32c41 100644 --- a/pkg/services/object/delete/util.go +++ b/pkg/services/object/delete/util.go @@ -15,7 +15,7 @@ func (w *putSvcWrapper) put(exec *execCtx) (*oid.ID, error) { payload := exec.tombstoneObj.Payload() - opts := new(putsvc.PutInitOptions) + var opts putsvc.PutInitOptions err = streamer.Init(exec.tombstoneObj.CutPayload(), exec.commonParameters(), opts) if err != nil { diff --git a/pkg/services/object/put/service_test.go b/pkg/services/object/put/service_test.go index a96fbacbdf..82c783d664 100644 --- a/pkg/services/object/put/service_test.go +++ b/pkg/services/object/put/service_test.go @@ -502,7 +502,7 @@ func (m *serviceClient) ObjectPutInit(ctx context.Context, hdr object.Object, _ var opts PutInitOptions - if err := stream.Init(hdr.CutPayload(), commonPrm, &opts); err != nil { + if err := stream.Init(hdr.CutPayload(), commonPrm, opts); err != nil { return nil, err } @@ -607,7 +607,7 @@ func storeObjectWithSession(t *testing.T, svc *Service, obj object.Object, st se commonPrm, err := objutil.CommonPrmFromRequest(req) require.NoError(t, err) - opts := new(PutInitOptions) + var opts PutInitOptions require.NoError(t, stream.Init(obj.CutPayload(), commonPrm, opts)) require.NoError(t, stream.SendChunk(obj.Payload())) diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go index 57af8535f6..be7637af40 100644 --- a/pkg/services/object/put/streamer.go +++ b/pkg/services/object/put/streamer.go @@ -31,7 +31,7 @@ var errNotInit = errors.New("stream not initialized") var errInitRecall = errors.New("init recall") -func (p *Streamer) Init(hdr *object.Object, cp 
*util.CommonPrm, opts *PutInitOptions) error { +func (p *Streamer) Init(hdr *object.Object, cp *util.CommonPrm, opts PutInitOptions) error { // initialize destination target if err := p.initTarget(hdr, cp, opts); err != nil { return fmt.Errorf("(%T) could not initialize object target: %w", p, err) @@ -50,14 +50,14 @@ func (p *Streamer) MaxObjectSize() uint64 { return p.maxPayloadSz } -func (p *Streamer) initTarget(hdr *object.Object, cp *util.CommonPrm, opts *PutInitOptions) error { +func (p *Streamer) initTarget(hdr *object.Object, cp *util.CommonPrm, opts PutInitOptions) error { // prevent re-calling if p.target != nil { return errInitRecall } // prepare needed put parameters - if err := p.prepareOptions(hdr, cp, opts); err != nil { + if err := p.prepareOptions(hdr, cp, &opts); err != nil { return fmt.Errorf("(%T) could not prepare put parameters: %w", p, err) } @@ -204,7 +204,7 @@ func (p *Streamer) prepareOptions(hdr *object.Object, cp *util.CommonPrm, opts * return nil } -func (p *Streamer) newCommonTarget(cp *util.CommonPrm, opts *PutInitOptions) internal.Target { +func (p *Streamer) newCommonTarget(cp *util.CommonPrm, opts PutInitOptions) internal.Target { var relay func(nodeDesc) error if p.relay != nil { relay = func(node nodeDesc) error { diff --git a/pkg/services/object/server.go b/pkg/services/object/server.go index 8ae31a1d3c..12cfbba45f 100644 --- a/pkg/services/object/server.go +++ b/pkg/services/object/server.go @@ -351,7 +351,7 @@ func (x *putStream) forwardRequest(req *protoobject.PutRequest) error { var opts putsvc.PutInitOptions opts.WithCopiesNumber(v.Init.CopiesNumber) opts.WithRelay(x.sendToRemoteNode) - if err = x.base.Init(obj, cp, &opts); err != nil { + if err = x.base.Init(obj, cp, opts); err != nil { return fmt.Errorf("could not init object put stream: %w", err) } From ecca22f6ac2fd854c642bd0e4026052ddc697a59 Mon Sep 17 00:00:00 2001 From: Leonard Lyubich Date: Wed, 6 Aug 2025 14:23:04 +0300 Subject: [PATCH 10/27] sn/object: Typedef 
relay function signature for its deduplication Signed-off-by: Leonard Lyubich --- pkg/services/object/put/prm.go | 7 +++++-- pkg/services/object/put/streamer.go | 3 +-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/pkg/services/object/put/prm.go b/pkg/services/object/put/prm.go index 9658dd4e92..29da3bb419 100644 --- a/pkg/services/object/put/prm.go +++ b/pkg/services/object/put/prm.go @@ -7,12 +7,15 @@ import ( neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" ) +// RelayFunc relays request using given connection to SN. +type RelayFunc = func(client.NodeInfo, client.MultiAddressClient) error + type PutInitOptions struct { cnr containerSDK.Container copiesNumber uint32 - relay func(client.NodeInfo, client.MultiAddressClient) error + relay RelayFunc containerNodes ContainerNodes ecPart iec.PartInfo @@ -22,7 +25,7 @@ type PutInitOptions struct { sessionSigner neofscrypto.Signer } -func (p *PutInitOptions) WithRelay(f func(client.NodeInfo, client.MultiAddressClient) error) *PutInitOptions { +func (p *PutInitOptions) WithRelay(f RelayFunc) *PutInitOptions { if p != nil { p.relay = f } diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go index be7637af40..660c744a20 100644 --- a/pkg/services/object/put/streamer.go +++ b/pkg/services/object/put/streamer.go @@ -6,7 +6,6 @@ import ( "fmt" iec "github.com/nspcc-dev/neofs-node/internal/ec" - "github.com/nspcc-dev/neofs-node/pkg/core/client" "github.com/nspcc-dev/neofs-node/pkg/services/object/internal" "github.com/nspcc-dev/neofs-node/pkg/services/object/util" "github.com/nspcc-dev/neofs-sdk-go/container" @@ -22,7 +21,7 @@ type Streamer struct { target internal.Target - relay func(client.NodeInfo, client.MultiAddressClient) error + relay RelayFunc maxPayloadSz uint64 // network config } From 917116afdb716f3e2c001daefb9cfc581c562b2a Mon Sep 17 00:00:00 2001 From: Leonard Lyubich Date: Wed, 6 Aug 2025 14:25:47 +0300 Subject: [PATCH 11/27] sn/object: Pass relay function 
parameter explicitly It's clearer this way. Signed-off-by: Leonard Lyubich --- pkg/services/object/put/streamer.go | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go index 660c744a20..7ee53780da 100644 --- a/pkg/services/object/put/streamer.go +++ b/pkg/services/object/put/streamer.go @@ -21,8 +21,6 @@ type Streamer struct { target internal.Target - relay RelayFunc - maxPayloadSz uint64 // network config } @@ -68,11 +66,9 @@ func (p *Streamer) initTarget(hdr *object.Object, cp *util.CommonPrm, opts PutIn homomorphicChecksumRequired := !opts.cnr.IsHomomorphicHashingDisabled() if hdr.Signature() != nil { - p.relay = opts.relay - // prepare untrusted-Put object target p.target = &validatingTarget{ - nextTarget: p.newCommonTarget(cp, opts), + nextTarget: p.newCommonTarget(cp, opts, opts.relay), fmt: p.fmtValidator, maxPayloadSz: p.maxPayloadSz, @@ -131,7 +127,7 @@ func (p *Streamer) initTarget(hdr *object.Object, cp *util.CommonPrm, opts PutIn sessionSigner, sToken, p.networkState.CurrentEpoch(), - p.newCommonTarget(cp, opts), + p.newCommonTarget(cp, opts, nil), ), homomorphicChecksumRequired: homomorphicChecksumRequired, } @@ -203,16 +199,16 @@ func (p *Streamer) prepareOptions(hdr *object.Object, cp *util.CommonPrm, opts * return nil } -func (p *Streamer) newCommonTarget(cp *util.CommonPrm, opts PutInitOptions) internal.Target { +func (p *Streamer) newCommonTarget(cp *util.CommonPrm, opts PutInitOptions, relayFn RelayFunc) internal.Target { var relay func(nodeDesc) error - if p.relay != nil { + if relayFn != nil { relay = func(node nodeDesc) error { c, err := p.clientConstructor.Get(node.info) if err != nil { return fmt.Errorf("could not create SDK client %s: %w", node.info.AddressGroup(), err) } - return p.relay(node.info, c) + return relayFn(node.info, c) } } From b393035c13b8c33baba1ff1fb97ef2e3dcc7cbf0 Mon Sep 17 00:00:00 2001 From: Leonard Lyubich Date: Wed, 6 
Aug 2025 14:45:50 +0300 Subject: [PATCH 12/27] sn/object: De-struct `Streamer.Close()` return containing single field There is only one result. Signed-off-by: Leonard Lyubich --- pkg/services/object/delete/util.go | 4 +--- pkg/services/object/put/res.go | 13 ------------- pkg/services/object/put/streamer.go | 11 +++++------ pkg/services/object/server.go | 3 +-- 4 files changed, 7 insertions(+), 24 deletions(-) delete mode 100644 pkg/services/object/put/res.go diff --git a/pkg/services/object/delete/util.go b/pkg/services/object/delete/util.go index 239ea32c41..c1c4faa96b 100644 --- a/pkg/services/object/delete/util.go +++ b/pkg/services/object/delete/util.go @@ -27,12 +27,10 @@ func (w *putSvcWrapper) put(exec *execCtx) (*oid.ID, error) { return nil, err } - r, err := streamer.Close() + id, err := streamer.Close() if err != nil { return nil, err } - id := r.ObjectID() - return &id, nil } diff --git a/pkg/services/object/put/res.go b/pkg/services/object/put/res.go deleted file mode 100644 index 920a86a0cd..0000000000 --- a/pkg/services/object/put/res.go +++ /dev/null @@ -1,13 +0,0 @@ -package putsvc - -import ( - oid "github.com/nspcc-dev/neofs-sdk-go/object/id" -) - -type PutResponse struct { - id oid.ID -} - -func (r *PutResponse) ObjectID() oid.ID { - return r.id -} diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go index 7ee53780da..2ffd2c7520 100644 --- a/pkg/services/object/put/streamer.go +++ b/pkg/services/object/put/streamer.go @@ -11,6 +11,7 @@ import ( "github.com/nspcc-dev/neofs-sdk-go/container" neofsecdsa "github.com/nspcc-dev/neofs-sdk-go/crypto/ecdsa" "github.com/nspcc-dev/neofs-sdk-go/object" + oid "github.com/nspcc-dev/neofs-sdk-go/object/id" "github.com/nspcc-dev/neofs-sdk-go/user" ) @@ -254,19 +255,17 @@ func (p *Streamer) SendChunk(chunk []byte) error { return nil } -func (p *Streamer) Close() (*PutResponse, error) { +func (p *Streamer) Close() (oid.ID, error) { if p.target == nil { - return nil, errNotInit + 
return oid.ID{}, errNotInit } id, err := p.target.Close() if err != nil { - return nil, fmt.Errorf("(%T) could not close object target: %w", p, err) + return oid.ID{}, fmt.Errorf("(%T) could not close object target: %w", p, err) } - return &PutResponse{ - id: id, - }, nil + return id, nil } func metaAttribute(cnr container.Container) string { diff --git a/pkg/services/object/server.go b/pkg/services/object/server.go index 12cfbba45f..e2eb30c4b1 100644 --- a/pkg/services/object/server.go +++ b/pkg/services/object/server.go @@ -395,12 +395,11 @@ func (x *putStream) close() (*protoobject.PutResponse, error) { return nil, putsvc.ErrWrongPayloadSize } - resp, err := x.base.Close() + id, err := x.base.Close() if err != nil { return nil, fmt.Errorf("could not object put stream: %w", err) } - id := resp.ObjectID() return &protoobject.PutResponse{ Body: &protoobject.PutResponse_Body{ ObjectId: id.ProtoMessage(), From 63cd90a654daf34da5d7f574c97518a74ace83e4 Mon Sep 17 00:00:00 2001 From: Leonard Lyubich Date: Wed, 6 Aug 2025 15:13:16 +0300 Subject: [PATCH 13/27] sn/object: Check PUT message stream flow in server handler Previously, it was checked by internal processor and had disadvantages described below. PUT handler detected a flow violation too late. For example, on repeated heading message receipt, the server done resource-intensive checks of the request signatures and body. Although flow check is extremely lightweight. The internal processor is also used for internal SN needs. It always uses it correctly, therefore checks are unnecessary. Now stream control is done instantly upon receipt of the next message. 
Signed-off-by: Leonard Lyubich --- pkg/services/object/put/streamer.go | 17 ----------------- pkg/services/object/server.go | 29 ++++++++++++++++++++++++++--- 2 files changed, 26 insertions(+), 20 deletions(-) diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go index 2ffd2c7520..481562c132 100644 --- a/pkg/services/object/put/streamer.go +++ b/pkg/services/object/put/streamer.go @@ -25,10 +25,6 @@ type Streamer struct { maxPayloadSz uint64 // network config } -var errNotInit = errors.New("stream not initialized") - -var errInitRecall = errors.New("init recall") - func (p *Streamer) Init(hdr *object.Object, cp *util.CommonPrm, opts PutInitOptions) error { // initialize destination target if err := p.initTarget(hdr, cp, opts); err != nil { @@ -49,11 +45,6 @@ func (p *Streamer) MaxObjectSize() uint64 { } func (p *Streamer) initTarget(hdr *object.Object, cp *util.CommonPrm, opts PutInitOptions) error { - // prevent re-calling - if p.target != nil { - return errInitRecall - } - // prepare needed put parameters if err := p.prepareOptions(hdr, cp, &opts); err != nil { return fmt.Errorf("(%T) could not prepare put parameters: %w", p, err) @@ -244,10 +235,6 @@ func (p *Streamer) newCommonTarget(cp *util.CommonPrm, opts PutInitOptions, rela } func (p *Streamer) SendChunk(chunk []byte) error { - if p.target == nil { - return errNotInit - } - if _, err := p.target.Write(chunk); err != nil { return fmt.Errorf("(%T) could not write payload chunk to target: %w", p, err) } @@ -256,10 +243,6 @@ func (p *Streamer) SendChunk(chunk []byte) error { } func (p *Streamer) Close() (oid.ID, error) { - if p.target == nil { - return oid.ID{}, errNotInit - } - id, err := p.target.Close() if err != nil { return oid.ID{}, fmt.Errorf("(%T) could not close object target: %w", p, err) diff --git a/pkg/services/object/server.go b/pkg/services/object/server.go index e2eb30c4b1..ccc6f88146 100644 --- a/pkg/services/object/server.go +++ b/pkg/services/object/server.go 
@@ -326,7 +326,7 @@ func (x *putStream) resignRequest(req *protoobject.PutRequest) (*protoobject.Put func (x *putStream) forwardRequest(req *protoobject.PutRequest) error { switch v := req.GetBody().GetObjectPart().(type) { default: - return fmt.Errorf("invalid object put stream part type %T", v) + panic(fmt.Errorf("invalid object put stream part type %T", v)) case *protoobject.PutRequest_Body_Init_: if v == nil || v.Init == nil { // TODO: seems like this is done several times, deduplicate return errors.New("nil oneof field with heading part") @@ -418,11 +418,16 @@ func (s *Server) Put(gStream protoobject.ObjectService_PutServer) error { var req *protoobject.PutRequest var resp *protoobject.PutResponse + var headerWas bool ps := newIntermediatePutStream(s.signer, stream, gStream.Context()) for { if req, err = gStream.Recv(); err != nil { if errors.Is(err, io.EOF) { + if !headerWas { + return s.sendStatusPutResponse(gStream, errors.New("stream is closed without messages")) + } + resp, err = ps.close() if err != nil { return s.sendStatusPutResponse(gStream, err) @@ -434,8 +439,26 @@ func (s *Server) Put(gStream protoobject.ObjectService_PutServer) error { return err } - if c := req.GetBody().GetChunk(); c != nil { - s.metrics.AddPutPayload(len(c)) + switch v := req.GetBody().GetObjectPart().(type) { + default: + err = fmt.Errorf("invalid object put stream part type %T", v) + case *protoobject.PutRequest_Body_Init_: + if headerWas { + err = errors.New("repeated message with object header") + break + } + headerWas = true + case *protoobject.PutRequest_Body_Chunk: + if !headerWas { + err = errors.New("message with payload chunk before object header") + break + } + + s.metrics.AddPutPayload(len(v.Chunk)) + } + if err != nil { + err = s.sendStatusPutResponse(gStream, err) // assign for defer + return err } if err = icrypto.VerifyRequestSignaturesN3(req, s.fsChain); err != nil { From 8e9b99bf768e97c5f9f3468b60792793ef6e9106 Mon Sep 17 00:00:00 2001 From: Leonard Lyubich 
Date: Wed, 6 Aug 2025 15:49:35 +0300 Subject: [PATCH 14/27] sn/object: Deduplicate payload limit check in forwarded PUT case Signed-off-by: Leonard Lyubich --- pkg/services/object/put/streamer.go | 17 ++++------------- pkg/services/object/server.go | 3 --- 2 files changed, 4 insertions(+), 16 deletions(-) diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go index 481562c132..696906553c 100644 --- a/pkg/services/object/put/streamer.go +++ b/pkg/services/object/put/streamer.go @@ -21,8 +21,6 @@ type Streamer struct { ctx context.Context target internal.Target - - maxPayloadSz uint64 // network config } func (p *Streamer) Init(hdr *object.Object, cp *util.CommonPrm, opts PutInitOptions) error { @@ -37,21 +35,14 @@ func (p *Streamer) Init(hdr *object.Object, cp *util.CommonPrm, opts PutInitOpti return nil } -// MaxObjectSize returns maximum payload size for the streaming session. -// -// Must be called after the successful Init. -func (p *Streamer) MaxObjectSize() uint64 { - return p.maxPayloadSz -} - func (p *Streamer) initTarget(hdr *object.Object, cp *util.CommonPrm, opts PutInitOptions) error { // prepare needed put parameters if err := p.prepareOptions(hdr, cp, &opts); err != nil { return fmt.Errorf("(%T) could not prepare put parameters: %w", p, err) } - p.maxPayloadSz = p.maxSizeSrc.MaxObjectSize() - if p.maxPayloadSz == 0 { + maxPayloadSz := p.maxSizeSrc.MaxObjectSize() + if maxPayloadSz == 0 { return fmt.Errorf("(%T) could not obtain max object size parameter", p) } @@ -63,7 +54,7 @@ func (p *Streamer) initTarget(hdr *object.Object, cp *util.CommonPrm, opts PutIn nextTarget: p.newCommonTarget(cp, opts, opts.relay), fmt: p.fmtValidator, - maxPayloadSz: p.maxPayloadSz, + maxPayloadSz: maxPayloadSz, homomorphicChecksumRequired: homomorphicChecksumRequired, } @@ -114,7 +105,7 @@ func (p *Streamer) initTarget(hdr *object.Object, cp *util.CommonPrm, opts PutIn unpreparedObject: true, nextTarget: newSlicingTarget( p.ctx, - 
p.maxPayloadSz, + maxPayloadSz, !homomorphicChecksumRequired, sessionSigner, sToken, diff --git a/pkg/services/object/server.go b/pkg/services/object/server.go index ccc6f88146..1610e92628 100644 --- a/pkg/services/object/server.go +++ b/pkg/services/object/server.go @@ -360,9 +360,6 @@ func (x *putStream) forwardRequest(req *protoobject.PutRequest) error { } x.expBytes = v.Init.Header.GetPayloadLength() - if m := x.base.MaxObjectSize(); x.expBytes > m { - return putsvc.ErrExceedingMaxSize - } signed, err := x.resignRequest(req) // TODO: resign only when needed if err != nil { return err // TODO: add context From 12aa4e8ca324b768be35d6ac319c2a2c12bf297d Mon Sep 17 00:00:00 2001 From: Leonard Lyubich Date: Wed, 6 Aug 2025 15:55:39 +0300 Subject: [PATCH 15/27] sn/object: Drop internal target entity from errors' context `internal.Target` is an implementation detail that should not be included in server responses. Although currently server does not attach them to the response status, after #2744 it would have shown up. 
Signed-off-by: Leonard Lyubich --- pkg/services/object/put/streamer.go | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go index 696906553c..ba0f4581cc 100644 --- a/pkg/services/object/put/streamer.go +++ b/pkg/services/object/put/streamer.go @@ -26,13 +26,10 @@ type Streamer struct { func (p *Streamer) Init(hdr *object.Object, cp *util.CommonPrm, opts PutInitOptions) error { // initialize destination target if err := p.initTarget(hdr, cp, opts); err != nil { - return fmt.Errorf("(%T) could not initialize object target: %w", p, err) + return err } - if err := p.target.WriteHeader(hdr); err != nil { - return fmt.Errorf("(%T) could not write header to target: %w", p, err) - } - return nil + return p.target.WriteHeader(hdr) } func (p *Streamer) initTarget(hdr *object.Object, cp *util.CommonPrm, opts PutInitOptions) error { @@ -226,17 +223,14 @@ func (p *Streamer) newCommonTarget(cp *util.CommonPrm, opts PutInitOptions, rela } func (p *Streamer) SendChunk(chunk []byte) error { - if _, err := p.target.Write(chunk); err != nil { - return fmt.Errorf("(%T) could not write payload chunk to target: %w", p, err) - } - - return nil + _, err := p.target.Write(chunk) + return err } func (p *Streamer) Close() (oid.ID, error) { id, err := p.target.Close() if err != nil { - return oid.ID{}, fmt.Errorf("(%T) could not close object target: %w", p, err) + return oid.ID{}, err } return id, nil From ad16f4971877d3038da3a9d223cbf069c655eb2a Mon Sep 17 00:00:00 2001 From: Leonard Lyubich Date: Wed, 6 Aug 2025 16:18:06 +0300 Subject: [PATCH 16/27] sn/object: Bring `putsvc.Streamer` interface closer to proto PUT stream In addition to having fewer methods, it is now impossible to pass chunk or close before the header. This reduces the likelihood of dev mistakes. 
Signed-off-by: Leonard Lyubich --- pkg/services/object/delete/util.go | 6 +++--- pkg/services/object/internal/target.go | 11 ++++++++--- pkg/services/object/put/service_test.go | 25 ++++++++++++------------- pkg/services/object/put/streamer.go | 25 +++++++------------------ pkg/services/object/server.go | 8 +++++--- 5 files changed, 35 insertions(+), 40 deletions(-) diff --git a/pkg/services/object/delete/util.go b/pkg/services/object/delete/util.go index c1c4faa96b..610f037a5d 100644 --- a/pkg/services/object/delete/util.go +++ b/pkg/services/object/delete/util.go @@ -17,17 +17,17 @@ func (w *putSvcWrapper) put(exec *execCtx) (*oid.ID, error) { var opts putsvc.PutInitOptions - err = streamer.Init(exec.tombstoneObj.CutPayload(), exec.commonParameters(), opts) + pw, err := streamer.WriteHeader(exec.tombstoneObj.CutPayload(), exec.commonParameters(), opts) if err != nil { return nil, err } - err = streamer.SendChunk(payload) + _, err = pw.Write(payload) if err != nil { return nil, err } - id, err := streamer.Close() + id, err := pw.Close() if err != nil { return nil, err } diff --git a/pkg/services/object/internal/target.go b/pkg/services/object/internal/target.go index 828eaacf8e..cdfacd5c17 100644 --- a/pkg/services/object/internal/target.go +++ b/pkg/services/object/internal/target.go @@ -21,9 +21,8 @@ type HeaderWriter interface { WriteHeader(*object.Object) error } -// Target is an interface of the object writer. -type Target interface { - HeaderWriter +// PayloadWriter is an interface of the object payload writer. +type PayloadWriter interface { // Writer writes object payload chunk. // // Can be called multiple times. @@ -41,3 +40,9 @@ type Target interface { // that depends on the implementation. Close() (oid.ID, error) } + +// Target is an interface of the object writer. 
+type Target interface { + HeaderWriter + PayloadWriter +} diff --git a/pkg/services/object/put/service_test.go b/pkg/services/object/put/service_test.go index 82c783d664..e2a1aab73f 100644 --- a/pkg/services/object/put/service_test.go +++ b/pkg/services/object/put/service_test.go @@ -20,6 +20,7 @@ import ( islices "github.com/nspcc-dev/neofs-node/internal/slices" "github.com/nspcc-dev/neofs-node/internal/testutil" clientcore "github.com/nspcc-dev/neofs-node/pkg/core/client" + "github.com/nspcc-dev/neofs-node/pkg/services/object/internal" objutil "github.com/nspcc-dev/neofs-node/pkg/services/object/util" "github.com/nspcc-dev/neofs-node/pkg/services/session/storage" "github.com/nspcc-dev/neofs-sdk-go/checksum" @@ -502,11 +503,12 @@ func (m *serviceClient) ObjectPutInit(ctx context.Context, hdr object.Object, _ var opts PutInitOptions - if err := stream.Init(hdr.CutPayload(), commonPrm, opts); err != nil { + pw, err := stream.WriteHeader(hdr.CutPayload(), commonPrm, opts) + if err != nil { return nil, err } - return (*testPayloadStream)(stream), nil + return &testPayloadStream{PayloadWriter: pw}, nil } func (m *serviceClient) ReplicateObject(context.Context, oid.ID, io.ReadSeeker, neofscrypto.Signer, bool) (*neofscrypto.Signature, error) { @@ -554,17 +556,12 @@ func (m *serviceClient) ForEachGRPCConn(context.Context, func(context.Context, * panic("unimplemented") } -type testPayloadStream Streamer - -func (x *testPayloadStream) Write(p []byte) (int, error) { - if err := (*Streamer)(x).SendChunk(p); err != nil { - return 0, err - } - return len(p), nil +type testPayloadStream struct { + internal.PayloadWriter } func (x *testPayloadStream) Close() error { - _, err := (*Streamer)(x).Close() + _, err := x.PayloadWriter.Close() return err } @@ -608,11 +605,13 @@ func storeObjectWithSession(t *testing.T, svc *Service, obj object.Object, st se require.NoError(t, err) var opts PutInitOptions - require.NoError(t, stream.Init(obj.CutPayload(), commonPrm, opts)) + pw, err := 
stream.WriteHeader(obj.CutPayload(), commonPrm, opts) + require.NoError(t, err) - require.NoError(t, stream.SendChunk(obj.Payload())) + _, err = pw.Write(obj.Payload()) + require.NoError(t, err) - _, err = stream.Close() + _, err = pw.Close() require.NoError(t, err) } diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go index ba0f4581cc..35154189fd 100644 --- a/pkg/services/object/put/streamer.go +++ b/pkg/services/object/put/streamer.go @@ -11,7 +11,6 @@ import ( "github.com/nspcc-dev/neofs-sdk-go/container" neofsecdsa "github.com/nspcc-dev/neofs-sdk-go/crypto/ecdsa" "github.com/nspcc-dev/neofs-sdk-go/object" - oid "github.com/nspcc-dev/neofs-sdk-go/object/id" "github.com/nspcc-dev/neofs-sdk-go/user" ) @@ -23,13 +22,17 @@ type Streamer struct { target internal.Target } -func (p *Streamer) Init(hdr *object.Object, cp *util.CommonPrm, opts PutInitOptions) error { +func (p *Streamer) WriteHeader(hdr *object.Object, cp *util.CommonPrm, opts PutInitOptions) (internal.PayloadWriter, error) { // initialize destination target if err := p.initTarget(hdr, cp, opts); err != nil { - return err + return nil, err } - return p.target.WriteHeader(hdr) + if err := p.target.WriteHeader(hdr); err != nil { + return nil, err + } + + return p.target, nil } func (p *Streamer) initTarget(hdr *object.Object, cp *util.CommonPrm, opts PutInitOptions) error { @@ -222,20 +225,6 @@ func (p *Streamer) newCommonTarget(cp *util.CommonPrm, opts PutInitOptions, rela } } -func (p *Streamer) SendChunk(chunk []byte) error { - _, err := p.target.Write(chunk) - return err -} - -func (p *Streamer) Close() (oid.ID, error) { - id, err := p.target.Close() - if err != nil { - return oid.ID{}, err - } - - return id, nil -} - func metaAttribute(cnr container.Container) string { return cnr.Attribute("__NEOFS__METAINFO_CONSISTENCY") } diff --git a/pkg/services/object/server.go b/pkg/services/object/server.go index 1610e92628..5910b26819 100644 --- a/pkg/services/object/server.go 
+++ b/pkg/services/object/server.go @@ -250,6 +250,8 @@ type putStream struct { signer ecdsa.PrivateKey base *putsvc.Streamer + payloadWriter internal.PayloadWriter + cacheReqs bool initReq *protoobject.PutRequest chunkReqs []*protoobject.PutRequest @@ -351,7 +353,7 @@ func (x *putStream) forwardRequest(req *protoobject.PutRequest) error { var opts putsvc.PutInitOptions opts.WithCopiesNumber(v.Init.CopiesNumber) opts.WithRelay(x.sendToRemoteNode) - if err = x.base.Init(obj, cp, opts); err != nil { + if x.payloadWriter, err = x.base.WriteHeader(obj, cp, opts); err != nil { return fmt.Errorf("could not init object put stream: %w", err) } @@ -372,7 +374,7 @@ func (x *putStream) forwardRequest(req *protoobject.PutRequest) error { return putsvc.ErrWrongPayloadSize } } - if err := x.base.SendChunk(c); err != nil { + if _, err := x.payloadWriter.Write(c); err != nil { return fmt.Errorf("could not send payload chunk: %w", err) } if !x.cacheReqs { @@ -392,7 +394,7 @@ func (x *putStream) close() (*protoobject.PutResponse, error) { return nil, putsvc.ErrWrongPayloadSize } - id, err := x.base.Close() + id, err := x.payloadWriter.Close() if err != nil { return nil, fmt.Errorf("could not object put stream: %w", err) } From 95e9c29ec2c1e163cb64082ae3a2dba3d0575070 Mon Sep 17 00:00:00 2001 From: Leonard Lyubich Date: Wed, 6 Aug 2025 16:22:06 +0300 Subject: [PATCH 17/27] sn/object: Do not keep `internal.Target` in `Streamer` struct The field was used in single method, so was no sense in being a field. 
Signed-off-by: Leonard Lyubich --- pkg/services/object/put/streamer.go | 33 ++++++++++++----------------- 1 file changed, 14 insertions(+), 19 deletions(-) diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go index 35154189fd..ca216bf2f9 100644 --- a/pkg/services/object/put/streamer.go +++ b/pkg/services/object/put/streamer.go @@ -18,48 +18,45 @@ type Streamer struct { *Service ctx context.Context - - target internal.Target } func (p *Streamer) WriteHeader(hdr *object.Object, cp *util.CommonPrm, opts PutInitOptions) (internal.PayloadWriter, error) { // initialize destination target - if err := p.initTarget(hdr, cp, opts); err != nil { + target, err := p.initTarget(hdr, cp, opts) + if err != nil { return nil, err } - if err := p.target.WriteHeader(hdr); err != nil { + if err := target.WriteHeader(hdr); err != nil { return nil, err } - return p.target, nil + return target, nil } -func (p *Streamer) initTarget(hdr *object.Object, cp *util.CommonPrm, opts PutInitOptions) error { +func (p *Streamer) initTarget(hdr *object.Object, cp *util.CommonPrm, opts PutInitOptions) (internal.Target, error) { // prepare needed put parameters if err := p.prepareOptions(hdr, cp, &opts); err != nil { - return fmt.Errorf("(%T) could not prepare put parameters: %w", p, err) + return nil, fmt.Errorf("(%T) could not prepare put parameters: %w", p, err) } maxPayloadSz := p.maxSizeSrc.MaxObjectSize() if maxPayloadSz == 0 { - return fmt.Errorf("(%T) could not obtain max object size parameter", p) + return nil, fmt.Errorf("(%T) could not obtain max object size parameter", p) } homomorphicChecksumRequired := !opts.cnr.IsHomomorphicHashingDisabled() if hdr.Signature() != nil { // prepare untrusted-Put object target - p.target = &validatingTarget{ + return &validatingTarget{ nextTarget: p.newCommonTarget(cp, opts, opts.relay), fmt: p.fmtValidator, maxPayloadSz: maxPayloadSz, homomorphicChecksumRequired: homomorphicChecksumRequired, - } - - return nil + }, nil } 
sToken := cp.SessionToken() @@ -78,7 +75,7 @@ func (p *Streamer) initTarget(hdr *object.Object, cp *util.CommonPrm, opts PutIn sessionKey, err := p.keyStorage.GetKey(sessionInfo) if err != nil { - return fmt.Errorf("(%T) could not receive session key: %w", p, err) + return nil, fmt.Errorf("(%T) could not receive session key: %w", p, err) } signer := neofsecdsa.SignerRFC6979(*sessionKey) @@ -88,19 +85,19 @@ func (p *Streamer) initTarget(hdr *object.Object, cp *util.CommonPrm, opts PutIn if sToken == nil { ownerObj := hdr.Owner() if ownerObj.IsZero() { - return errors.New("missing object owner") + return nil, errors.New("missing object owner") } ownerSession := user.NewFromECDSAPublicKey(signer.PublicKey) if ownerObj != ownerSession { - return fmt.Errorf("(%T) session token is missing but object owner id is different from the default key", p) + return nil, fmt.Errorf("(%T) session token is missing but object owner id is different from the default key", p) } } sessionSigner := user.NewAutoIDSigner(*sessionKey) opts.sessionSigner = sessionSigner - p.target = &validatingTarget{ + return &validatingTarget{ fmt: p.fmtValidator, unpreparedObject: true, nextTarget: newSlicingTarget( @@ -113,9 +110,7 @@ func (p *Streamer) initTarget(hdr *object.Object, cp *util.CommonPrm, opts PutIn p.newCommonTarget(cp, opts, nil), ), homomorphicChecksumRequired: homomorphicChecksumRequired, - } - - return nil + }, nil } func (p *Streamer) prepareOptions(hdr *object.Object, cp *util.CommonPrm, opts *PutInitOptions) error { From 4808244ec18a2010465b1506a4d1119d4fc3883a Mon Sep 17 00:00:00 2001 From: Leonard Lyubich Date: Wed, 6 Aug 2025 16:31:00 +0300 Subject: [PATCH 18/27] sn/object: Replace context struct field with a parameter More accepted approach in Go. 
Signed-off-by: Leonard Lyubich --- pkg/services/object/delete/util.go | 2 +- pkg/services/object/put/service.go | 3 +-- pkg/services/object/put/service_test.go | 4 ++-- pkg/services/object/put/streamer.go | 18 ++++++++---------- pkg/services/object/server.go | 2 +- 5 files changed, 13 insertions(+), 16 deletions(-) diff --git a/pkg/services/object/delete/util.go b/pkg/services/object/delete/util.go index 610f037a5d..1c64a21189 100644 --- a/pkg/services/object/delete/util.go +++ b/pkg/services/object/delete/util.go @@ -17,7 +17,7 @@ func (w *putSvcWrapper) put(exec *execCtx) (*oid.ID, error) { var opts putsvc.PutInitOptions - pw, err := streamer.WriteHeader(exec.tombstoneObj.CutPayload(), exec.commonParameters(), opts) + pw, err := streamer.WriteHeader(exec.context(), exec.tombstoneObj.CutPayload(), exec.commonParameters(), opts) if err != nil { return nil, err } diff --git a/pkg/services/object/put/service.go b/pkg/services/object/put/service.go index 6b7c48b5f6..034190d4d5 100644 --- a/pkg/services/object/put/service.go +++ b/pkg/services/object/put/service.go @@ -146,10 +146,9 @@ func NewService(transport Transport, neoFSNet NeoFSNetwork, m *meta.Meta, opts . 
} } -func (p *Service) Put(ctx context.Context) (*Streamer, error) { +func (p *Service) Put(context.Context) (*Streamer, error) { return &Streamer{ Service: p, - ctx: ctx, }, nil } diff --git a/pkg/services/object/put/service_test.go b/pkg/services/object/put/service_test.go index e2a1aab73f..4b326869d4 100644 --- a/pkg/services/object/put/service_test.go +++ b/pkg/services/object/put/service_test.go @@ -503,7 +503,7 @@ func (m *serviceClient) ObjectPutInit(ctx context.Context, hdr object.Object, _ var opts PutInitOptions - pw, err := stream.WriteHeader(hdr.CutPayload(), commonPrm, opts) + pw, err := stream.WriteHeader(ctx, hdr.CutPayload(), commonPrm, opts) if err != nil { return nil, err } @@ -605,7 +605,7 @@ func storeObjectWithSession(t *testing.T, svc *Service, obj object.Object, st se require.NoError(t, err) var opts PutInitOptions - pw, err := stream.WriteHeader(obj.CutPayload(), commonPrm, opts) + pw, err := stream.WriteHeader(context.Background(), obj.CutPayload(), commonPrm, opts) require.NoError(t, err) _, err = pw.Write(obj.Payload()) diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go index ca216bf2f9..364721c24a 100644 --- a/pkg/services/object/put/streamer.go +++ b/pkg/services/object/put/streamer.go @@ -16,13 +16,11 @@ import ( type Streamer struct { *Service - - ctx context.Context } -func (p *Streamer) WriteHeader(hdr *object.Object, cp *util.CommonPrm, opts PutInitOptions) (internal.PayloadWriter, error) { +func (p *Streamer) WriteHeader(ctx context.Context, hdr *object.Object, cp *util.CommonPrm, opts PutInitOptions) (internal.PayloadWriter, error) { // initialize destination target - target, err := p.initTarget(hdr, cp, opts) + target, err := p.initTarget(ctx, hdr, cp, opts) if err != nil { return nil, err } @@ -34,7 +32,7 @@ func (p *Streamer) WriteHeader(hdr *object.Object, cp *util.CommonPrm, opts PutI return target, nil } -func (p *Streamer) initTarget(hdr *object.Object, cp *util.CommonPrm, opts 
PutInitOptions) (internal.Target, error) { +func (p *Streamer) initTarget(ctx context.Context, hdr *object.Object, cp *util.CommonPrm, opts PutInitOptions) (internal.Target, error) { // prepare needed put parameters if err := p.prepareOptions(hdr, cp, &opts); err != nil { return nil, fmt.Errorf("(%T) could not prepare put parameters: %w", p, err) @@ -50,7 +48,7 @@ func (p *Streamer) initTarget(hdr *object.Object, cp *util.CommonPrm, opts PutIn if hdr.Signature() != nil { // prepare untrusted-Put object target return &validatingTarget{ - nextTarget: p.newCommonTarget(cp, opts, opts.relay), + nextTarget: p.newCommonTarget(ctx, cp, opts, opts.relay), fmt: p.fmtValidator, maxPayloadSz: maxPayloadSz, @@ -101,13 +99,13 @@ func (p *Streamer) initTarget(hdr *object.Object, cp *util.CommonPrm, opts PutIn fmt: p.fmtValidator, unpreparedObject: true, nextTarget: newSlicingTarget( - p.ctx, + ctx, maxPayloadSz, !homomorphicChecksumRequired, sessionSigner, sToken, p.networkState.CurrentEpoch(), - p.newCommonTarget(cp, opts, nil), + p.newCommonTarget(ctx, cp, opts, nil), ), homomorphicChecksumRequired: homomorphicChecksumRequired, }, nil @@ -177,7 +175,7 @@ func (p *Streamer) prepareOptions(hdr *object.Object, cp *util.CommonPrm, opts * return nil } -func (p *Streamer) newCommonTarget(cp *util.CommonPrm, opts PutInitOptions, relayFn RelayFunc) internal.Target { +func (p *Streamer) newCommonTarget(ctx context.Context, cp *util.CommonPrm, opts PutInitOptions, relayFn RelayFunc) internal.Target { var relay func(nodeDesc) error if relayFn != nil { relay = func(node nodeDesc) error { @@ -191,7 +189,7 @@ func (p *Streamer) newCommonTarget(cp *util.CommonPrm, opts PutInitOptions, rela } return &distributedTarget{ - opCtx: p.ctx, + opCtx: ctx, fsState: p.networkState, networkMagicNumber: p.networkMagic, metaSvc: p.metaSvc, diff --git a/pkg/services/object/server.go b/pkg/services/object/server.go index 5910b26819..43405c79a4 100644 --- a/pkg/services/object/server.go +++ 
b/pkg/services/object/server.go @@ -353,7 +353,7 @@ func (x *putStream) forwardRequest(req *protoobject.PutRequest) error { var opts putsvc.PutInitOptions opts.WithCopiesNumber(v.Init.CopiesNumber) opts.WithRelay(x.sendToRemoteNode) - if x.payloadWriter, err = x.base.WriteHeader(obj, cp, opts); err != nil { + if x.payloadWriter, err = x.base.WriteHeader(x.ctx, obj, cp, opts); err != nil { return fmt.Errorf("could not init object put stream: %w", err) } From e252c573b9dfd3f4ec2c18ddfba5e101d5467f50 Mon Sep 17 00:00:00 2001 From: Leonard Lyubich Date: Wed, 6 Aug 2025 16:49:08 +0300 Subject: [PATCH 19/27] sn/object: Drop `putsvc.Streamer` type It became a redundant wrapper around `Service` type. Signed-off-by: Leonard Lyubich --- cmd/neofs-node/object.go | 14 +++++--------- pkg/services/object/delete/util.go | 7 +------ pkg/services/object/put/local.go | 2 +- pkg/services/object/put/service.go | 6 ------ pkg/services/object/put/service_test.go | 12 ++---------- pkg/services/object/put/streamer.go | 12 ++++-------- pkg/services/object/server.go | 19 ++++++++++--------- pkg/services/object/server_test.go | 4 +++- 8 files changed, 26 insertions(+), 50 deletions(-) diff --git a/cmd/neofs-node/object.go b/cmd/neofs-node/object.go index 472e022cc0..ecf1ac0066 100644 --- a/cmd/neofs-node/object.go +++ b/cmd/neofs-node/object.go @@ -50,7 +50,7 @@ import ( ) type objectSvc struct { - put *putsvc.Service + *putsvc.Service search *searchsvc.Service @@ -70,10 +70,6 @@ func (c *cfg) MaxObjectSize() uint64 { return sz } -func (s *objectSvc) Put(ctx context.Context) (*putsvc.Streamer, error) { - return s.put.Put(ctx) -} - func (s *objectSvc) Head(ctx context.Context, prm getsvc.HeadPrm) error { return s.get.Head(ctx, prm) } @@ -277,10 +273,10 @@ func initObjectService(c *cfg) { ) objSvc := &objectSvc{ - put: sPut, - search: sSearch, - get: sGet, - delete: sDelete, + Service: sPut, + search: sSearch, + get: sGet, + delete: sDelete, } // cachedFirstObjectsNumber is a total cached 
objects number; the V2 split scheme diff --git a/pkg/services/object/delete/util.go b/pkg/services/object/delete/util.go index 1c64a21189..b47ea4dda4 100644 --- a/pkg/services/object/delete/util.go +++ b/pkg/services/object/delete/util.go @@ -8,16 +8,11 @@ import ( type putSvcWrapper putsvc.Service func (w *putSvcWrapper) put(exec *execCtx) (*oid.ID, error) { - streamer, err := (*putsvc.Service)(w).Put(exec.context()) - if err != nil { - return nil, err - } - payload := exec.tombstoneObj.Payload() var opts putsvc.PutInitOptions - pw, err := streamer.WriteHeader(exec.context(), exec.tombstoneObj.CutPayload(), exec.commonParameters(), opts) + pw, err := (*putsvc.Service)(w).InitPut(exec.context(), exec.tombstoneObj.CutPayload(), exec.commonParameters(), opts) if err != nil { return nil, err } diff --git a/pkg/services/object/put/local.go b/pkg/services/object/put/local.go index cf8a9d5be8..abe9671e05 100644 --- a/pkg/services/object/put/local.go +++ b/pkg/services/object/put/local.go @@ -69,7 +69,7 @@ func putObjectLocally(storage ObjectStorage, obj *object.Object, meta objectCore // ValidateAndStoreObjectLocally checks format of given object and, if it's // correct, stores it in the underlying local object storage. Serves operation -// similar to local-only [Service.Put] one. +// similar to local-only [Service.InitPut] one. func (p *Service) ValidateAndStoreObjectLocally(obj object.Object) error { cnrID := obj.GetContainerID() if cnrID.IsZero() { diff --git a/pkg/services/object/put/service.go b/pkg/services/object/put/service.go index 034190d4d5..e08e538d9e 100644 --- a/pkg/services/object/put/service.go +++ b/pkg/services/object/put/service.go @@ -146,12 +146,6 @@ func NewService(transport Transport, neoFSNet NeoFSNetwork, m *meta.Meta, opts . 
} } -func (p *Service) Put(context.Context) (*Streamer, error) { - return &Streamer{ - Service: p, - }, nil -} - func WithKeyStorage(v *objutil.KeyStorage) Option { return func(c *cfg) { c.keyStorage = v diff --git a/pkg/services/object/put/service_test.go b/pkg/services/object/put/service_test.go index 4b326869d4..5d70d8c26a 100644 --- a/pkg/services/object/put/service_test.go +++ b/pkg/services/object/put/service_test.go @@ -487,11 +487,6 @@ func (m *serviceClient) ContainerAnnounceUsedSpace(context.Context, []container. } func (m *serviceClient) ObjectPutInit(ctx context.Context, hdr object.Object, _ user.Signer, _ client.PrmObjectPutInit) (client.ObjectWriter, error) { - stream, err := (*Service)(m).Put(ctx) - if err != nil { - return nil, err - } - // TODO: following is needed because struct parameters privatize some data. Refactor to avoid this. localReq := &protoobject.PutRequest{ MetaHeader: &protosession.RequestMetaHeader{Ttl: 1}, @@ -503,7 +498,7 @@ func (m *serviceClient) ObjectPutInit(ctx context.Context, hdr object.Object, _ var opts PutInitOptions - pw, err := stream.WriteHeader(ctx, hdr.CutPayload(), commonPrm, opts) + pw, err := (*Service)(m).InitPut(ctx, hdr.CutPayload(), commonPrm, opts) if err != nil { return nil, err } @@ -591,9 +586,6 @@ func (x *testCluster) resetAllStoredObjects() { } func storeObjectWithSession(t *testing.T, svc *Service, obj object.Object, st session.Object) { - stream, err := svc.Put(context.Background()) - require.NoError(t, err) - req := &protoobject.PutRequest{ MetaHeader: &protosession.RequestMetaHeader{ Ttl: 2, @@ -605,7 +597,7 @@ func storeObjectWithSession(t *testing.T, svc *Service, obj object.Object, st se require.NoError(t, err) var opts PutInitOptions - pw, err := stream.WriteHeader(context.Background(), obj.CutPayload(), commonPrm, opts) + pw, err := svc.InitPut(context.Background(), obj.CutPayload(), commonPrm, opts) require.NoError(t, err) _, err = pw.Write(obj.Payload()) diff --git 
a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go index 364721c24a..dba567ef9d 100644 --- a/pkg/services/object/put/streamer.go +++ b/pkg/services/object/put/streamer.go @@ -14,11 +14,7 @@ import ( "github.com/nspcc-dev/neofs-sdk-go/user" ) -type Streamer struct { - *Service -} - -func (p *Streamer) WriteHeader(ctx context.Context, hdr *object.Object, cp *util.CommonPrm, opts PutInitOptions) (internal.PayloadWriter, error) { +func (p *Service) InitPut(ctx context.Context, hdr *object.Object, cp *util.CommonPrm, opts PutInitOptions) (internal.PayloadWriter, error) { // initialize destination target target, err := p.initTarget(ctx, hdr, cp, opts) if err != nil { @@ -32,7 +28,7 @@ func (p *Streamer) WriteHeader(ctx context.Context, hdr *object.Object, cp *util return target, nil } -func (p *Streamer) initTarget(ctx context.Context, hdr *object.Object, cp *util.CommonPrm, opts PutInitOptions) (internal.Target, error) { +func (p *Service) initTarget(ctx context.Context, hdr *object.Object, cp *util.CommonPrm, opts PutInitOptions) (internal.Target, error) { // prepare needed put parameters if err := p.prepareOptions(hdr, cp, &opts); err != nil { return nil, fmt.Errorf("(%T) could not prepare put parameters: %w", p, err) @@ -111,7 +107,7 @@ func (p *Streamer) initTarget(ctx context.Context, hdr *object.Object, cp *util. 
}, nil } -func (p *Streamer) prepareOptions(hdr *object.Object, cp *util.CommonPrm, opts *PutInitOptions) error { +func (p *Service) prepareOptions(hdr *object.Object, cp *util.CommonPrm, opts *PutInitOptions) error { localOnly := cp.LocalOnly() if localOnly && opts.copiesNumber > 1 { return errors.New("storage of multiple object replicas is requested for a local operation") @@ -175,7 +171,7 @@ func (p *Streamer) prepareOptions(hdr *object.Object, cp *util.CommonPrm, opts * return nil } -func (p *Streamer) newCommonTarget(ctx context.Context, cp *util.CommonPrm, opts PutInitOptions, relayFn RelayFunc) internal.Target { +func (p *Service) newCommonTarget(ctx context.Context, cp *util.CommonPrm, opts PutInitOptions, relayFn RelayFunc) internal.Target { var relay func(nodeDesc) error if relayFn != nil { relay = func(node nodeDesc) error { diff --git a/pkg/services/object/server.go b/pkg/services/object/server.go index 43405c79a4..3af742309c 100644 --- a/pkg/services/object/server.go +++ b/pkg/services/object/server.go @@ -50,11 +50,15 @@ import ( "google.golang.org/grpc" ) +type putHandler interface { + InitPut(context.Context, *object.Object, *objutil.CommonPrm, putsvc.PutInitOptions) (internal.PayloadWriter, error) +} + // Handlers represents storage node's internal handler Object service op // payloads. 
type Handlers interface { Get(context.Context, getsvc.Prm) error - Put(context.Context) (*putsvc.Streamer, error) + putHandler Head(context.Context, getsvc.HeadPrm) error Search(context.Context, searchsvc.Prm) error Delete(context.Context, deletesvc.Prm) error @@ -248,7 +252,7 @@ func (s *Server) sendStatusPutResponse(stream protoobject.ObjectService_PutServe type putStream struct { ctx context.Context signer ecdsa.PrivateKey - base *putsvc.Streamer + base putHandler payloadWriter internal.PayloadWriter @@ -259,7 +263,7 @@ type putStream struct { expBytes, recvBytes uint64 // payload } -func newIntermediatePutStream(signer ecdsa.PrivateKey, base *putsvc.Streamer, ctx context.Context) *putStream { +func newIntermediatePutStream(signer ecdsa.PrivateKey, base putHandler, ctx context.Context) *putStream { return &putStream{ ctx: ctx, signer: signer, @@ -353,7 +357,7 @@ func (x *putStream) forwardRequest(req *protoobject.PutRequest) error { var opts putsvc.PutInitOptions opts.WithCopiesNumber(v.Init.CopiesNumber) opts.WithRelay(x.sendToRemoteNode) - if x.payloadWriter, err = x.base.WriteHeader(x.ctx, obj, cp, opts); err != nil { + if x.payloadWriter, err = x.base.InitPut(x.ctx, obj, cp, opts); err != nil { return fmt.Errorf("could not init object put stream: %w", err) } @@ -408,18 +412,15 @@ func (x *putStream) close() (*protoobject.PutResponse, error) { func (s *Server) Put(gStream protoobject.ObjectService_PutServer) error { t := time.Now() - stream, err := s.handlers.Put(gStream.Context()) + var err error defer func() { s.pushOpExecResult(stat.MethodObjectPut, err, t) }() - if err != nil { - return err - } var req *protoobject.PutRequest var resp *protoobject.PutResponse var headerWas bool - ps := newIntermediatePutStream(s.signer, stream, gStream.Context()) + ps := newIntermediatePutStream(s.signer, s.handlers, gStream.Context()) for { if req, err = gStream.Recv(); err != nil { if errors.Is(err, io.EOF) { diff --git a/pkg/services/object/server_test.go 
b/pkg/services/object/server_test.go index ff7132bb7d..ecda980e91 100644 --- a/pkg/services/object/server_test.go +++ b/pkg/services/object/server_test.go @@ -24,8 +24,10 @@ import ( v2 "github.com/nspcc-dev/neofs-node/pkg/services/object/acl/v2" deletesvc "github.com/nspcc-dev/neofs-node/pkg/services/object/delete" getsvc "github.com/nspcc-dev/neofs-node/pkg/services/object/get" + "github.com/nspcc-dev/neofs-node/pkg/services/object/internal" putsvc "github.com/nspcc-dev/neofs-node/pkg/services/object/put" searchsvc "github.com/nspcc-dev/neofs-node/pkg/services/object/search" + "github.com/nspcc-dev/neofs-node/pkg/services/object/util" "github.com/nspcc-dev/neofs-sdk-go/client" apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status" "github.com/nspcc-dev/neofs-sdk-go/container" @@ -59,7 +61,7 @@ func (x noCallObjectService) Get(context.Context, getsvc.Prm) error { panic("must not be called") } -func (x noCallObjectService) Put(context.Context) (*putsvc.Streamer, error) { +func (x noCallObjectService) InitPut(context.Context, *object.Object, *util.CommonPrm, putsvc.PutInitOptions) (internal.PayloadWriter, error) { panic("must not be called") } From 0e739bdf6d533220c72f166167f1d62ef9e09cd4 Mon Sep 17 00:00:00 2001 From: Leonard Lyubich Date: Wed, 6 Aug 2025 16:54:05 +0300 Subject: [PATCH 20/27] sn/object: Export struct fields instead of providing setters The original intention was to remove the nil check and chaining within the setters themselves. However, replacing them removes even more code. 
Signed-off-by: Leonard Lyubich --- pkg/services/object/put/prm.go | 20 ++------------------ pkg/services/object/put/streamer.go | 6 +++--- pkg/services/object/server.go | 4 ++-- 3 files changed, 7 insertions(+), 23 deletions(-) diff --git a/pkg/services/object/put/prm.go b/pkg/services/object/put/prm.go index 29da3bb419..f59b5fff3d 100644 --- a/pkg/services/object/put/prm.go +++ b/pkg/services/object/put/prm.go @@ -13,9 +13,9 @@ type RelayFunc = func(client.NodeInfo, client.MultiAddressClient) error type PutInitOptions struct { cnr containerSDK.Container - copiesNumber uint32 + CopiesNumber uint32 - relay RelayFunc + Relay RelayFunc containerNodes ContainerNodes ecPart iec.PartInfo @@ -24,19 +24,3 @@ type PutInitOptions struct { localNodeSigner neofscrypto.Signer sessionSigner neofscrypto.Signer } - -func (p *PutInitOptions) WithRelay(f RelayFunc) *PutInitOptions { - if p != nil { - p.relay = f - } - - return p -} - -func (p *PutInitOptions) WithCopiesNumber(cn uint32) *PutInitOptions { - if p != nil { - p.copiesNumber = cn - } - - return p -} diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go index dba567ef9d..37cc236806 100644 --- a/pkg/services/object/put/streamer.go +++ b/pkg/services/object/put/streamer.go @@ -44,7 +44,7 @@ func (p *Service) initTarget(ctx context.Context, hdr *object.Object, cp *util.C if hdr.Signature() != nil { // prepare untrusted-Put object target return &validatingTarget{ - nextTarget: p.newCommonTarget(ctx, cp, opts, opts.relay), + nextTarget: p.newCommonTarget(ctx, cp, opts, opts.Relay), fmt: p.fmtValidator, maxPayloadSz: maxPayloadSz, @@ -109,7 +109,7 @@ func (p *Service) initTarget(ctx context.Context, hdr *object.Object, cp *util.C func (p *Service) prepareOptions(hdr *object.Object, cp *util.CommonPrm, opts *PutInitOptions) error { localOnly := cp.LocalOnly() - if localOnly && opts.copiesNumber > 1 { + if localOnly && opts.CopiesNumber > 1 { return errors.New("storage of multiple object replicas 
is requested for a local operation") } @@ -193,7 +193,7 @@ func (p *Service) newCommonTarget(ctx context.Context, cp *util.CommonPrm, opts log: p.log, neoFSNet: p.neoFSNet, remotePool: p.remotePool, - linearReplNum: uint(opts.copiesNumber), + linearReplNum: uint(opts.CopiesNumber), }, localStorage: p.localStore, keyStorage: p.keyStorage, diff --git a/pkg/services/object/server.go b/pkg/services/object/server.go index 3af742309c..16852dee99 100644 --- a/pkg/services/object/server.go +++ b/pkg/services/object/server.go @@ -355,8 +355,8 @@ func (x *putStream) forwardRequest(req *protoobject.PutRequest) error { } var opts putsvc.PutInitOptions - opts.WithCopiesNumber(v.Init.CopiesNumber) - opts.WithRelay(x.sendToRemoteNode) + opts.CopiesNumber = v.Init.CopiesNumber + opts.Relay = x.sendToRemoteNode if x.payloadWriter, err = x.base.InitPut(x.ctx, obj, cp, opts); err != nil { return fmt.Errorf("could not init object put stream: %w", err) } From 84d3910a7a27f1545efb4d2ce44ade240e759f99 Mon Sep 17 00:00:00 2001 From: Leonard Lyubich Date: Wed, 6 Aug 2025 17:08:16 +0300 Subject: [PATCH 21/27] sn/object: Inline and drop once used struct Signed-off-by: Leonard Lyubich --- pkg/services/object/put/distibuted_test.go | 16 ++++----- pkg/services/object/put/distributed.go | 42 ++++++++++------------ pkg/services/object/put/ec.go | 4 +-- pkg/services/object/put/streamer.go | 18 +++++----- 4 files changed, 37 insertions(+), 43 deletions(-) diff --git a/pkg/services/object/put/distibuted_test.go b/pkg/services/object/put/distibuted_test.go index b731eafd3d..fb623c6e7b 100644 --- a/pkg/services/object/put/distibuted_test.go +++ b/pkg/services/object/put/distibuted_test.go @@ -68,7 +68,7 @@ func TestIterateNodesForObject(t *testing.T) { cnrNodes[2][0].SetPublicKey(cnrNodes[0][1].PublicKey()) cnrNodes[1][1].SetPublicKey(cnrNodes[0][2].PublicKey()) var rwp testWorkerPool - iter := placementIterator{ + iter := distributedTarget{ log: zap.NewNop(), neoFSNet: testNetwork{ localPubKey: 
cnrNodes[0][2].PublicKey(), @@ -156,7 +156,7 @@ func TestIterateNodesForObject(t *testing.T) { objID := oidtest.ID() cnrNodes := allocNodes([]uint{3, 3, 2}) cnrNodes[1][1].SetPublicKey(cnrNodes[0][1].PublicKey()) - iter := placementIterator{ + iter := distributedTarget{ log: zap.NewNop(), neoFSNet: new(testNetwork), remotePool: new(testWorkerPool), @@ -192,7 +192,7 @@ func TestIterateNodesForObject(t *testing.T) { objID := oidtest.ID() cnrNodes := allocNodes([]uint{2, 3, 2}) cnrNodes[1][2].SetPublicKey(cnrNodes[0][1].PublicKey()) - iter := placementIterator{ + iter := distributedTarget{ log: zap.NewNop(), neoFSNet: new(testNetwork), remotePool: new(testWorkerPool), @@ -234,7 +234,7 @@ func TestIterateNodesForObject(t *testing.T) { nFail: 5, err: errors.New("any worker pool error"), } - iter := placementIterator{ + iter := distributedTarget{ log: zap.NewNop(), neoFSNet: new(testNetwork), remotePool: &wp, @@ -273,7 +273,7 @@ func TestIterateNodesForObject(t *testing.T) { objID := oidtest.ID() cnrNodes := allocNodes([]uint{2, 3, 1}) var wp testWorkerPool - iter := placementIterator{ + iter := distributedTarget{ log: zap.NewNop(), neoFSNet: new(testNetwork), remotePool: &wp, @@ -306,7 +306,7 @@ func TestIterateNodesForObject(t *testing.T) { cnrNodes := allocNodes([]uint{2, 3, 1}) cnrNodes[1][2].SetNetworkEndpoints("definitely invalid network address") var wp testWorkerPool - iter := placementIterator{ + iter := distributedTarget{ log: zap.NewNop(), neoFSNet: new(testNetwork), remotePool: &wp, @@ -342,7 +342,7 @@ func TestIterateNodesForObject(t *testing.T) { objID := oidtest.ID() cnrNodes := allocNodes([]uint{2, 3, 1}) var wp testWorkerPool - iter := placementIterator{ + iter := distributedTarget{ log: zap.NewNop(), neoFSNet: new(testNetwork), remotePool: &wp, @@ -373,7 +373,7 @@ func TestIterateNodesForObject(t *testing.T) { t.Run("return only after worker pool finished", func(t *testing.T) { objID := oidtest.ID() cnrNodes := allocNodes([]uint{2, 3, 1}) - iter := 
placementIterator{ + iter := distributedTarget{ log: zap.NewNop(), neoFSNet: new(testNetwork), remotePool: &testWorkerPool{ diff --git a/pkg/services/object/put/distributed.go b/pkg/services/object/put/distributed.go index 5ac89d4861..84f180792f 100644 --- a/pkg/services/object/put/distributed.go +++ b/pkg/services/object/put/distributed.go @@ -30,7 +30,13 @@ import ( type distributedTarget struct { opCtx context.Context - placementIterator placementIterator + log *zap.Logger + neoFSNet NeoFSNetwork + remotePool util.WorkerPool + + // when non-zero, this setting simplifies the object's storage policy + // requirements to a fixed number of object replicas to be retained + linearReplNum uint obj *objectSDK.Object networkMagicNumber uint32 @@ -176,7 +182,7 @@ func (t *distributedTarget) saveObject(obj objectSDK.Object, objMeta object.Cont typ := obj.Type() broadcast := typ == objectSDK.TypeTombstone || typ == objectSDK.TypeLink || typ == objectSDK.TypeLock || len(obj.Children()) > 0 return t.distributeObject(obj, objMeta, encObj, func(obj objectSDK.Object, objMeta object.ContentMeta, encObj encodedObject) error { - return t.placementIterator.iterateNodesForObject(obj.GetID(), repRules, objNodeLists, broadcast, func(node nodeDesc) error { + return t.iterateNodesForObject(obj.GetID(), repRules, objNodeLists, broadcast, func(node nodeDesc) error { return t.sendObject(obj, objMeta, encObj, node) }) }) @@ -212,7 +218,7 @@ func (t *distributedTarget) distributeObject(obj objectSDK.Object, objMeta objec id := obj.GetID() var err error if t.localOnly { - var l = t.placementIterator.log.With(zap.Stringer("oid", id)) + var l = t.log.With(zap.Stringer("oid", id)) err = t.writeObjectLocally(obj, objMeta, encObj) if err != nil { @@ -267,7 +273,7 @@ func (t *distributedTarget) distributeObject(obj objectSDK.Object, objMeta objec } } - t.placementIterator.log.Debug("submitted object meta information", zap.Stringer("addr", addr)) + t.log.Debug("submitted object meta information", 
zap.Stringer("addr", addr)) } return nil @@ -328,7 +334,7 @@ func (t *distributedTarget) sendObject(obj objectSDK.Object, objMeta object.Cont if t.localNodeInContainer && t.metainfoConsistencyAttr != "" { // These should technically be errors, but we don't have // a complete implementation now, so errors are substituted with logs. - var l = t.placementIterator.log.With(zap.Stringer("oid", obj.GetID()), + var l = t.log.With(zap.Stringer("oid", obj.GetID()), zap.String("node", network.StringifyGroup(node.info.AddressGroup()))) sigs, err := decodeSignatures(sigsRaw) @@ -424,22 +430,12 @@ func (x errNotEnoughNodes) Error() string { x.listIndex, x.required, x.left) } -type placementIterator struct { - log *zap.Logger - neoFSNet NeoFSNetwork - remotePool util.WorkerPool - /* request-dependent */ - // when non-zero, this setting simplifies the object's storage policy - // requirements to a fixed number of object replicas to be retained - linearReplNum uint -} - -func (x placementIterator) iterateNodesForObject(obj oid.ID, replCounts []uint, nodeLists [][]netmap.NodeInfo, broadcast bool, f func(nodeDesc) error) error { - var l = x.log.With(zap.Stringer("oid", obj)) - if x.linearReplNum > 0 { +func (t *distributedTarget) iterateNodesForObject(obj oid.ID, replCounts []uint, nodeLists [][]netmap.NodeInfo, broadcast bool, f func(nodeDesc) error) error { + var l = t.log.With(zap.Stringer("oid", obj)) + if t.linearReplNum > 0 { ns := slices.Concat(nodeLists...) 
nodeLists = [][]netmap.NodeInfo{ns} - replCounts = []uint{x.linearReplNum} + replCounts = []uint{t.linearReplNum} } var processedNodesMtx sync.RWMutex var nextNodeGroupKeys []string @@ -513,7 +509,7 @@ func (x placementIterator) iterateNodesForObject(obj oid.ID, replCounts []uint, } continue } - if nr.desc.local = x.neoFSNet.IsLocalNodePublicKey(pk); !nr.desc.local { + if nr.desc.local = t.neoFSNet.IsLocalNodePublicKey(pk); !nr.desc.local { nr.desc.info, nr.convertErr = convertNodeInfo(nodeLists[listInd][j]) } processedNodesMtx.Lock() @@ -545,7 +541,7 @@ func (x placementIterator) iterateNodesForObject(obj oid.ID, replCounts []uint, go processNode(pks, listInd, nr, &wg) continue } - if err := x.remotePool.Submit(func() { + if err := t.remotePool.Submit(func() { processNode(pks, listInd, nr, &wg) }); err != nil { wg.Done() @@ -573,7 +569,7 @@ broadcast: if ok { continue } - if nr.desc.local = x.neoFSNet.IsLocalNodePublicKey(pk); !nr.desc.local { + if nr.desc.local = t.neoFSNet.IsLocalNodePublicKey(pk); !nr.desc.local { nr.desc.info, nr.convertErr = convertNodeInfo(nodeLists[i][j]) } processedNodesMtx.Lock() @@ -591,7 +587,7 @@ broadcast: go processNode(pks, -1, nr, &wg) continue } - if err := x.remotePool.Submit(func() { + if err := t.remotePool.Submit(func() { processNode(pks, -1, nr, &wg) }); err != nil { wg.Done() diff --git a/pkg/services/object/put/ec.go b/pkg/services/object/put/ec.go index 324a1722dd..774b64e0d4 100644 --- a/pkg/services/object/put/ec.go +++ b/pkg/services/object/put/ec.go @@ -112,7 +112,7 @@ func (t *distributedTarget) distributeECPart(part object.Object, objMeta objectc if firstErr == nil { firstErr = fmt.Errorf("save on SN #%d (%s): %w", idx, na, err) } else { - t.placementIterator.log.Info("failed to save EC part on reserve SN", zap.Error(err), zap.Strings("addresses", na)) + t.log.Info("failed to save EC part on reserve SN", zap.Error(err), zap.Strings("addresses", na)) } if idx += total; idx >= len(nodeList) { @@ -123,7 +123,7 @@ func 
(t *distributedTarget) distributeECPart(part object.Object, objMeta objectc func (t *distributedTarget) saveECPartOnNode(obj object.Object, objMeta objectcore.ContentMeta, enc encodedObject, node netmap.NodeInfo) error { var n nodeDesc - n.local = t.placementIterator.neoFSNet.IsLocalNodePublicKey(node.PublicKey()) + n.local = t.neoFSNet.IsLocalNodePublicKey(node.PublicKey()) if !n.local { var err error if n.info, err = convertNodeInfo(node); err != nil { diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go index 37cc236806..ae0a9102d1 100644 --- a/pkg/services/object/put/streamer.go +++ b/pkg/services/object/put/streamer.go @@ -185,16 +185,14 @@ func (p *Service) newCommonTarget(ctx context.Context, cp *util.CommonPrm, opts } return &distributedTarget{ - opCtx: ctx, - fsState: p.networkState, - networkMagicNumber: p.networkMagic, - metaSvc: p.metaSvc, - placementIterator: placementIterator{ - log: p.log, - neoFSNet: p.neoFSNet, - remotePool: p.remotePool, - linearReplNum: uint(opts.CopiesNumber), - }, + opCtx: ctx, + fsState: p.networkState, + networkMagicNumber: p.networkMagic, + metaSvc: p.metaSvc, + log: p.log, + neoFSNet: p.neoFSNet, + remotePool: p.remotePool, + linearReplNum: uint(opts.CopiesNumber), localStorage: p.localStore, keyStorage: p.keyStorage, commonPrm: cp, From f1f30702a0ea44e67733b1ffc4f101aa9a586c92 Mon Sep 17 00:00:00 2001 From: Leonard Lyubich Date: Wed, 6 Aug 2025 17:24:42 +0300 Subject: [PATCH 22/27] sn/object: Do not copy `Service` fields to `distributedTarget` Use `Service` field instead. Although not all fields may be needed, this is a simpler approach anyway. 
Signed-off-by: Leonard Lyubich --- pkg/services/object/put/distibuted_test.go | 86 +++++++++++++++------- pkg/services/object/put/distributed.go | 62 ++++++---------- pkg/services/object/put/ec.go | 4 +- pkg/services/object/put/streamer.go | 13 +--- 4 files changed, 85 insertions(+), 80 deletions(-) diff --git a/pkg/services/object/put/distibuted_test.go b/pkg/services/object/put/distibuted_test.go index fb623c6e7b..5a24e7506e 100644 --- a/pkg/services/object/put/distibuted_test.go +++ b/pkg/services/object/put/distibuted_test.go @@ -69,11 +69,15 @@ func TestIterateNodesForObject(t *testing.T) { cnrNodes[1][1].SetPublicKey(cnrNodes[0][2].PublicKey()) var rwp testWorkerPool iter := distributedTarget{ - log: zap.NewNop(), - neoFSNet: testNetwork{ - localPubKey: cnrNodes[0][2].PublicKey(), + svc: &Service{ + cfg: &cfg{ + log: zap.NewNop(), + remotePool: &rwp, + }, + neoFSNet: testNetwork{ + localPubKey: cnrNodes[0][2].PublicKey(), + }, }, - remotePool: &rwp, } var handlerMtx sync.Mutex var handlerCalls []nodeDesc @@ -157,9 +161,13 @@ func TestIterateNodesForObject(t *testing.T) { cnrNodes := allocNodes([]uint{3, 3, 2}) cnrNodes[1][1].SetPublicKey(cnrNodes[0][1].PublicKey()) iter := distributedTarget{ - log: zap.NewNop(), - neoFSNet: new(testNetwork), - remotePool: new(testWorkerPool), + svc: &Service{ + cfg: &cfg{ + log: zap.NewNop(), + remotePool: new(testWorkerPool), + }, + neoFSNet: new(testNetwork), + }, linearReplNum: 4, } var handlerMtx sync.Mutex @@ -193,9 +201,13 @@ func TestIterateNodesForObject(t *testing.T) { cnrNodes := allocNodes([]uint{2, 3, 2}) cnrNodes[1][2].SetPublicKey(cnrNodes[0][1].PublicKey()) iter := distributedTarget{ - log: zap.NewNop(), - neoFSNet: new(testNetwork), - remotePool: new(testWorkerPool), + svc: &Service{ + cfg: &cfg{ + log: zap.NewNop(), + remotePool: new(testWorkerPool), + }, + neoFSNet: new(testNetwork), + }, } var handlerMtx sync.Mutex var handlerCalls [][]byte @@ -235,9 +247,13 @@ func TestIterateNodesForObject(t *testing.T) { 
err: errors.New("any worker pool error"), } iter := distributedTarget{ - log: zap.NewNop(), - neoFSNet: new(testNetwork), - remotePool: &wp, + svc: &Service{ + cfg: &cfg{ + log: zap.NewNop(), + remotePool: &wp, + }, + neoFSNet: new(testNetwork), + }, } var handlerMtx sync.Mutex var handlerCalls [][]byte @@ -274,9 +290,13 @@ func TestIterateNodesForObject(t *testing.T) { cnrNodes := allocNodes([]uint{2, 3, 1}) var wp testWorkerPool iter := distributedTarget{ - log: zap.NewNop(), - neoFSNet: new(testNetwork), - remotePool: &wp, + svc: &Service{ + cfg: &cfg{ + log: zap.NewNop(), + remotePool: &wp, + }, + neoFSNet: new(testNetwork), + }, } var handlerMtx sync.Mutex var handlerCalls [][]byte @@ -307,9 +327,13 @@ func TestIterateNodesForObject(t *testing.T) { cnrNodes[1][2].SetNetworkEndpoints("definitely invalid network address") var wp testWorkerPool iter := distributedTarget{ - log: zap.NewNop(), - neoFSNet: new(testNetwork), - remotePool: &wp, + svc: &Service{ + cfg: &cfg{ + log: zap.NewNop(), + remotePool: &wp, + }, + neoFSNet: new(testNetwork), + }, } var handlerMtx sync.Mutex var handlerCalls [][]byte @@ -343,9 +367,13 @@ func TestIterateNodesForObject(t *testing.T) { cnrNodes := allocNodes([]uint{2, 3, 1}) var wp testWorkerPool iter := distributedTarget{ - log: zap.NewNop(), - neoFSNet: new(testNetwork), - remotePool: &wp, + svc: &Service{ + cfg: &cfg{ + log: zap.NewNop(), + remotePool: &wp, + }, + neoFSNet: new(testNetwork), + }, } var handlerMtx sync.Mutex var handlerCalls [][]byte @@ -374,11 +402,15 @@ func TestIterateNodesForObject(t *testing.T) { objID := oidtest.ID() cnrNodes := allocNodes([]uint{2, 3, 1}) iter := distributedTarget{ - log: zap.NewNop(), - neoFSNet: new(testNetwork), - remotePool: &testWorkerPool{ - err: errors.New("pool err"), - nFail: 2, + svc: &Service{ + cfg: &cfg{ + log: zap.NewNop(), + remotePool: &testWorkerPool{ + err: errors.New("pool err"), + nFail: 2, + }, + }, + neoFSNet: new(testNetwork), }, } blockCh := make(chan struct{}) diff 
--git a/pkg/services/object/put/distributed.go b/pkg/services/object/put/distributed.go index 84f180792f..c9550e8e3c 100644 --- a/pkg/services/object/put/distributed.go +++ b/pkg/services/object/put/distributed.go @@ -13,13 +13,9 @@ import ( iec "github.com/nspcc-dev/neofs-node/internal/ec" "github.com/nspcc-dev/neofs-node/pkg/core/client" - netmapcore "github.com/nspcc-dev/neofs-node/pkg/core/netmap" "github.com/nspcc-dev/neofs-node/pkg/core/object" - chaincontainer "github.com/nspcc-dev/neofs-node/pkg/morph/client/container" "github.com/nspcc-dev/neofs-node/pkg/network" - "github.com/nspcc-dev/neofs-node/pkg/services/meta" svcutil "github.com/nspcc-dev/neofs-node/pkg/services/object/util" - "github.com/nspcc-dev/neofs-node/pkg/util" neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" "github.com/nspcc-dev/neofs-sdk-go/netmap" objectSDK "github.com/nspcc-dev/neofs-sdk-go/object" @@ -28,24 +24,18 @@ import ( ) type distributedTarget struct { - opCtx context.Context + svc *Service - log *zap.Logger - neoFSNet NeoFSNetwork - remotePool util.WorkerPool + opCtx context.Context // when non-zero, this setting simplifies the object's storage policy // requirements to a fixed number of object replicas to be retained linearReplNum uint - obj *objectSDK.Object - networkMagicNumber uint32 - fsState netmapcore.StateDetailed + obj *objectSDK.Object - cnrClient *chaincontainer.Client metainfoConsistencyAttr string - metaSvc *meta.Meta metaMtx sync.Mutex metaSigner neofscrypto.Signer objSharedMeta []byte @@ -62,13 +52,7 @@ type distributedTarget struct { relay func(nodeDesc) error - fmt *object.FormatValidator - - localStorage ObjectStorage - clientConstructor ClientConstructor - transport Transport - commonPrm *svcutil.CommonPrm - keyStorage *svcutil.KeyStorage + commonPrm *svcutil.CommonPrm localOnly bool @@ -152,7 +136,7 @@ func (t *distributedTarget) Close() (oid.ID, error) { var objMeta object.ContentMeta if !tombOrLink || t.localNodeInContainer { var err error - if 
objMeta, err = t.fmt.ValidateContent(t.obj); err != nil { + if objMeta, err = t.svc.fmtValidator.ValidateContent(t.obj); err != nil { return oid.ID{}, fmt.Errorf("(%T) could not validate payload content: %w", t, err) } } @@ -218,7 +202,7 @@ func (t *distributedTarget) distributeObject(obj objectSDK.Object, objMeta objec id := obj.GetID() var err error if t.localOnly { - var l = t.log.With(zap.Stringer("oid", id)) + var l = t.svc.log.With(zap.Stringer("oid", id)) err = t.writeObjectLocally(obj, objMeta, encObj) if err != nil { @@ -253,13 +237,13 @@ func (t *distributedTarget) distributeObject(obj objectSDK.Object, objMeta objec var objAccepted chan struct{} if await { objAccepted = make(chan struct{}, 1) - t.metaSvc.NotifyObjectSuccess(objAccepted, addr) + t.svc.metaSvc.NotifyObjectSuccess(objAccepted, addr) } - err = t.cnrClient.SubmitObjectPut(t.objSharedMeta, t.collectedSignatures) + err = t.svc.cnrClient.SubmitObjectPut(t.objSharedMeta, t.collectedSignatures) if err != nil { if await { - t.metaSvc.UnsubscribeFromObject(addr) + t.svc.metaSvc.UnsubscribeFromObject(addr) } return fmt.Errorf("failed to submit %s object meta information: %w", addr, err) } @@ -267,21 +251,21 @@ func (t *distributedTarget) distributeObject(obj objectSDK.Object, objMeta objec if await { select { case <-t.opCtx.Done(): - t.metaSvc.UnsubscribeFromObject(addr) + t.svc.metaSvc.UnsubscribeFromObject(addr) return fmt.Errorf("interrupted awaiting for %s object meta information: %w", addr, t.opCtx.Err()) case <-objAccepted: } } - t.log.Debug("submitted object meta information", zap.Stringer("addr", addr)) + t.svc.log.Debug("submitted object meta information", zap.Stringer("addr", addr)) } return nil } func (t *distributedTarget) encodeObjectMetadata(obj objectSDK.Object) []byte { - currBlock := t.fsState.CurrentBlock() - currEpochDuration := t.fsState.CurrentEpochDuration() + currBlock := t.svc.networkState.CurrentBlock() + currEpochDuration := t.svc.networkState.CurrentEpochDuration() 
expectedVUB := (uint64(currBlock)/currEpochDuration + 2) * currEpochDuration firstObj := obj.GetFirstID() @@ -302,7 +286,7 @@ func (t *distributedTarget) encodeObjectMetadata(obj objectSDK.Object) []byte { } return object.EncodeReplicationMetaInfo(obj.GetContainerID(), obj.GetID(), firstObj, obj.GetPreviousID(), - obj.PayloadSize(), typ, deletedObjs, lockedObjs, expectedVUB, t.networkMagicNumber) + obj.PayloadSize(), typ, deletedObjs, lockedObjs, expectedVUB, t.svc.networkMagic) } func (t *distributedTarget) sendObject(obj objectSDK.Object, objMeta object.ContentMeta, encObj encodedObject, node nodeDesc) error { @@ -320,12 +304,12 @@ func (t *distributedTarget) sendObject(obj objectSDK.Object, objMeta object.Cont var sigsRaw []byte var err error if encObj.hdrOff > 0 { - sigsRaw, err = t.transport.SendReplicationRequestToNode(t.opCtx, encObj.b, node.info) + sigsRaw, err = t.svc.transport.SendReplicationRequestToNode(t.opCtx, encObj.b, node.info) if err != nil { err = fmt.Errorf("replicate object to remote node (key=%x): %w", node.info.PublicKey(), err) } } else { - err = putObjectToNode(t.opCtx, node.info, &obj, t.keyStorage, t.clientConstructor, t.commonPrm) + err = putObjectToNode(t.opCtx, node.info, &obj, t.svc.keyStorage, t.svc.clientConstructor, t.commonPrm) } if err != nil { return fmt.Errorf("could not close object stream: %w", err) @@ -334,7 +318,7 @@ func (t *distributedTarget) sendObject(obj objectSDK.Object, objMeta object.Cont if t.localNodeInContainer && t.metainfoConsistencyAttr != "" { // These should technically be errors, but we don't have // a complete implementation now, so errors are substituted with logs. 
- var l = t.log.With(zap.Stringer("oid", obj.GetID()), + var l = t.svc.log.With(zap.Stringer("oid", obj.GetID()), zap.String("node", network.StringifyGroup(node.info.AddressGroup()))) sigs, err := decodeSignatures(sigsRaw) @@ -366,7 +350,7 @@ func (t *distributedTarget) sendObject(obj objectSDK.Object, objMeta object.Cont } func (t *distributedTarget) writeObjectLocally(obj objectSDK.Object, objMeta object.ContentMeta, encObj encodedObject) error { - if err := putObjectLocally(t.localStorage, &obj, objMeta, &encObj); err != nil { + if err := putObjectLocally(t.svc.localStore, &obj, objMeta, &encObj); err != nil { return err } @@ -431,7 +415,7 @@ func (x errNotEnoughNodes) Error() string { } func (t *distributedTarget) iterateNodesForObject(obj oid.ID, replCounts []uint, nodeLists [][]netmap.NodeInfo, broadcast bool, f func(nodeDesc) error) error { - var l = t.log.With(zap.Stringer("oid", obj)) + var l = t.svc.log.With(zap.Stringer("oid", obj)) if t.linearReplNum > 0 { ns := slices.Concat(nodeLists...) 
nodeLists = [][]netmap.NodeInfo{ns} @@ -509,7 +493,7 @@ func (t *distributedTarget) iterateNodesForObject(obj oid.ID, replCounts []uint, } continue } - if nr.desc.local = t.neoFSNet.IsLocalNodePublicKey(pk); !nr.desc.local { + if nr.desc.local = t.svc.neoFSNet.IsLocalNodePublicKey(pk); !nr.desc.local { nr.desc.info, nr.convertErr = convertNodeInfo(nodeLists[listInd][j]) } processedNodesMtx.Lock() @@ -541,7 +525,7 @@ func (t *distributedTarget) iterateNodesForObject(obj oid.ID, replCounts []uint, go processNode(pks, listInd, nr, &wg) continue } - if err := t.remotePool.Submit(func() { + if err := t.svc.remotePool.Submit(func() { processNode(pks, listInd, nr, &wg) }); err != nil { wg.Done() @@ -569,7 +553,7 @@ broadcast: if ok { continue } - if nr.desc.local = t.neoFSNet.IsLocalNodePublicKey(pk); !nr.desc.local { + if nr.desc.local = t.svc.neoFSNet.IsLocalNodePublicKey(pk); !nr.desc.local { nr.desc.info, nr.convertErr = convertNodeInfo(nodeLists[i][j]) } processedNodesMtx.Lock() @@ -587,7 +571,7 @@ broadcast: go processNode(pks, -1, nr, &wg) continue } - if err := t.remotePool.Submit(func() { + if err := t.svc.remotePool.Submit(func() { processNode(pks, -1, nr, &wg) }); err != nil { wg.Done() diff --git a/pkg/services/object/put/ec.go b/pkg/services/object/put/ec.go index 774b64e0d4..56820c25b5 100644 --- a/pkg/services/object/put/ec.go +++ b/pkg/services/object/put/ec.go @@ -112,7 +112,7 @@ func (t *distributedTarget) distributeECPart(part object.Object, objMeta objectc if firstErr == nil { firstErr = fmt.Errorf("save on SN #%d (%s): %w", idx, na, err) } else { - t.log.Info("failed to save EC part on reserve SN", zap.Error(err), zap.Strings("addresses", na)) + t.svc.log.Info("failed to save EC part on reserve SN", zap.Error(err), zap.Strings("addresses", na)) } if idx += total; idx >= len(nodeList) { @@ -123,7 +123,7 @@ func (t *distributedTarget) distributeECPart(part object.Object, objMeta objectc func (t *distributedTarget) saveECPartOnNode(obj object.Object, 
objMeta objectcore.ContentMeta, enc encodedObject, node netmap.NodeInfo) error { var n nodeDesc - n.local = t.neoFSNet.IsLocalNodePublicKey(node.PublicKey()) + n.local = t.svc.neoFSNet.IsLocalNodePublicKey(node.PublicKey()) if !n.local { var err error if n.info, err = convertNodeInfo(node); err != nil { diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go index ae0a9102d1..bc5954d37b 100644 --- a/pkg/services/object/put/streamer.go +++ b/pkg/services/object/put/streamer.go @@ -185,27 +185,16 @@ func (p *Service) newCommonTarget(ctx context.Context, cp *util.CommonPrm, opts } return &distributedTarget{ + svc: p, opCtx: ctx, - fsState: p.networkState, - networkMagicNumber: p.networkMagic, - metaSvc: p.metaSvc, - log: p.log, - neoFSNet: p.neoFSNet, - remotePool: p.remotePool, linearReplNum: uint(opts.CopiesNumber), - localStorage: p.localStore, - keyStorage: p.keyStorage, commonPrm: cp, - clientConstructor: p.clientConstructor, - transport: p.transport, relay: relay, - fmt: p.fmtValidator, containerNodes: opts.containerNodes, ecPart: opts.ecPart, localNodeInContainer: opts.localNodeInContainer, localNodeSigner: opts.localNodeSigner, sessionSigner: opts.sessionSigner, - cnrClient: p.cfg.cnrClient, metainfoConsistencyAttr: metaAttribute(opts.cnr), metaSigner: opts.localSignerRFC6979, localOnly: cp.LocalOnly(), From c6dbbf3df112c22da681fb44a2e4771967d31cbc Mon Sep 17 00:00:00 2001 From: Leonard Lyubich Date: Wed, 6 Aug 2025 17:31:42 +0300 Subject: [PATCH 23/27] sn/object: Rename mutex field closer to the protected one To make it clearer what it locks. 
Signed-off-by: Leonard Lyubich --- pkg/services/object/put/distributed.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/pkg/services/object/put/distributed.go b/pkg/services/object/put/distributed.go index c9550e8e3c..4c291a4490 100644 --- a/pkg/services/object/put/distributed.go +++ b/pkg/services/object/put/distributed.go @@ -36,10 +36,10 @@ type distributedTarget struct { metainfoConsistencyAttr string - metaMtx sync.Mutex - metaSigner neofscrypto.Signer - objSharedMeta []byte - collectedSignatures [][]byte + metaSigner neofscrypto.Signer + objSharedMeta []byte + collectedSignaturesMtx sync.Mutex + collectedSignatures [][]byte containerNodes ContainerNodes localNodeInContainer bool @@ -336,9 +336,9 @@ func (t *distributedTarget) sendObject(obj objectSDK.Object, objMeta object.Cont continue } - t.metaMtx.Lock() + t.collectedSignaturesMtx.Lock() t.collectedSignatures = append(t.collectedSignatures, sig.Value()) - t.metaMtx.Unlock() + t.collectedSignaturesMtx.Unlock() return nil } @@ -360,9 +360,9 @@ func (t *distributedTarget) writeObjectLocally(obj objectSDK.Object, objMeta obj return fmt.Errorf("failed to sign object metadata: %w", err) } - t.metaMtx.Lock() + t.collectedSignaturesMtx.Lock() t.collectedSignatures = append(t.collectedSignatures, sig) - t.metaMtx.Unlock() + t.collectedSignaturesMtx.Unlock() } return nil From 9160803c1ae33586b7db03c62135d1b70f1cd4d3 Mon Sep 17 00:00:00 2001 From: Leonard Lyubich Date: Wed, 6 Aug 2025 17:47:56 +0300 Subject: [PATCH 24/27] sn/object: Struct out state fields of `distributedTarget` There are a lot of fields, so at least the state variables are separated.
Signed-off-by: Leonard Lyubich --- pkg/services/object/put/distributed.go | 76 ++++++++++++++------------ 1 file changed, 41 insertions(+), 35 deletions(-) diff --git a/pkg/services/object/put/distributed.go b/pkg/services/object/put/distributed.go index 4c291a4490..bd37bb1462 100644 --- a/pkg/services/object/put/distributed.go +++ b/pkg/services/object/put/distributed.go @@ -23,6 +23,19 @@ import ( "go.uber.org/zap" ) +type distributedTargetState struct { + obj *objectSDK.Object + // - object if localOnly + // - replicate request if localNodeInContainer + // - payload otherwise + encodedObject encodedObject + + objSharedMeta []byte + + collectedSignaturesMtx sync.Mutex + collectedSignatures [][]byte +} + type distributedTarget struct { svc *Service @@ -32,23 +45,14 @@ type distributedTarget struct { // requirements to a fixed number of object replicas to be retained linearReplNum uint - obj *objectSDK.Object - metainfoConsistencyAttr string - metaSigner neofscrypto.Signer - objSharedMeta []byte - collectedSignaturesMtx sync.Mutex - collectedSignatures [][]byte + metaSigner neofscrypto.Signer containerNodes ContainerNodes localNodeInContainer bool localNodeSigner neofscrypto.Signer sessionSigner neofscrypto.Signer - // - object if localOnly - // - replicate request if localNodeInContainer - // - payload otherwise - encodedObject encodedObject relay func(nodeDesc) error @@ -59,6 +63,8 @@ type distributedTarget struct { // When object from request is an EC part, ecPart.RuleIndex is >= 0. // Undefined when policy have no EC rules. 
ecPart iec.PartInfo + + state distributedTargetState } type nodeDesc struct { @@ -91,9 +97,9 @@ func (t *distributedTarget) WriteHeader(hdr *objectSDK.Object) error { if t.localNodeInContainer { var err error if t.localOnly { - t.encodedObject, err = encodeObjectWithoutPayload(*hdr, int(payloadLen)) + t.state.encodedObject, err = encodeObjectWithoutPayload(*hdr, int(payloadLen)) } else { - t.encodedObject, err = encodeReplicateRequestWithoutPayload(t.localNodeSigner, *hdr, int(payloadLen), t.metainfoConsistencyAttr != "") + t.state.encodedObject, err = encodeReplicateRequestWithoutPayload(t.localNodeSigner, *hdr, int(payloadLen), t.metainfoConsistencyAttr != "") } if err != nil { return fmt.Errorf("encode object into binary: %w", err) @@ -104,29 +110,29 @@ func (t *distributedTarget) WriteHeader(hdr *objectSDK.Object) error { putPayload(b) b = make([]byte, 0, payloadLen) } - t.encodedObject = encodedObject{b: b} + t.state.encodedObject = encodedObject{b: b} } - t.obj = hdr + t.state.obj = hdr return nil } func (t *distributedTarget) Write(p []byte) (n int, err error) { - t.encodedObject.b = append(t.encodedObject.b, p...) + t.state.encodedObject.b = append(t.state.encodedObject.b, p...) 
return len(p), nil } func (t *distributedTarget) Close() (oid.ID, error) { defer func() { - putPayload(t.encodedObject.b) - t.encodedObject.b = nil + putPayload(t.state.encodedObject.b) + t.state.encodedObject.b = nil }() - t.obj.SetPayload(t.encodedObject.b[t.encodedObject.pldOff:]) + t.state.obj.SetPayload(t.state.encodedObject.b[t.state.encodedObject.pldOff:]) - typ := t.obj.Type() + typ := t.state.obj.Type() tombOrLink := typ == objectSDK.TypeLink || typ == objectSDK.TypeTombstone // v2 split link object and tombstone validations are expensive routines @@ -136,17 +142,17 @@ func (t *distributedTarget) Close() (oid.ID, error) { var objMeta object.ContentMeta if !tombOrLink || t.localNodeInContainer { var err error - if objMeta, err = t.svc.fmtValidator.ValidateContent(t.obj); err != nil { + if objMeta, err = t.svc.fmtValidator.ValidateContent(t.state.obj); err != nil { return oid.ID{}, fmt.Errorf("(%T) could not validate payload content: %w", t, err) } } - err := t.saveObject(*t.obj, objMeta, t.encodedObject) + err := t.saveObject(*t.state.obj, objMeta, t.state.encodedObject) if err != nil { return oid.ID{}, err } - return t.obj.GetID(), nil + return t.state.obj.GetID(), nil } func (t *distributedTarget) saveObject(obj objectSDK.Object, objMeta object.ContentMeta, encObj encodedObject) error { @@ -154,7 +160,7 @@ func (t *distributedTarget) saveObject(obj objectSDK.Object, objMeta object.Cont return t.distributeObject(obj, objMeta, encObj, nil) } - objNodeLists, err := t.containerNodes.SortForObject(t.obj.GetID()) + objNodeLists, err := t.containerNodes.SortForObject(t.state.obj.GetID()) if err != nil { return fmt.Errorf("sort container nodes by object ID: %w", err) } @@ -192,11 +198,11 @@ func (t *distributedTarget) saveObject(obj objectSDK.Object, objMeta object.Cont func (t *distributedTarget) distributeObject(obj objectSDK.Object, objMeta object.ContentMeta, encObj encodedObject, placementFn func(obj objectSDK.Object, objMeta object.ContentMeta, encObj 
encodedObject) error) error { defer func() { - t.collectedSignatures = nil + t.state.collectedSignatures = nil }() if t.localNodeInContainer && t.metainfoConsistencyAttr != "" { - t.objSharedMeta = t.encodeObjectMetadata(obj) + t.state.objSharedMeta = t.encodeObjectMetadata(obj) } id := obj.GetID() @@ -218,7 +224,7 @@ func (t *distributedTarget) distributeObject(obj objectSDK.Object, objMeta objec } if t.localNodeInContainer && t.metainfoConsistencyAttr != "" { - if len(t.collectedSignatures) == 0 { + if len(t.state.collectedSignatures) == 0 { return fmt.Errorf("skip metadata chain submit for %s object: no signatures were collected", id) } @@ -240,7 +246,7 @@ func (t *distributedTarget) distributeObject(obj objectSDK.Object, objMeta objec t.svc.metaSvc.NotifyObjectSuccess(objAccepted, addr) } - err = t.svc.cnrClient.SubmitObjectPut(t.objSharedMeta, t.collectedSignatures) + err = t.svc.cnrClient.SubmitObjectPut(t.state.objSharedMeta, t.state.collectedSignatures) if err != nil { if await { t.svc.metaSvc.UnsubscribeFromObject(addr) @@ -332,13 +338,13 @@ func (t *distributedTarget) sendObject(obj objectSDK.Object, objMeta object.Cont continue } - if !sig.Verify(t.objSharedMeta) { + if !sig.Verify(t.state.objSharedMeta) { continue } - t.collectedSignaturesMtx.Lock() - t.collectedSignatures = append(t.collectedSignatures, sig.Value()) - t.collectedSignaturesMtx.Unlock() + t.state.collectedSignaturesMtx.Lock() + t.state.collectedSignatures = append(t.state.collectedSignatures, sig.Value()) + t.state.collectedSignaturesMtx.Unlock() return nil } @@ -355,14 +361,14 @@ func (t *distributedTarget) writeObjectLocally(obj objectSDK.Object, objMeta obj } if t.localNodeInContainer && t.metainfoConsistencyAttr != "" { - sig, err := t.metaSigner.Sign(t.objSharedMeta) + sig, err := t.metaSigner.Sign(t.state.objSharedMeta) if err != nil { return fmt.Errorf("failed to sign object metadata: %w", err) } - t.collectedSignaturesMtx.Lock() - t.collectedSignatures = 
append(t.collectedSignatures, sig) - t.collectedSignaturesMtx.Unlock() + t.state.collectedSignaturesMtx.Lock() + t.state.collectedSignatures = append(t.state.collectedSignatures, sig) + t.state.collectedSignaturesMtx.Unlock() } return nil From 45dcca441024a8655423adb54357ac3a5acb58bb Mon Sep 17 00:00:00 2001 From: Leonard Lyubich Date: Wed, 6 Aug 2025 18:42:09 +0300 Subject: [PATCH 25/27] sn/object: Regroup `distributedTarget` struct fields Signed-off-by: Leonard Lyubich --- pkg/services/object/put/distributed.go | 25 ++++++++++--------------- pkg/services/object/put/streamer.go | 12 ++++++------ 2 files changed, 16 insertions(+), 21 deletions(-) diff --git a/pkg/services/object/put/distributed.go b/pkg/services/object/put/distributed.go index bd37bb1462..f58ba0ac5c 100644 --- a/pkg/services/object/put/distributed.go +++ b/pkg/services/object/put/distributed.go @@ -37,29 +37,24 @@ type distributedTargetState struct { } type distributedTarget struct { - svc *Service - - opCtx context.Context + svc *Service + localNodeSigner neofscrypto.Signer + metaSigner neofscrypto.Signer + /* request parameters */ + opCtx context.Context + commonPrm *svcutil.CommonPrm + localOnly bool // when non-zero, this setting simplifies the object's storage policy // requirements to a fixed number of object replicas to be retained - linearReplNum uint - + linearReplNum uint metainfoConsistencyAttr string + relay func(nodeDesc) error - metaSigner neofscrypto.Signer - + /* processing data */ containerNodes ContainerNodes localNodeInContainer bool - localNodeSigner neofscrypto.Signer sessionSigner neofscrypto.Signer - - relay func(nodeDesc) error - - commonPrm *svcutil.CommonPrm - - localOnly bool - // When object from request is an EC part, ecPart.RuleIndex is >= 0. // Undefined when policy have no EC rules. 
ecPart iec.PartInfo diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go index bc5954d37b..80330349ef 100644 --- a/pkg/services/object/put/streamer.go +++ b/pkg/services/object/put/streamer.go @@ -186,18 +186,18 @@ func (p *Service) newCommonTarget(ctx context.Context, cp *util.CommonPrm, opts return &distributedTarget{ svc: p, + localNodeSigner: opts.localNodeSigner, + metaSigner: opts.localSignerRFC6979, opCtx: ctx, - linearReplNum: uint(opts.CopiesNumber), commonPrm: cp, + localOnly: cp.LocalOnly(), + linearReplNum: uint(opts.CopiesNumber), + metainfoConsistencyAttr: metaAttribute(opts.cnr), relay: relay, containerNodes: opts.containerNodes, - ecPart: opts.ecPart, localNodeInContainer: opts.localNodeInContainer, - localNodeSigner: opts.localNodeSigner, sessionSigner: opts.sessionSigner, - metainfoConsistencyAttr: metaAttribute(opts.cnr), - metaSigner: opts.localSignerRFC6979, - localOnly: cp.LocalOnly(), + ecPart: opts.ecPart, } } From d6634d765a441e2f0f93ac219a630bb38ba8ee0b Mon Sep 17 00:00:00 2001 From: Leonard Lyubich Date: Wed, 6 Aug 2025 18:51:35 +0300 Subject: [PATCH 26/27] sn/object: Get rid of `newCommonTarget` function Signed-off-by: Leonard Lyubich --- pkg/services/object/put/streamer.go | 60 +++++++++++++---------------- 1 file changed, 27 insertions(+), 33 deletions(-) diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go index 80330349ef..21b1a819b6 100644 --- a/pkg/services/object/put/streamer.go +++ b/pkg/services/object/put/streamer.go @@ -41,10 +41,34 @@ func (p *Service) initTarget(ctx context.Context, hdr *object.Object, cp *util.C homomorphicChecksumRequired := !opts.cnr.IsHomomorphicHashingDisabled() + target := &distributedTarget{ + svc: p, + localNodeSigner: opts.localNodeSigner, + metaSigner: opts.localSignerRFC6979, + opCtx: ctx, + commonPrm: cp, + localOnly: cp.LocalOnly(), + linearReplNum: uint(opts.CopiesNumber), + metainfoConsistencyAttr: metaAttribute(opts.cnr), + 
containerNodes: opts.containerNodes, + localNodeInContainer: opts.localNodeInContainer, + ecPart: opts.ecPart, + } + if hdr.Signature() != nil { // prepare untrusted-Put object target + if opts.Relay != nil { + target.relay = func(node nodeDesc) error { + c, err := p.clientConstructor.Get(node.info) + if err != nil { + return fmt.Errorf("could not create SDK client %s: %w", node.info.AddressGroup(), err) + } + + return opts.Relay(node.info, c) + } + } return &validatingTarget{ - nextTarget: p.newCommonTarget(ctx, cp, opts, opts.Relay), + nextTarget: target, fmt: p.fmtValidator, maxPayloadSz: maxPayloadSz, @@ -90,7 +114,7 @@ func (p *Service) initTarget(ctx context.Context, hdr *object.Object, cp *util.C } sessionSigner := user.NewAutoIDSigner(*sessionKey) - opts.sessionSigner = sessionSigner + target.sessionSigner = sessionSigner return &validatingTarget{ fmt: p.fmtValidator, unpreparedObject: true, @@ -101,7 +125,7 @@ func (p *Service) initTarget(ctx context.Context, hdr *object.Object, cp *util.C sessionSigner, sToken, p.networkState.CurrentEpoch(), - p.newCommonTarget(ctx, cp, opts, nil), + target, ), homomorphicChecksumRequired: homomorphicChecksumRequired, }, nil @@ -171,36 +195,6 @@ func (p *Service) prepareOptions(hdr *object.Object, cp *util.CommonPrm, opts *P return nil } -func (p *Service) newCommonTarget(ctx context.Context, cp *util.CommonPrm, opts PutInitOptions, relayFn RelayFunc) internal.Target { - var relay func(nodeDesc) error - if relayFn != nil { - relay = func(node nodeDesc) error { - c, err := p.clientConstructor.Get(node.info) - if err != nil { - return fmt.Errorf("could not create SDK client %s: %w", node.info.AddressGroup(), err) - } - - return relayFn(node.info, c) - } - } - - return &distributedTarget{ - svc: p, - localNodeSigner: opts.localNodeSigner, - metaSigner: opts.localSignerRFC6979, - opCtx: ctx, - commonPrm: cp, - localOnly: cp.LocalOnly(), - linearReplNum: uint(opts.CopiesNumber), - metainfoConsistencyAttr: 
metaAttribute(opts.cnr), - relay: relay, - containerNodes: opts.containerNodes, - localNodeInContainer: opts.localNodeInContainer, - sessionSigner: opts.sessionSigner, - ecPart: opts.ecPart, - } -} - func metaAttribute(cnr container.Container) string { return cnr.Attribute("__NEOFS__METAINFO_CONSISTENCY") } From 00af94f8af5fa84064677354744a9a550413163a Mon Sep 17 00:00:00 2001 From: Leonard Lyubich Date: Wed, 6 Aug 2025 18:58:45 +0300 Subject: [PATCH 27/27] sn/object: Inline `prepareOptions` method No longer need to have private `PutInitOptions` fields with this anymore. Signed-off-by: Leonard Lyubich --- pkg/services/object/put/prm.go | 12 --- pkg/services/object/put/streamer.go | 138 +++++++++++++--------------- 2 files changed, 64 insertions(+), 86 deletions(-) diff --git a/pkg/services/object/put/prm.go b/pkg/services/object/put/prm.go index f59b5fff3d..a4b2b9c068 100644 --- a/pkg/services/object/put/prm.go +++ b/pkg/services/object/put/prm.go @@ -1,26 +1,14 @@ package putsvc import ( - iec "github.com/nspcc-dev/neofs-node/internal/ec" "github.com/nspcc-dev/neofs-node/pkg/core/client" - containerSDK "github.com/nspcc-dev/neofs-sdk-go/container" - neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" ) // RelayFunc relays request using given connection to SN. 
type RelayFunc = func(client.NodeInfo, client.MultiAddressClient) error type PutInitOptions struct { - cnr containerSDK.Container - CopiesNumber uint32 Relay RelayFunc - - containerNodes ContainerNodes - ecPart iec.PartInfo - localNodeInContainer bool - localSignerRFC6979 neofscrypto.Signer - localNodeSigner neofscrypto.Signer - sessionSigner neofscrypto.Signer } diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go index 21b1a819b6..623c8eadce 100644 --- a/pkg/services/object/put/streamer.go +++ b/pkg/services/object/put/streamer.go @@ -29,9 +29,63 @@ func (p *Service) InitPut(ctx context.Context, hdr *object.Object, cp *util.Comm } func (p *Service) initTarget(ctx context.Context, hdr *object.Object, cp *util.CommonPrm, opts PutInitOptions) (internal.Target, error) { - // prepare needed put parameters - if err := p.prepareOptions(hdr, cp, &opts); err != nil { - return nil, fmt.Errorf("(%T) could not prepare put parameters: %w", p, err) + localOnly := cp.LocalOnly() + if localOnly && opts.CopiesNumber > 1 { + return nil, errors.New("storage of multiple object replicas is requested for a local operation") + } + + localNodeKey, err := p.keyStorage.GetKey(nil) + if err != nil { + return nil, fmt.Errorf("get local node's private key: %w", err) + } + + idCnr := hdr.GetContainerID() + if idCnr.IsZero() { + return nil, errors.New("missing container ID") + } + + // get container to store the object + cnr, err := p.cnrSrc.Get(idCnr) + if err != nil { + return nil, fmt.Errorf("(%T) could not get container by ID: %w", p, err) + } + + containerNodes, err := p.neoFSNet.GetContainerNodes(idCnr) + if err != nil { + return nil, fmt.Errorf("select storage nodes for the container: %w", err) + } + + cnrNodes := containerNodes.Unsorted() + ecRulesN := len(containerNodes.ECRules()) + + var localNodeInContainer bool + var ecPart iec.PartInfo + if ecRulesN > 0 { + ecPart, err = iec.GetPartInfo(*hdr) + if err != nil { + return nil, fmt.Errorf("get EC part 
info from object header: %w", err) + } + + repRulesN := len(containerNodes.PrimaryCounts()) + if ecPart.Index >= 0 { + if ecPart.RuleIndex >= ecRulesN { + return nil, fmt.Errorf("invalid EC part info in object header: EC rule idx=%d with %d rules in total", ecPart.RuleIndex, ecRulesN) + } + if hdr.Signature() == nil { + return nil, errors.New("unsigned EC part object") + } + localNodeInContainer = localNodeInSet(p.neoFSNet, cnrNodes[repRulesN+ecPart.RuleIndex]) + } else { + if repRulesN == 0 && hdr.Signature() != nil { + return nil, errors.New("missing EC part info in signed object") + } + localNodeInContainer = localNodeInSets(p.neoFSNet, cnrNodes) + } + } else { + localNodeInContainer = localNodeInSets(p.neoFSNet, cnrNodes) + } + if !localNodeInContainer && localOnly { + return nil, errors.New("local operation on the node not compliant with the container storage policy") } maxPayloadSz := p.maxSizeSrc.MaxObjectSize() @@ -39,20 +93,20 @@ func (p *Service) initTarget(ctx context.Context, hdr *object.Object, cp *util.C return nil, fmt.Errorf("(%T) could not obtain max object size parameter", p) } - homomorphicChecksumRequired := !opts.cnr.IsHomomorphicHashingDisabled() + homomorphicChecksumRequired := !cnr.IsHomomorphicHashingDisabled() target := &distributedTarget{ svc: p, - localNodeSigner: opts.localNodeSigner, - metaSigner: opts.localSignerRFC6979, + localNodeSigner: (*neofsecdsa.Signer)(localNodeKey), + metaSigner: (*neofsecdsa.SignerRFC6979)(localNodeKey), opCtx: ctx, commonPrm: cp, localOnly: cp.LocalOnly(), linearReplNum: uint(opts.CopiesNumber), - metainfoConsistencyAttr: metaAttribute(opts.cnr), - containerNodes: opts.containerNodes, - localNodeInContainer: opts.localNodeInContainer, - ecPart: opts.ecPart, + metainfoConsistencyAttr: metaAttribute(cnr), + containerNodes: containerNodes, + localNodeInContainer: localNodeInContainer, + ecPart: ecPart, } if hdr.Signature() != nil { @@ -131,70 +185,6 @@ func (p *Service) initTarget(ctx context.Context, hdr 
*object.Object, cp *util.C }, nil } -func (p *Service) prepareOptions(hdr *object.Object, cp *util.CommonPrm, opts *PutInitOptions) error { - localOnly := cp.LocalOnly() - if localOnly && opts.CopiesNumber > 1 { - return errors.New("storage of multiple object replicas is requested for a local operation") - } - - localNodeKey, err := p.keyStorage.GetKey(nil) - if err != nil { - return fmt.Errorf("get local node's private key: %w", err) - } - - idCnr := hdr.GetContainerID() - if idCnr.IsZero() { - return errors.New("missing container ID") - } - - // get container to store the object - opts.cnr, err = p.cnrSrc.Get(idCnr) - if err != nil { - return fmt.Errorf("(%T) could not get container by ID: %w", p, err) - } - - opts.containerNodes, err = p.neoFSNet.GetContainerNodes(idCnr) - if err != nil { - return fmt.Errorf("select storage nodes for the container: %w", err) - } - cnrNodes := opts.containerNodes.Unsorted() - ecRulesN := len(opts.containerNodes.ECRules()) - if ecRulesN > 0 { - ecPart, err := iec.GetPartInfo(*hdr) - if err != nil { - return fmt.Errorf("get EC part info from object header: %w", err) - } - - repRulesN := len(opts.containerNodes.PrimaryCounts()) - if ecPart.Index >= 0 { - if ecPart.RuleIndex >= ecRulesN { - return fmt.Errorf("invalid EC part info in object header: EC rule idx=%d with %d rules in total", ecPart.RuleIndex, ecRulesN) - } - if hdr.Signature() == nil { - return errors.New("unsigned EC part object") - } - opts.localNodeInContainer = localNodeInSet(p.neoFSNet, cnrNodes[repRulesN+ecPart.RuleIndex]) - } else { - if repRulesN == 0 && hdr.Signature() != nil { - return errors.New("missing EC part info in signed object") - } - opts.localNodeInContainer = localNodeInSets(p.neoFSNet, cnrNodes) - } - - opts.ecPart = ecPart - } else { - opts.localNodeInContainer = localNodeInSets(p.neoFSNet, cnrNodes) - } - if !opts.localNodeInContainer && localOnly { - return errors.New("local operation on the node not compliant with the container storage policy") - 
} - - opts.localNodeSigner = (*neofsecdsa.Signer)(localNodeKey) - opts.localSignerRFC6979 = (*neofsecdsa.SignerRFC6979)(localNodeKey) - - return nil -} - func metaAttribute(cnr container.Container) string { return cnr.Attribute("__NEOFS__METAINFO_CONSISTENCY") }