
Commit 33de6c8

graph/db: test zombie channel cleaning
1 parent 6a215f2 · commit 33de6c8

File tree: 1 file changed (+140, -0 lines)

graph/db/graph_cache_test.go

Lines changed: 140 additions & 0 deletions
@@ -1,8 +1,11 @@
 package graphdb
 
 import (
+    "encoding/binary"
     "encoding/hex"
+    "math/rand"
     "testing"
+    "time"
 
     "github.com/lightningnetwork/lnd/fn/v2"
     "github.com/lightningnetwork/lnd/graph/db/models"
@@ -21,8 +24,14 @@ var (
         "f4484f",
     )
 
+    pubKey3Bytes, _ = hex.DecodeString(
+        "02a1633e42df2134a2b0330159d66575316471583448590688c7a1418da7" +
+            "33d9e4",
+    )
+
     pubKey1, _ = route.NewVertexFromBytes(pubKey1Bytes)
     pubKey2, _ = route.NewVertexFromBytes(pubKey2Bytes)
+    pubKey3, _ = route.NewVertexFromBytes(pubKey3Bytes)
 )
 
 // TestGraphCacheAddNode tests that a channel going from node A to node B can be
@@ -121,6 +130,80 @@ func TestGraphCacheAddNode(t *testing.T) {
     runTest(pubKey2, pubKey1)
 }
 
+// TestGraphCacheCleanupZombies tests that the cleanupZombies function correctly
+// removes zombie channels from the cache.
+func TestGraphCacheCleanupZombies(t *testing.T) {
+    t.Parallel()
+
+    cache := NewGraphCache(10)
+
+    // Add a channel between two nodes. This will be the zombie.
+    info1 := &models.CachedEdgeInfo{
+        ChannelID:     1000,
+        NodeKey1Bytes: pubKey1,
+        NodeKey2Bytes: pubKey2,
+        Capacity:      500,
+    }
+    cache.AddChannel(info1, nil, nil)
+
+    // Add a second channel, which will NOT be a zombie.
+    info2 := &models.CachedEdgeInfo{
+        ChannelID:     1001,
+        NodeKey1Bytes: pubKey2,
+        NodeKey2Bytes: pubKey3,
+        Capacity:      500,
+    }
+
+    cache.AddChannel(info2, nil, nil)
+    zeroVertex := route.Vertex{}
+
+    // Try to remove the channel, which will mark it as a zombie.
+    cache.RemoveChannel(zeroVertex, zeroVertex, 1000)
+    require.Equal(t, 1, len(cache.zombieIndex))
+
+    // Now, run the cleanup function.
+    cache.cleanupZombies()
+
+    // Check that the zombie channel has been removed from the first node.
+    require.Empty(t, cache.nodeChannels[pubKey1])
+
+    // Check that the second node still has the non-zombie channel.
+    require.Len(t, cache.nodeChannels[pubKey2], 1)
+    _, ok := cache.nodeChannels[pubKey2][1001]
+    require.True(t, ok)
+
+    // And the third node should also have the non-zombie channel.
+    require.Len(t, cache.nodeChannels[pubKey3], 1)
+    _, ok = cache.nodeChannels[pubKey3][1001]
+    require.True(t, ok)
+
+    // And the zombie index should be cleared.
+    require.Empty(t, cache.zombieIndex)
+}
+
+// TestGraphCacheZombieCleanerLifeCycle tests that the zombie cleaner's Start
+// and Stop functions work correctly.
+func TestGraphCacheZombieCleanerLifeCycle(t *testing.T) {
+    t.Parallel()
+
+    cache := NewGraphCache(10)
+    cache.zombieCleanerInterval = 50 * time.Millisecond
+    zeroVertex := route.Vertex{}
+    cache.RemoveChannel(zeroVertex, zeroVertex, 123)
+    cache.Start()
+
+    // Wait for the cleaner to run and clean up the zombie index.
+    require.Eventually(t, func() bool {
+        cache.mtx.RLock()
+        defer cache.mtx.RUnlock()
+        return len(cache.zombieIndex) == 0
+    }, time.Second, 10*time.Millisecond)
+
+    // Stop the cleaner. This will block until the goroutine has exited.
+    // If it doesn't exit, the test will time out.
+    cache.Stop()
+}
+
 func assertCachedPolicyEqual(t *testing.T, original,
     cached *models.CachedEdgePolicy) {
 
@@ -139,3 +222,60 @@ func assertCachedPolicyEqual(t *testing.T, original,
         require.Equal(t, original.ToNodePubKey(), cached.ToNodePubKey())
     }
 }
+
+// BenchmarkGraphCacheCleanupZombies benchmarks the cleanupZombies function
+// with a large number of nodes and channels. It creates a graph cache with
+// 50,000 nodes and 500,000 channels, marks 10% of the channels as zombies,
+// and then runs the cleanup function.
+func BenchmarkGraphCacheCleanupZombies(b *testing.B) {
+    const (
+        numNodes    = 50_000
+        numChannels = 500_000
+    )
+    zeroVertex := route.Vertex{}
+
+    nodes := make([]route.Vertex, numNodes)
+    for i := range numNodes {
+        var pubkeyBytes [33]byte
+        binary.LittleEndian.PutUint64(pubkeyBytes[:], uint64(i*100))
+        node, err := route.NewVertexFromBytes(pubkeyBytes[:])
+        if err != nil {
+            b.Fatalf("unable to create pubkey: %v", err)
+        }
+
+        nodes[i] = node
+    }
+
+    for range b.N {
+        cache := NewGraphCache(numNodes)
+        for i := range numChannels {
+            node1 := nodes[rand.Intn(numNodes)]
+            node2 := nodes[rand.Intn(numNodes)]
+            chanID := uint64(i * 1000)
+            info := &models.CachedEdgeInfo{
+                ChannelID:     chanID,
+                NodeKey1Bytes: node1,
+                NodeKey2Bytes: node2,
+                Capacity:      1000000,
+            }
+
+            cache.AddChannel(info, nil, nil)
+        }
+
+        // Mark 10% of the channels as zombies + some invalid ones.
+        for j := range numChannels / 10 {
+            cache.RemoveChannel(zeroVertex, zeroVertex,
+                uint64(j*1000*10))
+            cache.RemoveChannel(zeroVertex, zeroVertex,
+                uint64(j*1000*10+5))
+        }
+
+        b.StartTimer()
+        cache.cleanupZombies()
+        b.StopTimer()
+    }
+
+    b.ReportAllocs()
+    b.ReportMetric(float64(b.Elapsed().Milliseconds())/float64(b.N),
+        "ms/op")
+}
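Note on what the lifecycle test exercises: TestGraphCacheZombieCleanerLifeCycle assumes the cache runs a background goroutine that sweeps the zombie index on a timer, and that Stop blocks until that goroutine exits. For readers not following the lnd source, below is a minimal, self-contained sketch of that ticker-driven Start/Stop pattern. The type, field, and method names (zombieCache, markZombie, cleanerInterval) are simplified stand-ins for illustration, not the actual GraphCache implementation from this commit.

// Sketch only: a simplified cache with a background zombie cleaner,
// illustrating the Start/Stop pattern the test above relies on.
package main

import (
    "sync"
    "time"
)

type zombieCache struct {
    mtx         sync.RWMutex
    zombieIndex map[uint64]struct{}

    cleanerInterval time.Duration
    quit            chan struct{}
    wg              sync.WaitGroup
}

func newZombieCache(interval time.Duration) *zombieCache {
    return &zombieCache{
        zombieIndex:     make(map[uint64]struct{}),
        cleanerInterval: interval,
        quit:            make(chan struct{}),
    }
}

// markZombie records a channel that could not be removed cleanly so that a
// later sweep can purge any state still referencing it.
func (c *zombieCache) markZombie(chanID uint64) {
    c.mtx.Lock()
    defer c.mtx.Unlock()
    c.zombieIndex[chanID] = struct{}{}
}

// cleanupZombies purges all entries from the zombie index. In the real cache
// this is also where per-node channel maps would be scrubbed.
func (c *zombieCache) cleanupZombies() {
    c.mtx.Lock()
    defer c.mtx.Unlock()
    c.zombieIndex = make(map[uint64]struct{})
}

// Start launches the background goroutine that sweeps zombies on a timer.
func (c *zombieCache) Start() {
    c.wg.Add(1)
    go func() {
        defer c.wg.Done()

        ticker := time.NewTicker(c.cleanerInterval)
        defer ticker.Stop()

        for {
            select {
            case <-ticker.C:
                c.cleanupZombies()
            case <-c.quit:
                return
            }
        }
    }()
}

// Stop signals the cleaner goroutine to exit and blocks until it has done so.
func (c *zombieCache) Stop() {
    close(c.quit)
    c.wg.Wait()
}

func main() {
    cache := newZombieCache(50 * time.Millisecond)
    cache.markZombie(123)

    cache.Start()
    time.Sleep(200 * time.Millisecond) // give the cleaner a chance to run
    cache.Stop()
}

Assuming the standard Go toolchain, the new tests and benchmark in this commit can be run from the repository root with commands along these lines:

go test ./graph/db -run 'TestGraphCacheCleanupZombies|TestGraphCacheZombieCleanerLifeCycle'
go test ./graph/db -run '^$' -bench BenchmarkGraphCacheCleanupZombies -benchmem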
