diff --git a/.changeset/afraid-phones-love.md b/.changeset/afraid-phones-love.md new file mode 100644 index 00000000000..9a2ac388872 --- /dev/null +++ b/.changeset/afraid-phones-love.md @@ -0,0 +1,5 @@ +--- +"@effect/workflow": patch +--- + +Add Workflow type utils diff --git a/.changeset/full-sides-decide.md b/.changeset/full-sides-decide.md new file mode 100644 index 00000000000..42cd852736a --- /dev/null +++ b/.changeset/full-sides-decide.md @@ -0,0 +1,5 @@ +--- +"effect": minor +--- + +Add Effect.fn.Return to allow typing returns on Effect.fn diff --git a/.changeset/good-regions-care.md b/.changeset/good-regions-care.md new file mode 100644 index 00000000000..24054176b55 --- /dev/null +++ b/.changeset/good-regions-care.md @@ -0,0 +1,5 @@ +--- +"effect": minor +--- + +Backport `Graph` module updates diff --git a/.changeset/loud-cows-prove.md b/.changeset/loud-cows-prove.md new file mode 100644 index 00000000000..bfdc2fb2a9e --- /dev/null +++ b/.changeset/loud-cows-prove.md @@ -0,0 +1,38 @@ +--- +"@effect/platform-node-shared": minor +"@effect/platform-node": minor +"@effect/platform-bun": minor +"@effect/cluster": minor +"@effect/rpc": patch +"@effect/workflow": minor +--- + +Backport @effect/cluster from effect v4 + +@effect/cluster no longer requires a Shard Manager, and instead relies on the +`RunnerStorage` service to track runner state. + +To migrate, remove any Shard Manager deployments and use the updated layers in +`@effect/platform-node` or `@effect/platform-bun`. + +# Breaking Changes + +- `ShardManager` module has been removed +- `EntityNotManagedByRunner` error has been removed +- Shard locks now use database advisory locks, which require stable sessions + for database connections. This means load balancers or proxies that rotate + connections may cause issues. +- `@effect/platform-node/NodeClusterSocketRunner` is now + `@effect/cluster/NodeClusterSocket` +- `@effect/platform-node/NodeClusterHttpRunner` is now + `@effect/cluster/NodeClusterHttp` +- `@effect/platform-bun/BunClusterSocketRunner` is now + `@effect/cluster/BunClusterSocket` +- `@effect/platform-bun/BunClusterHttpRunner` is now + `@effect/cluster/BunClusterHttp` + +# New Features + +- `RunnerHealth.layerK8s` has been added, which uses the Kubernetes API to track + runner health and liveness. To use it, you will need a service account with + permissions to read pod information. 
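For orientation, runner wiring after this change might look roughly like the sketch below. This is a minimal, hypothetical example rather than a verified API surface: `NodeClusterSocket.layer` is assumed from the rename list above, and the `RunnerHealth.layerK8s` options (`namespace`, `labelSelector`) are taken from this PR. The resulting layer still requires `HttpClient` and `FileSystem` from the platform layers, plus your `RunnerStorage`/`MessageStorage` implementations.

```ts
// Hypothetical migration sketch: no Shard Manager deployment remains.
// `NodeClusterSocket.layer` is an assumed entry point based on the rename
// list above; `RunnerHealth.layerK8s` and its options come from this PR.
import * as NodeClusterSocket from "@effect/cluster/NodeClusterSocket"
import * as RunnerHealth from "@effect/cluster/RunnerHealth"
import * as Layer from "effect/Layer"

const ClusterLive = NodeClusterSocket.layer.pipe(
  // Track runner liveness via the Kubernetes API. The pod's service
  // account must be allowed to list pods (see "New Features" above), and
  // the layer still needs HttpClient and FileSystem from the platform.
  Layer.provide(RunnerHealth.layerK8s({ namespace: "production" }))
)
// Shard coordination now goes through RunnerStorage (database advisory
// locks), so no separate coordinator layer is wired in.
```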
diff --git a/.changeset/petite-signs-thank.md b/.changeset/petite-signs-thank.md new file mode 100644 index 00000000000..a3e4040b348 --- /dev/null +++ b/.changeset/petite-signs-thank.md @@ -0,0 +1,5 @@ +--- +"@effect/sql-pg": patch +--- + +disable pg onnotice by default diff --git a/.changeset/plenty-bats-ask.md b/.changeset/plenty-bats-ask.md new file mode 100644 index 00000000000..dcdbf63508d --- /dev/null +++ b/.changeset/plenty-bats-ask.md @@ -0,0 +1,5 @@ +--- +"@effect/platform": patch +--- + +expose Layer output in HttpLayerRouter.serve diff --git a/.changeset/sad-bags-fall.md b/.changeset/sad-bags-fall.md new file mode 100644 index 00000000000..11d779e89d0 --- /dev/null +++ b/.changeset/sad-bags-fall.md @@ -0,0 +1,5 @@ +--- +"@effect/sql-pg": minor +--- + +Use "pg" npm library for @effect/sql-pg backend diff --git a/.changeset/warm-aliens-dig.md b/.changeset/warm-aliens-dig.md new file mode 100644 index 00000000000..788ddc9cafd --- /dev/null +++ b/.changeset/warm-aliens-dig.md @@ -0,0 +1,5 @@ +--- +"effect": minor +--- + +add experimental HashRing module diff --git a/packages/cluster/src/ClusterError.ts b/packages/cluster/src/ClusterError.ts index 433504d938e..423e91ad691 100644 --- a/packages/cluster/src/ClusterError.ts +++ b/packages/cluster/src/ClusterError.ts @@ -45,30 +45,6 @@ export class EntityNotAssignedToRunner extends Schema.TaggedError()( - "EntityNotManagedByRunner", - { address: EntityAddress } -) { - /** - * @since 1.0.0 - */ - readonly [TypeId] = TypeId - - /** - * @since 1.0.0 - */ - static is(u: unknown): u is EntityNotManagedByRunner { - return hasProperty(u, TypeId) && isTagged(u, "EntityNotManagedByRunner") - } -} - /** * Represents an error that occurs when a message fails to be properly * deserialized by an entity. diff --git a/packages/cluster/src/ClusterMetrics.ts b/packages/cluster/src/ClusterMetrics.ts index aec47cc152e..e8ee5c7809e 100644 --- a/packages/cluster/src/ClusterMetrics.ts +++ b/packages/cluster/src/ClusterMetrics.ts @@ -3,12 +3,6 @@ */ import * as Metric from "effect/Metric" -/** - * @since 1.0.0 - * @category metrics - */ -export const shards = Metric.gauge("effect_cluster_shards") - /** * @since 1.0.0 * @category metrics @@ -21,28 +15,30 @@ export const entities = Metric.gauge("effect_cluster_entities", { * @since 1.0.0 * @category metrics */ -export const singletons = Metric.gauge("effect_cluster_singletons") - -/** - * @since 1.0.0 - * @category metrics - */ -export const runners = Metric.gauge("effect_cluster_runners") +export const singletons = Metric.gauge("effect_cluster_singletons", { + bigint: true +}) /** * @since 1.0.0 * @category metrics */ -export const assignedShards = Metric.gauge("effect_cluster_shards_assigned") +export const runners = Metric.gauge("effect_cluster_runners", { + bigint: true +}) /** * @since 1.0.0 * @category metrics */ -export const unassignedShards = Metric.gauge("effect_cluster_shards_unassigned") +export const runnersHealthy = Metric.gauge("effect_cluster_runners_healthy", { + bigint: true +}) /** * @since 1.0.0 * @category metrics */ -export const rebalances = Metric.counter("effect_cluster_rebalances") +export const shards = Metric.gauge("effect_cluster_shards", { + bigint: true +}) diff --git a/packages/cluster/src/ClusterWorkflowEngine.ts b/packages/cluster/src/ClusterWorkflowEngine.ts index 1559fff4d13..2d543c76006 100644 --- a/packages/cluster/src/ClusterWorkflowEngine.ts +++ b/packages/cluster/src/ClusterWorkflowEngine.ts @@ -2,24 +2,27 @@ * @since 1.0.0 */ import * as Rpc from "@effect/rpc/Rpc" 
+import * as RpcServer from "@effect/rpc/RpcServer" import { DurableDeferred } from "@effect/workflow" import * as Activity from "@effect/workflow/Activity" import * as DurableClock from "@effect/workflow/DurableClock" import * as Workflow from "@effect/workflow/Workflow" import { WorkflowEngine, WorkflowInstance } from "@effect/workflow/WorkflowEngine" import * as Arr from "effect/Array" +import * as Cause from "effect/Cause" import * as Context from "effect/Context" import * as DateTime from "effect/DateTime" import * as Duration from "effect/Duration" import * as Effect from "effect/Effect" import type * as Exit from "effect/Exit" import * as Fiber from "effect/Fiber" +import * as FiberId from "effect/FiberId" import * as Layer from "effect/Layer" import * as Option from "effect/Option" import type * as ParseResult from "effect/ParseResult" import * as PrimaryKey from "effect/PrimaryKey" import * as RcMap from "effect/RcMap" -import * as Record from "effect/Record" +import type * as Record from "effect/Record" import * as Runtime from "effect/Runtime" import * as Schedule from "effect/Schedule" import * as Schema from "effect/Schema" @@ -190,16 +193,7 @@ export const make = Effect.gen(function*() { times: 3, schedule: Schedule.exponential(250) }), - Effect.orDie, - (effect, { activity, attempt, executionId }) => - Effect.withSpan(effect, "WorkflowEngine.resetActivityAttempt", { - captureStackTrace: false, - attributes: { - name: activity.name, - executionId, - attempt - } - }) + Effect.orDie ) const clearClock = Effect.fnUntraced(function*(options: { @@ -260,13 +254,12 @@ export const make = Effect.gen(function*() { return { run: (request: Entity.Request) => { const instance = WorkflowInstance.initial(workflow, executionId) - let payload = request.payload + const payload = request.payload let parent: { workflowName: string; executionId: string } | undefined if (payload[payloadParentKey]) { parent = payload[payloadParentKey] - payload = Record.remove(payload, payloadParentKey) } - return execute(payload, executionId).pipe( + return execute(workflow.payloadSchema.make(payload), executionId).pipe( Effect.ensuring(Effect.suspend(() => { if (!instance.suspended) { return parent ? 
ensureSuccess(sendResumeParent(parent)) : Effect.void @@ -291,9 +284,10 @@ export const make = Effect.gen(function*() { ) as any }, - activity: Effect.fnUntraced( - function*(request: Entity.Request) { - const activityId = `${executionId}/${request.payload.name}` + activity(request: Entity.Request) { + const activityId = `${executionId}/${request.payload.name}` + const instance = WorkflowInstance.initial(workflow, executionId) + return Effect.gen(function*() { let entry = activities.get(activityId) while (!entry) { const latch = Effect.unsafeMakeLatch() @@ -301,7 +295,6 @@ export const make = Effect.gen(function*() { yield* latch.await entry = activities.get(activityId) } - const instance = WorkflowInstance.initial(workflow, executionId) const contextMap = new Map(entry.runtime.context.unsafeMap) contextMap.set(Activity.CurrentAttempt.key, request.payload.attempt) contextMap.set(WorkflowInstance.key, instance) @@ -311,23 +304,29 @@ export const make = Effect.gen(function*() { runtimeFlags: Runtime.defaultRuntimeFlags }) return yield* entry.activity.executeEncoded.pipe( - Effect.interruptible, - Effect.onInterrupt(() => { - instance.suspended = true - return Effect.void - }), - Workflow.intoResult, - Effect.provide(runtime), - Effect.ensuring(Effect.sync(() => { - activities.delete(activityId) - })) + Effect.provide(runtime) ) - }, - Rpc.wrap({ - fork: true, - uninterruptible: true - }) - ), + }).pipe( + Workflow.intoResult, + Effect.catchAllCause((cause) => { + const interruptors = Cause.interruptors(cause) + // we only want to store explicit interrupts + const ids = Array.from(interruptors, (id) => Array.from(FiberId.ids(id))).flat() + const suspend = ids.includes(RpcServer.fiberIdClientInterrupt.id) || + ids.includes(RpcServer.fiberIdTransientInterrupt.id) + return suspend ? 
Effect.succeed(new Workflow.Suspended()) : Effect.failCause(cause) + }), + Effect.provideService(WorkflowInstance, instance), + Effect.provideService(Activity.CurrentAttempt, request.payload.attempt), + Effect.ensuring(Effect.sync(() => { + activities.delete(activityId) + })), + Rpc.wrap({ + fork: true, + uninterruptible: true + }) + ) + }, deferred: Effect.fnUntraced(function*(request: Entity.Request) { yield* ensureSuccess(resume(workflow, executionId)) @@ -407,27 +406,10 @@ export const make = Effect.gen(function*() { times: 3, schedule: Schedule.exponential(250) }), - Effect.orDie, - (effect, workflow, executionId) => - Effect.withSpan(effect, "WorkflowEngine.interrupt", { - captureStackTrace: false, - attributes: { - name: workflow.name, - executionId - } - }) + Effect.orDie ), - resume: (workflow, executionId) => - ensureSuccess(resume(workflow, executionId)).pipe( - Effect.withSpan("WorkflowEngine.resume", { - captureStackTrace: false, - attributes: { - name: workflow.name, - executionId - } - }) - ), + resume: (workflow, executionId) => ensureSuccess(resume(workflow, executionId)), activityExecute: Effect.fnUntraced( function*({ activity, attempt }) { @@ -460,15 +442,7 @@ export const make = Effect.gen(function*() { return result } }, - Effect.scoped, - (effect, { activity, attempt }) => - Effect.withSpan(effect, "WorkflowEngine.activityExecute", { - captureStackTrace: false, - attributes: { - name: activity.name, - attempt - } - }) + Effect.scoped ), deferredResult: (deferred) => @@ -493,13 +467,7 @@ export const make = Effect.gen(function*() { times: 3, schedule: Schedule.exponential(250) }), - Effect.orDie, - Effect.withSpan("WorkflowEngine.deferredResult", { - captureStackTrace: false, - attributes: { - name: deferred.name - } - }) + Effect.orDie ), deferredDone: Effect.fnUntraced( @@ -512,15 +480,7 @@ export const make = Effect.gen(function*() { }, { discard: true }) ) }, - Effect.scoped, - (effect, { deferredName, executionId }) => - Effect.withSpan(effect, "WorkflowEngine.deferredDone", { - captureStackTrace: false, - attributes: { - name: deferredName, - executionId - } - }) + Effect.scoped ), scheduleClock(options) { diff --git a/packages/cluster/src/Entity.ts b/packages/cluster/src/Entity.ts index 7f152a5acfa..f458154fc1b 100644 --- a/packages/cluster/src/Entity.ts +++ b/packages/cluster/src/Entity.ts @@ -24,12 +24,7 @@ import * as Predicate from "effect/Predicate" import type * as Schedule from "effect/Schedule" import { Scope } from "effect/Scope" import type * as Stream from "effect/Stream" -import type { - AlreadyProcessingMessage, - EntityNotManagedByRunner, - MailboxFull, - PersistenceError -} from "./ClusterError.js" +import type { AlreadyProcessingMessage, MailboxFull, PersistenceError } from "./ClusterError.js" import { ShardGroup } from "./ClusterSchema.js" import { EntityAddress } from "./EntityAddress.js" import type { EntityId } from "./EntityId.js" @@ -114,7 +109,7 @@ export interface Entity< entityId: string ) => RpcClient.RpcClient.From< Rpcs, - MailboxFull | AlreadyProcessingMessage | PersistenceError | EntityNotManagedByRunner + MailboxFull | AlreadyProcessingMessage | PersistenceError >, never, Sharding diff --git a/packages/cluster/src/EntityAddress.ts b/packages/cluster/src/EntityAddress.ts index c60145200c7..e1f88433875 100644 --- a/packages/cluster/src/EntityAddress.ts +++ b/packages/cluster/src/EntityAddress.ts @@ -63,3 +63,13 @@ export class EntityAddress extends Schema.Class(SymbolKey)({ export const EntityAddressFromSelf: Schema.Schema = 
Schema.typeSchema( EntityAddress ) + +/** + * @since 4.0.0 + * @category constructors + */ +export const make = (options: { + readonly shardId: ShardId + readonly entityType: EntityType + readonly entityId: EntityId +}): EntityAddress => new EntityAddress(options, { disableValidation: true }) diff --git a/packages/cluster/src/EntityId.ts b/packages/cluster/src/EntityId.ts index b36e4d45770..8f226ae37b4 100644 --- a/packages/cluster/src/EntityId.ts +++ b/packages/cluster/src/EntityId.ts @@ -14,3 +14,9 @@ export const EntityId = Schema.NonEmptyTrimmedString.pipe(Schema.brand("EntityId * @category models */ export type EntityId = typeof EntityId.Type + +/** + * @since 1.0.0 + * @category constructors + */ +export const make = (id: string): EntityId => id as EntityId diff --git a/packages/cluster/src/EntityProxy.ts b/packages/cluster/src/EntityProxy.ts index 2840653e003..6eb5290d069 100644 --- a/packages/cluster/src/EntityProxy.ts +++ b/packages/cluster/src/EntityProxy.ts @@ -6,14 +6,13 @@ import * as HttpApiGroup from "@effect/platform/HttpApiGroup" import * as Rpc from "@effect/rpc/Rpc" import * as RpcGroup from "@effect/rpc/RpcGroup" import * as Schema from "effect/Schema" -import { AlreadyProcessingMessage, EntityNotManagedByRunner, MailboxFull, PersistenceError } from "./ClusterError.js" +import { AlreadyProcessingMessage, MailboxFull, PersistenceError } from "./ClusterError.js" import type * as Entity from "./Entity.js" const clientErrors = [ MailboxFull, AlreadyProcessingMessage, - PersistenceError, - EntityNotManagedByRunner + PersistenceError ] as const /** @@ -94,12 +93,14 @@ export type ConvertRpcs = Rpcs exte }>, _Success, Schema.Schema< - _Error["Type"] | MailboxFull | AlreadyProcessingMessage | PersistenceError | EntityNotManagedByRunner, + | _Error["Type"] + | MailboxFull + | AlreadyProcessingMessage + | PersistenceError | _Error["Encoded"] | typeof MailboxFull["Encoded"] | typeof AlreadyProcessingMessage["Encoded"] - | typeof PersistenceError["Encoded"] - | typeof EntityNotManagedByRunner["Encoded"], + | typeof PersistenceError["Encoded"], _Error["Context"] > > @@ -113,8 +114,7 @@ export type ConvertRpcs = Rpcs exte Schema.Union<[ typeof MailboxFull, typeof AlreadyProcessingMessage, - typeof PersistenceError, - typeof EntityNotManagedByRunner + typeof PersistenceError ]> > : never @@ -210,7 +210,7 @@ export type ConvertHttpApi = Rpcs extends Rpc.Rpc< _Payload["Type"], never, _Success["Type"], - _Error["Type"] | MailboxFull | AlreadyProcessingMessage | PersistenceError | EntityNotManagedByRunner, + _Error["Type"] | MailboxFull | AlreadyProcessingMessage | PersistenceError, _Payload["Context"] | _Success["Context"], _Error["Context"] > @@ -222,6 +222,6 @@ export type ConvertHttpApi = Rpcs extends Rpc.Rpc< _Payload["Type"], never, void, - MailboxFull | AlreadyProcessingMessage | PersistenceError | EntityNotManagedByRunner + MailboxFull | AlreadyProcessingMessage | PersistenceError > : never diff --git a/packages/cluster/src/HttpCommon.ts b/packages/cluster/src/HttpCommon.ts deleted file mode 100644 index c2e25a8dce5..00000000000 --- a/packages/cluster/src/HttpCommon.ts +++ /dev/null @@ -1,73 +0,0 @@ -/** - * @since 1.0.0 - */ -import * as HttpClient from "@effect/platform/HttpClient" -import * as HttpClientRequest from "@effect/platform/HttpClientRequest" -import * as Socket from "@effect/platform/Socket" -import * as RpcClient from "@effect/rpc/RpcClient" -import * as RpcSerialization from "@effect/rpc/RpcSerialization" -import * as Effect from "effect/Effect" -import * as Layer 
from "effect/Layer" -import { RpcClientProtocol } from "./Runners.js" - -/** - * @since 1.0.0 - * @category Layers - */ -export const layerClientProtocolHttp = (options: { - readonly path: string - readonly https?: boolean | undefined -}): Layer.Layer< - RpcClientProtocol, - never, - RpcSerialization.RpcSerialization | HttpClient.HttpClient -> => - Layer.effect( - RpcClientProtocol, - Effect.gen(function*() { - const serialization = yield* RpcSerialization.RpcSerialization - const client = yield* HttpClient.HttpClient - const https = options.https ?? false - return (address) => { - const clientWithUrl = HttpClient.mapRequest( - client, - HttpClientRequest.prependUrl(`http${https ? "s" : ""}://${address.host}:${address.port}/${options.path}`) - ) - return RpcClient.makeProtocolHttp(clientWithUrl).pipe( - Effect.provideService(RpcSerialization.RpcSerialization, serialization) - ) - } - }) - ) - -/** - * @since 1.0.0 - * @category Layers - */ -export const layerClientProtocolWebsocket = (options: { - readonly path: string - readonly https?: boolean | undefined -}): Layer.Layer< - RpcClientProtocol, - never, - RpcSerialization.RpcSerialization | Socket.WebSocketConstructor -> => - Layer.effect( - RpcClientProtocol, - Effect.gen(function*() { - const serialization = yield* RpcSerialization.RpcSerialization - const https = options.https ?? false - const constructor = yield* Socket.WebSocketConstructor - return Effect.fnUntraced(function*(address) { - const socket = yield* Socket.makeWebSocket( - `ws${https ? "s" : ""}://${address.host}:${address.port}/${options.path}` - ).pipe( - Effect.provideService(Socket.WebSocketConstructor, constructor) - ) - return yield* RpcClient.makeProtocolSocket().pipe( - Effect.provideService(Socket.Socket, socket), - Effect.provideService(RpcSerialization.RpcSerialization, serialization) - ) - }) - }) - ) diff --git a/packages/cluster/src/HttpRunner.ts b/packages/cluster/src/HttpRunner.ts index 321c7698971..4ffcd4bd06a 100644 --- a/packages/cluster/src/HttpRunner.ts +++ b/packages/cluster/src/HttpRunner.ts @@ -1,34 +1,116 @@ /** * @since 1.0.0 */ -import type * as HttpApp from "@effect/platform/HttpApp" -import type * as HttpClient from "@effect/platform/HttpClient" -import * as HttpRouter from "@effect/platform/HttpRouter" -import * as HttpServer from "@effect/platform/HttpServer" -import type * as Socket from "@effect/platform/Socket" -import type * as RpcSerialization from "@effect/rpc/RpcSerialization" +import * as HttpClient from "@effect/platform/HttpClient" +import * as HttpClientRequest from "@effect/platform/HttpClientRequest" +import * as HttpRouter from "@effect/platform/HttpLayerRouter" +import type * as HttpServer from "@effect/platform/HttpServer" +import type { HttpServerRequest } from "@effect/platform/HttpServerRequest" +import type { HttpServerResponse } from "@effect/platform/HttpServerResponse" +import * as Socket from "@effect/platform/Socket" +import * as RpcClient from "@effect/rpc/RpcClient" +import * as RpcSerialization from "@effect/rpc/RpcSerialization" import * as RpcServer from "@effect/rpc/RpcServer" import * as Effect from "effect/Effect" import * as Layer from "effect/Layer" import type { Scope } from "effect/Scope" -import { layerClientProtocolHttp, layerClientProtocolWebsocket } from "./HttpCommon.js" import type { MessageStorage } from "./MessageStorage.js" +import type { RunnerHealth } from "./RunnerHealth.js" import * as Runners from "./Runners.js" +import { RpcClientProtocol } from "./Runners.js" import * as RunnerServer from 
"./RunnerServer.js" +import type { RunnerStorage } from "./RunnerStorage.js" import * as Sharding from "./Sharding.js" import type * as ShardingConfig from "./ShardingConfig.js" -import * as ShardManager from "./ShardManager.js" -import type { ShardStorage } from "./ShardStorage.js" -import * as SynchronizedClock from "./SynchronizedClock.js" + +/** + * @since 1.0.0 + * @category Layers + */ +export const layerClientProtocolHttp = (options: { + readonly path: string + readonly https?: boolean | undefined +}): Layer.Layer< + RpcClientProtocol, + never, + RpcSerialization.RpcSerialization | HttpClient.HttpClient +> => + Layer.effect(RpcClientProtocol)( + Effect.gen(function*() { + const serialization = yield* RpcSerialization.RpcSerialization + const client = yield* HttpClient.HttpClient + const https = options.https ?? false + return (address) => { + const clientWithUrl = HttpClient.mapRequest( + client, + HttpClientRequest.prependUrl(`http${https ? "s" : ""}://${address.host}:${address.port}/${options.path}`) + ) + return RpcClient.makeProtocolHttp(clientWithUrl).pipe( + Effect.provideService(RpcSerialization.RpcSerialization, serialization) + ) + } + }) + ) + +/** + * @since 1.0.0 + * @category Layers + */ +export const layerClientProtocolHttpDefault: Layer.Layer< + Runners.RpcClientProtocol, + never, + RpcSerialization.RpcSerialization | HttpClient.HttpClient +> = layerClientProtocolHttp({ path: "/" }) + +/** + * @since 1.0.0 + * @category Layers + */ +export const layerClientProtocolWebsocket = (options: { + readonly path: string + readonly https?: boolean | undefined +}): Layer.Layer< + RpcClientProtocol, + never, + RpcSerialization.RpcSerialization | Socket.WebSocketConstructor +> => + Layer.effect(RpcClientProtocol)( + Effect.gen(function*() { + const serialization = yield* RpcSerialization.RpcSerialization + const https = options.https ?? false + const constructor = yield* Socket.WebSocketConstructor + return Effect.fnUntraced(function*(address) { + const socket = yield* Socket.makeWebSocket( + `ws${https ? 
"s" : ""}://${address.host}:${address.port}/${options.path}` + ).pipe( + Effect.provideService(Socket.WebSocketConstructor, constructor) + ) + return yield* RpcClient.makeProtocolSocket().pipe( + Effect.provideService(Socket.Socket, socket), + Effect.provideService(RpcSerialization.RpcSerialization, serialization) + ) + }) + }) + ) + +/** + * @since 1.0.0 + * @category Layers + */ +export const layerClientProtocolWebsocketDefault: Layer.Layer< + Runners.RpcClientProtocol, + never, + RpcSerialization.RpcSerialization | Socket.WebSocketConstructor +> = layerClientProtocolWebsocket({ path: "/" }) /** * @since 1.0.0 * @category Http App */ -export const toHttpApp: Effect.Effect< - HttpApp.Default, +export const toHttpEffect: Effect.Effect< + Effect.Effect, never, - Scope | Sharding.Sharding | RpcSerialization.RpcSerialization | MessageStorage + Scope | RpcSerialization.RpcSerialization | Sharding.Sharding | MessageStorage > = Effect.gen(function*() { const handlers = yield* Layer.build(RunnerServer.layerHandlers) return yield* RpcServer.toHttpApp(Runners.Rpcs, { @@ -41,18 +123,16 @@ export const toHttpApp: Effect.Effect< * @since 1.0.0 * @category Http App */ -export const toHttpAppWebsocket: Effect.Effect< - HttpApp.Default, +export const toHttpEffectWebsocket: Effect.Effect< + Effect.Effect, never, - Scope | Sharding.Sharding | RpcSerialization.RpcSerialization | MessageStorage + Scope | RpcSerialization.RpcSerialization | Sharding.Sharding | MessageStorage > = Effect.gen(function*() { const handlers = yield* Layer.build(RunnerServer.layerHandlers) return yield* RpcServer.toHttpAppWebsocket(Runners.Rpcs, { spanPrefix: "RunnerServer", disableTracing: true - }).pipe( - Effect.provide(handlers) - ) + }).pipe(Effect.provide(handlers)) }) /** @@ -62,75 +142,55 @@ export const toHttpAppWebsocket: Effect.Effect< export const layerClient: Layer.Layer< Sharding.Sharding | Runners.Runners, never, - ShardingConfig.ShardingConfig | Runners.RpcClientProtocol | MessageStorage | ShardStorage + ShardingConfig.ShardingConfig | Runners.RpcClientProtocol | MessageStorage | RunnerStorage | RunnerHealth > = Sharding.layer.pipe( - Layer.provideMerge(Runners.layerRpc), - Layer.provideMerge(SynchronizedClock.layer), - Layer.provide(ShardManager.layerClientRpc) + Layer.provideMerge(Runners.layerRpc) ) /** * A HTTP layer for the `Runners` services, that adds a route to the provided - * `HttpRouter.Tag`. - * - * By default, it uses the `HttpRouter.Default` tag. + * `HttpRouter`. * * @since 1.0.0 * @category Layers */ -export const layer = (options: { +export const layerHttpOptions = (options: { readonly path: HttpRouter.PathInput - readonly routerTag?: HttpRouter.HttpRouter.TagClass - readonly logAddress?: boolean | undefined }): Layer.Layer< Sharding.Sharding | Runners.Runners, never, + | RunnerStorage + | RunnerHealth | RpcSerialization.RpcSerialization + | MessageStorage | ShardingConfig.ShardingConfig | Runners.RpcClientProtocol - | HttpServer.HttpServer - | MessageStorage - | ShardStorage -> => { - const layer = RunnerServer.layerWithClients.pipe( + | HttpRouter.HttpRouter +> => + RunnerServer.layerWithClients.pipe( Layer.provide(RpcServer.layerProtocolHttp(options)) ) - return options.logAddress ? 
withLogAddress(layer) : layer -} /** * @since 1.0.0 * @category Layers */ -export const layerWebsocketOptions = (options: { +export const layerWebsocketOptions = (options: { readonly path: HttpRouter.PathInput - readonly routerTag?: HttpRouter.HttpRouter.TagClass - readonly logAddress?: boolean | undefined }): Layer.Layer< Sharding.Sharding | Runners.Runners, never, - | RpcSerialization.RpcSerialization | ShardingConfig.ShardingConfig | Runners.RpcClientProtocol - | HttpServer.HttpServer | MessageStorage - | ShardStorage -> => { - const layer = RunnerServer.layerWithClients.pipe( + | RunnerStorage + | RunnerHealth + | RpcSerialization.RpcSerialization + | HttpRouter.HttpRouter +> => + RunnerServer.layerWithClients.pipe( Layer.provide(RpcServer.layerProtocolWebsocket(options)) ) - return options.logAddress ? withLogAddress(layer) : layer -} - -const withLogAddress = (layer: Layer.Layer): Layer.Layer => - Layer.effectDiscard( - HttpServer.addressFormattedWith((address) => - Effect.annotateLogs(Effect.logInfo(`Listening on: ${address}`), { - package: "@effect/cluster", - service: "Runner" - }) - ) - ).pipe(Layer.provideMerge(layer)) /** * @since 1.0.0 @@ -144,10 +204,10 @@ export const layerHttp: Layer.Layer< | HttpClient.HttpClient | HttpServer.HttpServer | MessageStorage - | ShardStorage -> = HttpRouter.Default.serve().pipe( - Layer.provideMerge(layer({ path: "/", logAddress: true })), - Layer.provide(layerClientProtocolHttp({ path: "/" })) + | RunnerStorage + | RunnerHealth +> = HttpRouter.serve(layerHttpOptions({ path: "/" })).pipe( + Layer.provide(layerClientProtocolHttpDefault) ) /** @@ -161,8 +221,9 @@ export const layerHttpClientOnly: Layer.Layer< | ShardingConfig.ShardingConfig | HttpClient.HttpClient | MessageStorage + | RunnerStorage > = RunnerServer.layerClientOnly.pipe( - Layer.provide(layerClientProtocolHttp({ path: "/" })) + Layer.provide(layerClientProtocolHttpDefault) ) /** @@ -177,10 +238,10 @@ export const layerWebsocket: Layer.Layer< | Socket.WebSocketConstructor | HttpServer.HttpServer | MessageStorage - | ShardStorage -> = HttpRouter.Default.serve().pipe( - Layer.provideMerge(layerWebsocketOptions({ path: "/", logAddress: true })), - Layer.provide(layerClientProtocolWebsocket({ path: "/" })) + | RunnerStorage + | RunnerHealth +> = HttpRouter.serve(layerWebsocketOptions({ path: "/" })).pipe( + Layer.provide(layerClientProtocolWebsocketDefault) ) /** @@ -190,7 +251,11 @@ export const layerWebsocket: Layer.Layer< export const layerWebsocketClientOnly: Layer.Layer< Sharding.Sharding | Runners.Runners, never, - ShardingConfig.ShardingConfig | MessageStorage | RpcSerialization.RpcSerialization | Socket.WebSocketConstructor + | ShardingConfig.ShardingConfig + | MessageStorage + | RunnerStorage + | RpcSerialization.RpcSerialization + | Socket.WebSocketConstructor > = RunnerServer.layerClientOnly.pipe( - Layer.provide(layerClientProtocolWebsocket({ path: "/" })) + Layer.provide(layerClientProtocolWebsocketDefault) ) diff --git a/packages/cluster/src/HttpShardManager.ts b/packages/cluster/src/HttpShardManager.ts deleted file mode 100644 index e60b3af7f94..00000000000 --- a/packages/cluster/src/HttpShardManager.ts +++ /dev/null @@ -1,273 +0,0 @@ -/** - * @since 1.0.0 - */ -import type * as HttpApp from "@effect/platform/HttpApp" -import type * as HttpClient from "@effect/platform/HttpClient" -import * as HttpRouter from "@effect/platform/HttpRouter" -import * as HttpServer from "@effect/platform/HttpServer" -import type * as Socket from "@effect/platform/Socket" -import type * as 
RpcSerialization from "@effect/rpc/RpcSerialization" -import * as RpcServer from "@effect/rpc/RpcServer" -import * as Effect from "effect/Effect" -import { identity } from "effect/Function" -import * as Layer from "effect/Layer" -import type { Scope } from "effect/Scope" -import { layerClientProtocolHttp, layerClientProtocolWebsocket } from "./HttpCommon.js" -import * as MessageStorage from "./MessageStorage.js" -import * as RunnerHealth from "./RunnerHealth.js" -import * as Runners from "./Runners.js" -import type { ShardingConfig } from "./ShardingConfig.js" -import * as ShardManager from "./ShardManager.js" -import type { ShardStorage } from "./ShardStorage.js" - -/** - * @since 1.0.0 - * @category Http App - */ -export const toHttpApp: Effect.Effect< - HttpApp.Default, - never, - Scope | RpcSerialization.RpcSerialization | ShardManager.ShardManager -> = Effect.gen(function*() { - const handlers = yield* Layer.build(ShardManager.layerServerHandlers) - return yield* RpcServer.toHttpApp(ShardManager.Rpcs).pipe( - Effect.provide(handlers) - ) -}) - -/** - * @since 1.0.0 - * @category Http App - */ -export const toHttpAppWebsocket: Effect.Effect< - HttpApp.Default, - never, - Scope | RpcSerialization.RpcSerialization | ShardManager.ShardManager -> = Effect.gen(function*() { - const handlers = yield* Layer.build(ShardManager.layerServerHandlers) - return yield* RpcServer.toHttpAppWebsocket(ShardManager.Rpcs).pipe( - Effect.provide(handlers) - ) -}) - -/** - * A layer for the `ShardManager` service, that does not run a server. - * - * It only provides the `Runners` rpc client. - * - * You can use this with the `toHttpApp` and `toHttpAppWebsocket` apis - * to run a complete `ShardManager` server. - * - * @since 1.0.0 - * @category Layers - */ -export const layerNoServerHttp = ( - options: { - readonly runnerPath: string - readonly runnerHttps?: boolean | undefined - } -): Layer.Layer< - ShardManager.ShardManager, - never, - | RpcSerialization.RpcSerialization - | ShardStorage - | RunnerHealth.RunnerHealth - | HttpClient.HttpClient - | ShardManager.Config - | ShardingConfig -> => - ShardManager.layer.pipe( - Layer.provide(Runners.layerRpc.pipe( - Layer.provide([ - layerClientProtocolHttp({ - path: options.runnerPath, - https: options.runnerHttps - }), - MessageStorage.layerNoop - ]) - )) - ) - -/** - * A layer for the `ShardManager` service, that does not run a server. - * - * It only provides the `Runners` rpc client. - * - * You can use this with the `toHttpApp` and `toHttpAppWebsocket` apis - * to run a complete `ShardManager` server. - * - * @since 1.0.0 - * @category Layers - */ -export const layerNoServerWebsocket = ( - options: { - readonly runnerPath: string - readonly runnerHttps?: boolean | undefined - } -): Layer.Layer< - ShardManager.ShardManager, - never, - | RpcSerialization.RpcSerialization - | ShardStorage - | RunnerHealth.RunnerHealth - | Socket.WebSocketConstructor - | ShardManager.Config - | ShardingConfig -> => - ShardManager.layer.pipe( - Layer.provide(Runners.layerRpc.pipe( - Layer.provide([ - layerClientProtocolWebsocket({ - path: options.runnerPath, - https: options.runnerHttps - }), - MessageStorage.layerNoop - ]) - )) - ) - -/** - * A HTTP layer for the `ShardManager` server, that adds a route to the provided - * `HttpRouter.Tag`. - * - * By default, it uses the `HttpRouter.Default` tag. 
- * - * @since 1.0.0 - * @category Layers - */ -export const layerHttpOptions = ( - options: { - readonly path: HttpRouter.PathInput - readonly routerTag?: HttpRouter.HttpRouter.TagClass - readonly runnerPath: string - readonly runnerHttps?: boolean | undefined - readonly logAddress?: boolean | undefined - } -): Layer.Layer< - ShardManager.ShardManager, - never, - | RpcSerialization.RpcSerialization - | ShardStorage - | RunnerHealth.RunnerHealth - | HttpClient.HttpClient - | HttpServer.HttpServer - | ShardManager.Config - | ShardingConfig -> => { - const routerTag = options.routerTag ?? HttpRouter.Default - return routerTag.serve().pipe( - options.logAddress ? withLogAddress : identity, - Layer.merge(ShardManager.layerServer), - Layer.provide(RpcServer.layerProtocolHttp(options)), - Layer.provideMerge(layerNoServerHttp(options)) - ) -} - -/** - * A WebSocket layer for the `ShardManager` server, that adds a route to the provided - * `HttpRouter.Tag`. - * - * By default, it uses the `HttpRouter.Default` tag. - * - * @since 1.0.0 - * @category Layers - */ -export const layerWebsocketOptions = ( - options: { - readonly path: HttpRouter.PathInput - readonly routerTag?: HttpRouter.HttpRouter.TagClass - readonly runnerPath: string - readonly runnerHttps?: boolean | undefined - readonly logAddress?: boolean | undefined - } -): Layer.Layer< - ShardManager.ShardManager, - never, - | RpcSerialization.RpcSerialization - | ShardStorage - | RunnerHealth.RunnerHealth - | HttpServer.HttpServer - | Socket.WebSocketConstructor - | ShardManager.Config - | ShardingConfig -> => { - const routerTag = options.routerTag ?? HttpRouter.Default - return routerTag.serve().pipe( - options.logAddress ? withLogAddress : identity, - Layer.merge(ShardManager.layerServer), - Layer.provide(RpcServer.layerProtocolWebsocket(options)), - Layer.provideMerge(layerNoServerWebsocket(options)) - ) -} - -const withLogAddress = (layer: Layer.Layer): Layer.Layer => - Layer.effectDiscard( - HttpServer.addressFormattedWith((address) => - Effect.annotateLogs(Effect.logInfo(`Listening on: ${address}`), { - package: "@effect/cluster", - service: "ShardManager" - }) - ) - ).pipe(Layer.provideMerge(layer)) - -/** - * A HTTP layer for the `ShardManager` server, that adds a route to the provided - * `HttpRouter.Tag`. - * - * By default, it uses the `HttpRouter.Default` tag. - * - * @since 1.0.0 - * @category Layers - */ -export const layerHttp: Layer.Layer< - ShardManager.ShardManager, - never, - | RpcSerialization.RpcSerialization - | ShardStorage - | RunnerHealth.RunnerHealth - | HttpClient.HttpClient - | HttpServer.HttpServer - | ShardManager.Config - | ShardingConfig -> = layerHttpOptions({ path: "/", runnerPath: "/" }) - -/** - * A Websocket layer for the `ShardManager` server, that adds a route to the provided - * `HttpRouter.Tag`. - * - * By default, it uses the `HttpRouter.Default` tag. 
- * - * @since 1.0.0 - * @category Layers - */ -export const layerWebsocket: Layer.Layer< - ShardManager.ShardManager, - never, - | RpcSerialization.RpcSerialization - | ShardStorage - | RunnerHealth.RunnerHealth - | Socket.WebSocketConstructor - | HttpServer.HttpServer - | ShardManager.Config - | ShardingConfig -> = layerWebsocketOptions({ path: "/", runnerPath: "/" }) - -/** - * @since 1.0.0 - * @category Layers - */ -export const layerRunnerHealthHttp: Layer.Layer< - RunnerHealth.RunnerHealth, - never, - RpcSerialization.RpcSerialization | HttpClient.HttpClient | ShardingConfig -> = Layer.provide(RunnerHealth.layerRpc, layerClientProtocolHttp({ path: "/" })) - -/** - * @since 1.0.0 - * @category Layers - */ -export const layerRunnerHealthWebsocket: Layer.Layer< - RunnerHealth.RunnerHealth, - never, - RpcSerialization.RpcSerialization | Socket.WebSocketConstructor | ShardingConfig -> = Layer.provide(RunnerHealth.layerRpc, layerClientProtocolWebsocket({ path: "/" })) diff --git a/packages/cluster/src/MessageStorage.ts b/packages/cluster/src/MessageStorage.ts index d7cce71e1c7..37b052becab 100644 --- a/packages/cluster/src/MessageStorage.ts +++ b/packages/cluster/src/MessageStorage.ts @@ -15,13 +15,13 @@ import type { ParseError } from "effect/ParseResult" import type { Predicate } from "effect/Predicate" import * as Schema from "effect/Schema" import type { PersistenceError } from "./ClusterError.js" -import { MalformedMessage } from "./ClusterError.js" +import { EntityNotAssignedToRunner, MalformedMessage } from "./ClusterError.js" import * as DeliverAt from "./DeliverAt.js" import type { EntityAddress } from "./EntityAddress.js" import * as Envelope from "./Envelope.js" import * as Message from "./Message.js" import * as Reply from "./Reply.js" -import { ShardId } from "./ShardId.js" +import type { ShardId } from "./ShardId.js" import type { ShardingConfig } from "./ShardingConfig.js" import * as Snowflake from "./Snowflake.js" @@ -88,15 +88,19 @@ export class MessageStorage extends Context.Tag("@effect/cluster/MessageStorage" * For locally sent messages, register a handler to process the replies. */ readonly registerReplyHandler: ( - message: Message.OutgoingRequest | Message.IncomingRequest, - onUnregister: Effect.Effect - ) => Effect.Effect + message: Message.OutgoingRequest | Message.IncomingRequest + ) => Effect.Effect /** * Unregister the reply handler for the specified message. */ readonly unregisterReplyHandler: (requestId: Snowflake.Snowflake) => Effect.Effect + /** + * Unregister the reply handlers for the specified ShardId. + */ + readonly unregisterShardReplyHandlers: (shardId: ShardId) => Effect.Effect + /** * Retrieves the unprocessed messages for the specified shards. 
* @@ -342,38 +346,98 @@ export type EncodedRepliesOptions = { * @category constructors */ export const make = ( - storage: Omit + storage: Omit< + MessageStorage["Type"], + "registerReplyHandler" | "unregisterReplyHandler" | "unregisterShardReplyHandlers" + > ): Effect.Effect => Effect.sync(() => { - const replyHandlers = new Map | Message.IncomingRequest + readonly shardSet: Set readonly respond: (reply: Reply.ReplyWithContext) => Effect.Effect - readonly onUnregister: Effect.Effect - }>() + readonly resume: (effect: Effect.Effect) => void + } + const replyHandlers = new Map>() + const replyHandlersShard = new Map>() return MessageStorage.of({ ...storage, - registerReplyHandler: (message, onUnregister) => - Effect.sync(() => { - replyHandlers.set(message.envelope.requestId, { + registerReplyHandler: (message) => { + const requestId = message.envelope.requestId + return Effect.async((resume) => { + const shardId = message.envelope.address.shardId.toString() + let handlers = replyHandlers.get(requestId) + if (handlers === undefined) { + handlers = [] + replyHandlers.set(requestId, handlers) + } + let shardSet = replyHandlersShard.get(shardId) + if (!shardSet) { + shardSet = new Set() + replyHandlersShard.set(shardId, shardSet) + } + const entry: ReplyHandler = { + message, + shardSet, respond: message._tag === "IncomingRequest" ? message.respond : (reply) => message.respond(reply.reply), - onUnregister + resume + } + handlers.push(entry) + shardSet.add(entry) + return Effect.sync(() => { + const index = handlers.indexOf(entry) + handlers.splice(index, 1) + shardSet.delete(entry) }) - }), + }) + }, unregisterReplyHandler: (requestId) => - Effect.suspend(() => { - const handler = replyHandlers.get(requestId) - if (!handler) return Effect.void + Effect.sync(() => { + const handlers = replyHandlers.get(requestId) + if (!handlers) return Effect.void replyHandlers.delete(requestId) - return handler.onUnregister + for (let i = 0; i < handlers.length; i++) { + const handler = handlers[i] + handler.shardSet.delete(handler) + handler.resume(Effect.fail( + new EntityNotAssignedToRunner({ + address: handler.message.envelope.address + }) + )) + } + }), + unregisterShardReplyHandlers: (shardId) => + Effect.sync(() => { + const id = shardId.toString() + const shardSet = replyHandlersShard.get(id) + if (!shardSet) return + replyHandlersShard.delete(id) + shardSet.forEach((handler) => { + replyHandlers.delete(handler.message.envelope.requestId) + handler.resume(Effect.fail( + new EntityNotAssignedToRunner({ + address: handler.message.envelope.address + }) + )) + }) }), saveReply(reply) { + const requestId = reply.reply.requestId return Effect.flatMap(storage.saveReply(reply), () => { - const handler = replyHandlers.get(reply.reply.requestId) - if (!handler) { + const handlers = replyHandlers.get(requestId) + if (!handlers) { return Effect.void } else if (reply.reply._tag === "WithExit") { - replyHandlers.delete(reply.reply.requestId) + replyHandlers.delete(requestId) + for (let i = 0; i < handlers.length; i++) { + const handler = handlers[i] + handler.shardSet.delete(handler) + handler.resume(Effect.void) + } } - return handler.respond(reply) + return handlers.length === 1 + ? 
handlers[0].respond(reply) + : Effect.forEach(handlers, (handler) => handler.respond(reply)) }) } }) @@ -748,8 +812,9 @@ export class MemoryDriver extends Effect.Service()("@effect/cluste }>() for (let index = 0; index < journal.length; index++) { const envelope = journal[index] - const shardId = ShardId.make(envelope.address.shardId) - if (!unprocessed.has(envelope as any) || !shardIds.includes(shardId.toString())) { + const shardId = envelope.address.shardId + const shardIdStr = `${shardId.group}:${shardId.id}` + if (!unprocessed.has(envelope as any) || !shardIds.includes(shardIdStr)) { continue } if (envelope._tag === "Request") { diff --git a/packages/cluster/src/Runner.ts b/packages/cluster/src/Runner.ts index c9abd6c5a90..74d239dbd0f 100644 --- a/packages/cluster/src/Runner.ts +++ b/packages/cluster/src/Runner.ts @@ -38,7 +38,7 @@ export type TypeId = typeof TypeId export class Runner extends Schema.Class(SymbolKey)({ address: RunnerAddress, groups: Schema.Array(Schema.String), - version: Schema.Int + weight: Schema.Number }) { /** * @since 1.0.0 @@ -71,14 +71,14 @@ export class Runner extends Schema.Class(SymbolKey)({ * @since 1.0.0 */ [Equal.symbol](that: Runner): boolean { - return this.address[Equal.symbol](that.address) && this.version === that.version + return this.address[Equal.symbol](that.address) && this.weight === that.weight } /** * @since 1.0.0 */ [Hash.symbol](): number { - return Hash.cached(this, Hash.string(`${this.address.toString()}:${this.version}`)) + return Hash.cached(this, Hash.string(`${this.address.toString()}:${this.weight}`)) } } @@ -98,5 +98,5 @@ export class Runner extends Schema.Class(SymbolKey)({ export const make = (props: { readonly address: RunnerAddress readonly groups: ReadonlyArray - readonly version: number + readonly weight: number }): Runner => new Runner(props) diff --git a/packages/cluster/src/RunnerAddress.ts b/packages/cluster/src/RunnerAddress.ts index 1d988666fb6..a8e0e13efea 100644 --- a/packages/cluster/src/RunnerAddress.ts +++ b/packages/cluster/src/RunnerAddress.ts @@ -4,6 +4,7 @@ import * as Equal from "effect/Equal" import * as Hash from "effect/Hash" import { NodeInspectSymbol } from "effect/Inspectable" +import * as PrimaryKey from "effect/PrimaryKey" import * as Schema from "effect/Schema" const SymbolKey = "@effect/cluster/RunnerAddress" @@ -33,6 +34,13 @@ export class RunnerAddress extends Schema.Class(SymbolKey)({ */ readonly [TypeId] = TypeId; + /** + * @since 1.0.0 + */ + [PrimaryKey.symbol](): string { + return `${this.host}:${this.port}` + } + /** * @since 1.0.0 */ diff --git a/packages/cluster/src/RunnerHealth.ts b/packages/cluster/src/RunnerHealth.ts index 227183dc1a4..15d6115c52d 100644 --- a/packages/cluster/src/RunnerHealth.ts +++ b/packages/cluster/src/RunnerHealth.ts @@ -1,15 +1,19 @@ /** * @since 1.0.0 */ +import * as FileSystem from "@effect/platform/FileSystem" +import * as HttpClient from "@effect/platform/HttpClient" +import * as HttpClientRequest from "@effect/platform/HttpClientRequest" +import * as HttpClientResponse from "@effect/platform/HttpClientResponse" import * as Context from "effect/Context" import * as Effect from "effect/Effect" +import { identity } from "effect/Function" import * as Layer from "effect/Layer" -import * as RcMap from "effect/RcMap" +import * as Schedule from "effect/Schedule" +import * as Schema from "effect/Schema" import type * as Scope from "effect/Scope" -import * as MessageStorage from "./MessageStorage.js" import type { RunnerAddress } from "./RunnerAddress.js" import * as 
Runners from "./Runners.js" -import type { ShardingConfig } from "./ShardingConfig.js" /** * Represents the service used to check if a Runner is healthy. @@ -24,46 +28,10 @@ import type { ShardingConfig } from "./ShardingConfig.js" export class RunnerHealth extends Context.Tag("@effect/cluster/RunnerHealth")< RunnerHealth, { - /** - * Used to indicate that a Runner is connected to this host and is healthy, - * while the Scope is active. - */ - readonly onConnection: (address: RunnerAddress) => Effect.Effect readonly isAlive: (address: RunnerAddress) => Effect.Effect } >() {} -/** - * @since 1.0.0 - * @category Constructors - */ -export const make: ( - options: { readonly isAlive: (address: RunnerAddress) => Effect.Effect } -) => Effect.Effect< - RunnerHealth["Type"], - never, - Scope.Scope -> = Effect.fnUntraced(function*(options: { - readonly isAlive: (address: RunnerAddress) => Effect.Effect -}) { - const connections = yield* RcMap.make({ - lookup: (_address: RunnerAddress) => Effect.void - }) - - const onConnection = (address: RunnerAddress) => RcMap.get(connections, address) - const isAlive = Effect.fnUntraced(function*(address: RunnerAddress) { - if (yield* RcMap.has(connections, address)) { - return true - } - return yield* options.isAlive(address) - }) - - return RunnerHealth.of({ - onConnection, - isAlive - }) -}) - /** * A layer which will **always** consider a Runner healthy. * @@ -72,12 +40,9 @@ export const make: ( * @since 1.0.0 * @category layers */ -export const layerNoop = Layer.scoped( - RunnerHealth, - make({ - isAlive: () => Effect.succeed(true) - }) -) +export const layerNoop = Layer.succeed(RunnerHealth, { + isAlive: () => Effect.succeed(true) +}) /** * @since 1.0.0 @@ -89,16 +54,17 @@ export const makePing: Effect.Effect< Runners.Runners | Scope.Scope > = Effect.gen(function*() { const runners = yield* Runners.Runners + const schedule = Schedule.spaced(500) function isAlive(address: RunnerAddress): Effect.Effect { return runners.ping(address).pipe( - Effect.timeout(3000), - Effect.retry({ times: 3 }), + Effect.timeout(10_000), + Effect.retry({ times: 5, schedule }), Effect.isSuccess ) } - return yield* make({ isAlive }) + return RunnerHealth.of({ isAlive }) }) /** @@ -107,23 +73,120 @@ export const makePing: Effect.Effect< * @since 1.0.0 * @category layers */ -export const layer: Layer.Layer< +export const layerPing: Layer.Layer< RunnerHealth, never, Runners.Runners > = Layer.scoped(RunnerHealth, makePing) /** - * A layer which will ping a Runner directly to check if it is healthy. + * @since 1.0.0 + * @category Constructors + */ +export const makeK8s = Effect.fnUntraced(function*(options?: { + readonly namespace?: string | undefined + readonly labelSelector?: string | undefined +}) { + const fs = yield* FileSystem.FileSystem + const token = yield* fs.readFileString("/var/run/secrets/kubernetes.io/serviceaccount/token").pipe( + Effect.option + ) + const client = (yield* HttpClient.HttpClient).pipe( + HttpClient.filterStatusOk + ) + const baseRequest = HttpClientRequest.get("https://kubernetes.default.svc/api").pipe( + token._tag === "Some" ? HttpClientRequest.bearerToken(token.value.trim()) : identity + ) + const getPods = baseRequest.pipe( + HttpClientRequest.appendUrl(options?.namespace ? `/v1/namespaces/${options.namespace}/pods` : "/v1/pods"), + HttpClientRequest.setUrlParam("fieldSelector", "status.phase=Running"), + options?.labelSelector ? 
HttpClientRequest.setUrlParam("labelSelector", options.labelSelector) : identity + ) + const allPods = yield* client.execute(getPods).pipe( + Effect.flatMap(HttpClientResponse.schemaBodyJson(PodList)), + Effect.map((list) => { + const pods = new Map() + for (let i = 0; i < list.items.length; i++) { + const pod = list.items[i] + pods.set(pod.status.podIP, pod) + } + return pods + }), + Effect.tapErrorCause((cause) => Effect.logWarning("Failed to fetch pods from Kubernetes API", cause)), + Effect.cachedWithTTL("10 seconds") + ) + + return RunnerHealth.of({ + isAlive: (address) => + allPods.pipe( + Effect.map((pods) => pods.get(address.host)?.isReady ?? false), + Effect.catchAllCause(() => Effect.succeed(true)) + ) + }) +}) + +class Pod extends Schema.Class("effect/cluster/RunnerHealth/Pod")({ + status: Schema.Struct({ + phase: Schema.String, + conditions: Schema.Array(Schema.Struct({ + type: Schema.String, + status: Schema.String, + lastTransitionTime: Schema.String + })), + podIP: Schema.String + }) +}) { + get isReady(): boolean { + let initializedAt: string | undefined + let readyAt: string | undefined + for (let i = 0; i < this.status.conditions.length; i++) { + const condition = this.status.conditions[i] + switch (condition.type) { + case "Initialized": { + if (condition.status !== "True") { + return true + } + initializedAt = condition.lastTransitionTime + break + } + case "Ready": { + if (condition.status === "True") { + return true + } + readyAt = condition.lastTransitionTime + break + } + } + } + // if the pod is still booting up, consider it ready as it would have + // already registered itself with RunnerStorage by now + return initializedAt === readyAt + } +} + +const PodList = Schema.Struct({ + items: Schema.Array(Pod) +}) + +/** + * A layer which will check the Kubernetes API to see if a Runner is healthy. + * + * The provided HttpClient will need to add the pod's CA certificate to its + * trusted root certificates in order to communicate with the Kubernetes API. + * + * The pod service account will also need to have permissions to list pods in + * order to use this layer. 
* * @since 1.0.0 * @category layers */ -export const layerRpc: Layer.Layer< +export const layerK8s = ( + options?: { + readonly namespace?: string | undefined + readonly labelSelector?: string | undefined + } | undefined +): Layer.Layer< RunnerHealth, never, - Runners.RpcClientProtocol | ShardingConfig -> = layer.pipe( - Layer.provide(Runners.layerRpc), - Layer.provide(MessageStorage.layerNoop) -) + HttpClient.HttpClient | FileSystem.FileSystem +> => Layer.effect(RunnerHealth, makeK8s(options)) diff --git a/packages/cluster/src/RunnerServer.ts b/packages/cluster/src/RunnerServer.ts index bc1f889cab0..b09e960794f 100644 --- a/packages/cluster/src/RunnerServer.ts +++ b/packages/cluster/src/RunnerServer.ts @@ -3,20 +3,22 @@ */ import * as RpcServer from "@effect/rpc/RpcServer" import * as Effect from "effect/Effect" +import type * as Exit from "effect/Exit" +import * as Fiber from "effect/Fiber" import { constant } from "effect/Function" import * as Layer from "effect/Layer" import * as Mailbox from "effect/Mailbox" import * as Option from "effect/Option" -import * as ClusterError from "./ClusterError.js" +import * as Runtime from "effect/Runtime" +import type * as ClusterError from "./ClusterError.js" import * as Message from "./Message.js" import * as MessageStorage from "./MessageStorage.js" import * as Reply from "./Reply.js" +import * as RunnerHealth from "./RunnerHealth.js" import * as Runners from "./Runners.js" +import type * as RunnerStorage from "./RunnerStorage.js" import * as Sharding from "./Sharding.js" import { ShardingConfig } from "./ShardingConfig.js" -import * as ShardManager from "./ShardManager.js" -import * as ShardStorage from "./ShardStorage.js" -import * as SynchronizedClock from "./SynchronizedClock.js" const constVoid = constant(Effect.void) @@ -41,41 +43,60 @@ export const layerHandlers = Runners.Rpcs.toLayer(Effect.gen(function*() { : new Message.IncomingEnvelope({ envelope }) ), Effect: ({ persisted, request }) => { - let resume: (reply: Effect.Effect, ClusterError.EntityNotAssignedToRunner>) => void - let replyEncoded: Reply.ReplyEncoded | undefined + let replyEncoded: + | Effect.Effect< + Reply.ReplyEncoded, + ClusterError.EntityNotAssignedToRunner + > + | undefined = undefined + let resume = (reply: Effect.Effect, ClusterError.EntityNotAssignedToRunner>) => { + replyEncoded = reply + } const message = new Message.IncomingRequest({ envelope: request, lastSentReply: Option.none(), respond(reply) { - return Effect.flatMap(Reply.serialize(reply), (reply) => { - if (resume) { - resume(Effect.succeed(reply)) - } else { - replyEncoded = reply - } - return Effect.void - }) + resume(Effect.orDie(Reply.serialize(reply))) + return Effect.void } }) + if (persisted) { + return Effect.async< + Reply.ReplyEncoded, + ClusterError.EntityNotAssignedToRunner + >((resume_) => { + resume = resume_ + const parent = Option.getOrThrow(Fiber.getCurrentFiber()) + const runtime = Runtime.make({ + context: parent.currentContext, + runtimeFlags: Runtime.defaultRuntimeFlags, + fiberRefs: parent.getFiberRefs() + }) + const onExit = ( + exit: Exit.Exit< + any, + ClusterError.EntityNotAssignedToRunner + > + ) => { + if (exit._tag === "Failure") { + resume(exit as any) + } + } + const fiber = Runtime.runFork(runtime)(storage.registerReplyHandler(message)) + fiber.addObserver(onExit) + Runtime.runFork(runtime)(Effect.catchTag( + sharding.notify(message, constWaitUntilRead), + "AlreadyProcessingMessage", + () => Effect.void + )).addObserver(onExit) + return Fiber.interrupt(fiber) + }) + } return 
Effect.zipRight( - persisted ? - Effect.zipRight( - storage.registerReplyHandler( - message, - Effect.sync(() => - resume(Effect.fail( - new ClusterError.EntityNotAssignedToRunner({ - address: request.address - }) - )) - ) - ), - sharding.notify(message) - ) : - sharding.send(message), + sharding.send(message), Effect.async, ClusterError.EntityNotAssignedToRunner>((resume_) => { if (replyEncoded) { - resume_(Effect.succeed(replyEncoded)) + resume_(replyEncoded) } else { resume = resume_ } @@ -99,17 +120,12 @@ export const layerHandlers = Runners.Rpcs.toLayer(Effect.gen(function*() { return Effect.as( persisted ? Effect.zipRight( - storage.registerReplyHandler( - message, - Effect.suspend(() => - mailbox.fail( - new ClusterError.EntityNotAssignedToRunner({ - address: request.address - }) - ) - ) + storage.registerReplyHandler(message).pipe( + Effect.onError((cause) => mailbox.failCause(cause)), + Effect.forkScoped, + Effect.interruptible ), - sharding.notify(message) + sharding.notify(message, constWaitUntilRead) ) : sharding.send(message), mailbox @@ -120,6 +136,8 @@ } })) +const constWaitUntilRead = { waitUntilRead: true } as const /** * The `RunnerServer` receives messages from other Runners and forwards them to the * `Sharding` layer. @@ -151,18 +169,17 @@ export const layerWithClients: Layer.Layer< | ShardingConfig | Runners.RpcClientProtocol | MessageStorage.MessageStorage - | ShardStorage.ShardStorage + | RunnerStorage.RunnerStorage + | RunnerHealth.RunnerHealth > = layer.pipe( Layer.provideMerge(Sharding.layer), - Layer.provideMerge(Runners.layerRpc), - Layer.provideMerge(SynchronizedClock.layer), - Layer.provide(ShardManager.layerClientRpc) + Layer.provideMerge(Runners.layerRpc) ) /** * A `Runners` layer that is client only. * - * It will not register with the ShardManager and recieve shard assignments, + * It will not register with RunnerStorage and receive shard assignments, * so this layer can be used to embed a cluster client inside another effect * application. * @@ -175,10 +192,10 @@ export const layerClientOnly: Layer.Layer< | ShardingConfig | Runners.RpcClientProtocol | MessageStorage.MessageStorage + | RunnerStorage.RunnerStorage > = Sharding.layer.pipe( Layer.provideMerge(Runners.layerRpc), - Layer.provide(ShardManager.layerClientRpc), - Layer.provide(ShardStorage.layerNoop), + Layer.provide(RunnerHealth.layerNoop), Layer.updateService(ShardingConfig, (config) => ({ ...config, runnerAddress: Option.none() diff --git a/packages/cluster/src/RunnerStorage.ts b/packages/cluster/src/RunnerStorage.ts new file mode 100644 index 00000000000..f8955baad10 --- /dev/null +++ b/packages/cluster/src/RunnerStorage.ts @@ -0,0 +1,218 @@ +/** + * @since 1.0.0 + */ +import { isNonEmptyArray, type NonEmptyArray } from "effect/Array" +import * as Context from "effect/Context" +import * as Effect from "effect/Effect" +import * as Layer from "effect/Layer" +import * as MutableHashMap from "effect/MutableHashMap" +import type { PersistenceError } from "./ClusterError.js" +import * as MachineId from "./MachineId.js" +import { Runner } from "./Runner.js" +import type { RunnerAddress } from "./RunnerAddress.js" +import { ShardId } from "./ShardId.js" + +/** + * Represents a generic interface to the persistent storage required by the + * cluster. 
+ * + * @since 1.0.0 + * @category models + */ +export class RunnerStorage extends Context.Tag("@effect/cluster/RunnerStorage") Effect.Effect + + /** + * Unregister the runner with the given address. + */ + readonly unregister: (address: RunnerAddress) => Effect.Effect + + /** + * Get all runners registered with the cluster. + */ + readonly getRunners: Effect.Effect, PersistenceError> + + /** + * Set the health status of the given runner. + */ + readonly setRunnerHealth: (address: RunnerAddress, healthy: boolean) => Effect.Effect + + /** + * Try to acquire the given shard ids for processing. + * + * It returns an array of shards it was able to acquire. + */ + readonly acquire: ( + address: RunnerAddress, + shardIds: Iterable + ) => Effect.Effect, PersistenceError> + + /** + * Refresh the locks owned by the given runner. + */ + readonly refresh: ( + address: RunnerAddress, + shardIds: Iterable + ) => Effect.Effect, PersistenceError> + + /** + * Release the given shard ids. + */ + readonly release: ( + address: RunnerAddress, + shardId: ShardId + ) => Effect.Effect + + /** + * Release all the shards assigned to the given runner. + */ + readonly releaseAll: (address: RunnerAddress) => Effect.Effect +}>() {} + +/** + * @since 1.0.0 + * @category Encoded + */ +export interface Encoded { + /** + * Get all runners registered with the cluster. + */ + readonly getRunners: Effect.Effect, PersistenceError> + + /** + * Register a new runner with the cluster. + */ + readonly register: (address: string, runner: string, healthy: boolean) => Effect.Effect + + /** + * Unregister the runner with the given address. + */ + readonly unregister: (address: string) => Effect.Effect + + /** + * Set the health status of the given runner. + */ + readonly setRunnerHealth: (address: string, healthy: boolean) => Effect.Effect + + /** + * Acquire the lock on the given shards, returning the shards that were + * successfully locked. + */ + readonly acquire: ( + address: string, + shardIds: NonEmptyArray + ) => Effect.Effect, PersistenceError> + + /** + * Refresh the lock on the given shards, returning the shards that were + * successfully locked. + */ + readonly refresh: ( + address: string, + shardIds: Array + ) => Effect.Effect, PersistenceError> + + /** + * Release the lock on the given shard. + */ + readonly release: ( + address: string, + shardId: string + ) => Effect.Effect + + /** + * Release the lock on all shards for the given runner. 
+ */ + readonly releaseAll: (address: string) => Effect.Effect +} + +/** + * @since 1.0.0 + * @category layers + */ +export const makeEncoded = (encoded: Encoded) => + RunnerStorage.of({ + getRunners: Effect.gen(function*() { + const runners = yield* encoded.getRunners + const results: Array<[Runner, boolean]> = [] + for (let i = 0; i < runners.length; i++) { + const [runner, healthy] = runners[i] + try { + results.push([Runner.decodeSync(runner), healthy]) + } catch { + // + } + } + return results + }), + register: (runner, healthy) => + Effect.map( + encoded.register(encodeRunnerAddress(runner.address), Runner.encodeSync(runner), healthy), + MachineId.make + ), + unregister: (address) => encoded.unregister(encodeRunnerAddress(address)), + setRunnerHealth: (address, healthy) => encoded.setRunnerHealth(encodeRunnerAddress(address), healthy), + acquire: (address, shardIds) => { + const arr = Array.from(shardIds, (id) => id.toString()) + if (!isNonEmptyArray(arr)) return Effect.succeed([]) + return encoded.acquire(encodeRunnerAddress(address), arr).pipe( + Effect.map((shards) => shards.map(ShardId.fromString)) + ) + }, + refresh: (address, shardIds) => + encoded.refresh(encodeRunnerAddress(address), Array.from(shardIds, (id) => id.toString())).pipe( + Effect.map((shards) => shards.map(ShardId.fromString)) + ), + release(address, shardId) { + return encoded.release(encodeRunnerAddress(address), shardId.toString()) + }, + releaseAll(address) { + return encoded.releaseAll(encodeRunnerAddress(address)) + } + }) + +/** + * @since 1.0.0 + * @category constructors + */ +export const makeMemory = Effect.gen(function*() { + const runners = MutableHashMap.empty() + let acquired: Array = [] + let id = 0 + + return RunnerStorage.of({ + getRunners: Effect.sync(() => Array.from(MutableHashMap.values(runners), (runner) => [runner, true])), + register: (runner) => + Effect.sync(() => { + MutableHashMap.set(runners, runner.address, runner) + return MachineId.make(id++) + }), + unregister: (address) => + Effect.sync(() => { + MutableHashMap.remove(runners, address) + }), + setRunnerHealth: () => Effect.void, + acquire: (_address, shardIds) => { + acquired = Array.from(shardIds) + return Effect.succeed(Array.from(shardIds)) + }, + refresh: () => Effect.sync(() => acquired), + release: () => Effect.void, + releaseAll: () => Effect.void + }) +}) + +/** + * @since 1.0.0 + * @category layers + */ +export const layerMemory: Layer.Layer = Layer.effect(RunnerStorage)(makeMemory) + +// ------------------------------------------------------------------------------------- +// internal +// ------------------------------------------------------------------------------------- + +const encodeRunnerAddress = (runnerAddress: RunnerAddress) => `${runnerAddress.host}:${runnerAddress.port}` diff --git a/packages/cluster/src/Runners.ts b/packages/cluster/src/Runners.ts index 1b08ad839c8..e675f24362c 100644 --- a/packages/cluster/src/Runners.ts +++ b/packages/cluster/src/Runners.ts @@ -17,13 +17,7 @@ import * as RcMap from "effect/RcMap" import * as Schema from "effect/Schema" import type { Scope } from "effect/Scope" import type { PersistenceError } from "./ClusterError.js" -import { - AlreadyProcessingMessage, - EntityNotAssignedToRunner, - EntityNotManagedByRunner, - MailboxFull, - RunnerUnavailable -} from "./ClusterError.js" +import { AlreadyProcessingMessage, EntityNotAssignedToRunner, MailboxFull, RunnerUnavailable } from "./ClusterError.js" import { Persisted } from "./ClusterSchema.js" import * as Envelope from 
"./Envelope.js" import * as Message from "./Message.js" @@ -56,13 +50,13 @@ export class Runners extends Context.Tag("@effect/cluster/Runners") ) => Effect.Effect< void, - EntityNotManagedByRunner | EntityNotAssignedToRunner | MailboxFull | AlreadyProcessingMessage + EntityNotAssignedToRunner | MailboxFull | AlreadyProcessingMessage > readonly simulateRemoteSerialization: boolean } ) => Effect.Effect< void, - EntityNotManagedByRunner | EntityNotAssignedToRunner | MailboxFull | AlreadyProcessingMessage | PersistenceError + EntityNotAssignedToRunner | MailboxFull | AlreadyProcessingMessage | PersistenceError > /** @@ -75,7 +69,6 @@ export class Runners extends Context.Tag("@effect/cluster/Runners") Effect.Effect< void, - | EntityNotManagedByRunner | EntityNotAssignedToRunner | RunnerUnavailable | MailboxFull @@ -92,7 +85,7 @@ export class Runners extends Context.Tag("@effect/cluster/Runners") readonly discard: boolean } - ) => Effect.Effect + ) => Effect.Effect /** * Notify the current Runner that a message is available, then read replies from @@ -106,11 +99,16 @@ export class Runners extends Context.Tag("@effect/cluster/Runners") readonly notify: ( options: Message.IncomingLocal - ) => Effect.Effect + ) => Effect.Effect readonly discard: boolean readonly storageOnly?: boolean | undefined } - ) => Effect.Effect + ) => Effect.Effect + + /** + * Mark a Runner as unavailable. + */ + readonly onRunnerUnavailable: (address: RunnerAddress) => Effect.Effect }>() {} /** @@ -243,6 +241,7 @@ export const make: (options: Omit) for (const message of entry.messages) { yield* message.respond(reply) } + // wait for ack yield* entry.latch.await } entry.replies = [] @@ -266,6 +265,8 @@ export const make: (options: Omit) const storageLatch = Effect.unsafeMakeLatch(false) if (storage !== MessageStorage.noop) { yield* Effect.gen(function*() { + const foundRequests = new Set() + while (true) { yield* storageLatch.await storageLatch.unsafeClose() @@ -283,8 +284,6 @@ export const make: (options: Omit) ) ) - const foundRequests = new Set() - // put the replies into the storage requests and then open the latches for (let i = 0; i < replies.length; i++) { const reply = replies[i] @@ -296,6 +295,7 @@ export const make: (options: Omit) } foundRequests.forEach((entry) => entry.latch.unsafeOpen()) + foundRequests.clear() } }).pipe( Effect.interruptible, @@ -350,12 +350,7 @@ export const make: (options: Omit) address: options_.address.value, message }), - (error) => { - if (error._tag === "EntityNotManagedByRunner") { - return Effect.fail(error) - } - return replyFromStorage(message) - } + (_) => replyFromStorage(message) ) } return options.notify(options_).pipe( @@ -372,17 +367,9 @@ export const make: (options: Omit) () => Effect.void ) } else if (!duplicate && options.storageOnly !== true) { - return storage.registerReplyHandler( - message, - Effect.suspend(() => - replyFromStorage(message).pipe( - Effect.forkIn(runnersScope), - Effect.interruptible - ) - ) - ).pipe( - Effect.andThen(options.notify(Message.incomingLocalFromOutgoing(message))), - Effect.catchTag("EntityNotAssignedToRunner", () => Effect.void) + return options.notify(Message.incomingLocalFromOutgoing(message)).pipe( + Effect.andThen(storage.registerReplyHandler(message)), + Effect.catchTag("EntityNotAssignedToRunner", () => replyFromStorage(message)) ) } return options.notify(Message.incomingLocalFromOutgoing(message)).pipe( @@ -403,9 +390,10 @@ export const makeNoop: Effect.Effect< never, MessageStorage.MessageStorage | Snowflake.Generator | 
ShardingConfig | Scope > = make({ - send: ({ message }) => Effect.fail(new EntityNotManagedByRunner({ address: message.envelope.address })), + send: ({ message }) => Effect.fail(new EntityNotAssignedToRunner({ address: message.envelope.address })), notify: () => Effect.void, - ping: () => Effect.void + ping: () => Effect.void, + onRunnerUnavailable: () => Effect.void }) /** @@ -419,12 +407,10 @@ export const layerNoop: Layer.Layer< > = Layer.scoped(Runners, makeNoop).pipe(Layer.provide([Snowflake.layerGenerator])) const rpcErrors: Schema.Union<[ - typeof EntityNotManagedByRunner, typeof EntityNotAssignedToRunner, typeof MailboxFull, typeof AlreadyProcessingMessage ]> = Schema.Union( - EntityNotManagedByRunner, EntityNotAssignedToRunner, MailboxFull, AlreadyProcessingMessage @@ -441,7 +427,7 @@ export class Rpcs extends RpcGroup.make( envelope: Envelope.PartialEncoded }, success: Schema.Void, - error: Schema.Union(EntityNotManagedByRunner, EntityNotAssignedToRunner, AlreadyProcessingMessage) + error: Schema.Union(EntityNotAssignedToRunner, AlreadyProcessingMessage) }), Rpc.make("Effect", { payload: { @@ -510,7 +496,12 @@ export const makeRpc: Effect.Effect< ping(address) { return RcMap.get(clients, address).pipe( Effect.flatMap((client) => client.Ping()), - Effect.catchAllCause(() => Effect.fail(new RunnerUnavailable({ address }))), + Effect.catchAllCause(() => { + return Effect.zipRight( + RcMap.invalidate(clients, address), + Effect.fail(new RunnerUnavailable({ address })) + ) + }), Effect.scoped ) }, @@ -603,14 +594,10 @@ export const makeRpc: Effect.Effect< return RcMap.get(clients, address.value).pipe( Effect.flatMap((client) => client.Notify({ envelope })), Effect.scoped, - Effect.catchAll((error) => { - if (error._tag === "EntityNotManagedByRunner") { - return Effect.fail(error) - } - return Effect.void - }) + Effect.ignore ) - } + }, + onRunnerUnavailable: (address) => RcMap.invalidate(clients, address) }) }) diff --git a/packages/cluster/src/ShardId.ts b/packages/cluster/src/ShardId.ts index 4ece6c34fc0..b53f8257347 100644 --- a/packages/cluster/src/ShardId.ts +++ b/packages/cluster/src/ShardId.ts @@ -23,7 +23,17 @@ const constDisableValidation = { disableValidation: true } * @since 1.0.0 * @category Constructors */ -export const make = (group: string, id: number): ShardId => new ShardId({ group, id }, constDisableValidation) +export const make = (group: string, id: number): ShardId => { + const key = `${group}:${id}` + let shardId = shardIdCache.get(key) + if (!shardId) { + shardId = new ShardId({ group, id }, constDisableValidation) + shardIdCache.set(key, shardId) + } + return shardId +} + +const shardIdCache = new Map() /** * @since 1.0.0 @@ -89,9 +99,10 @@ export class ShardId extends S.Class("@effect/cluster/ShardId")({ } /** - * @since 1.0.0 + * @since 4.0.0 */ static fromString(s: string): ShardId { - return new ShardId(ShardId.fromStringEncoded(s), constDisableValidation) + const encoded = ShardId.fromStringEncoded(s) + return make(encoded.group, encoded.id) } } diff --git a/packages/cluster/src/ShardManager.ts b/packages/cluster/src/ShardManager.ts deleted file mode 100644 index 878f39b9e5d..00000000000 --- a/packages/cluster/src/ShardManager.ts +++ /dev/null @@ -1,823 +0,0 @@ -/** - * @since 1.0.0 - */ -import * as Rpc from "@effect/rpc/Rpc" -import * as RpcClient from "@effect/rpc/RpcClient" -import * as RpcGroup from "@effect/rpc/RpcGroup" -import * as RpcServer from "@effect/rpc/RpcServer" -import * as Arr from "effect/Array" -import * as Clock from "effect/Clock" 
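// ---------------------------------------------------------------------------
// Editor's illustration (standalone TypeScript, not part of the patch): the
// rewritten `ShardId.make` above interns instances in a module-level Map, so
// equal group/id pairs always yield the same object reference. This is what
// lets later code in this patch (e.g. the lock-refresh loop in Sharding.ts)
// use plain Array#includes instead of structural Equal checks. A minimal
// sketch of the pattern, with hypothetical names:
class Id {
  constructor(readonly group: string, readonly id: number) {}
  toString(): string {
    return `${this.group}:${this.id}`
  }
}

const interned = new Map<string, Id>()

const makeId = (group: string, id: number): Id => {
  const key = `${group}:${id}`
  let existing = interned.get(key)
  if (!existing) {
    existing = new Id(group, id)
    interned.set(key, existing)
  }
  return existing
}

// Equal inputs share one instance, so `===` and Array#includes just work:
console.log(makeId("default", 1) === makeId("default", 1)) // true
console.log([makeId("default", 1)].includes(makeId("default", 1))) // true
// The cache never evicts, which is fine here: the number of distinct ids is
// bounded by shardsPerGroup times the number of shard groups.
// ---------------------------------------------------------------------------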
-import * as Config_ from "effect/Config" -import type { ConfigError } from "effect/ConfigError" -import * as ConfigProvider from "effect/ConfigProvider" -import * as Context from "effect/Context" -import * as Data from "effect/Data" -import * as Deferred from "effect/Deferred" -import * as Duration from "effect/Duration" -import * as Effect from "effect/Effect" -import * as Equal from "effect/Equal" -import * as FiberSet from "effect/FiberSet" -import { identity } from "effect/Function" -import * as Iterable from "effect/Iterable" -import * as Layer from "effect/Layer" -import * as Mailbox from "effect/Mailbox" -import * as Metric from "effect/Metric" -import * as MetricLabel from "effect/MetricLabel" -import * as MutableHashMap from "effect/MutableHashMap" -import * as MutableHashSet from "effect/MutableHashSet" -import * as Option from "effect/Option" -import * as PubSub from "effect/PubSub" -import * as Queue from "effect/Queue" -import * as Schedule from "effect/Schedule" -import * as Schema from "effect/Schema" -import type { Scope } from "effect/Scope" -import { RunnerNotRegistered } from "./ClusterError.js" -import * as ClusterMetrics from "./ClusterMetrics.js" -import { addAllNested, decideAssignmentsForShards, State } from "./internal/shardManager.js" -import * as MachineId from "./MachineId.js" -import { Runner } from "./Runner.js" -import { RunnerAddress } from "./RunnerAddress.js" -import { RunnerHealth } from "./RunnerHealth.js" -import { RpcClientProtocol, Runners } from "./Runners.js" -import { make as makeShardId, ShardId } from "./ShardId.js" -import { ShardingConfig } from "./ShardingConfig.js" -import { ShardStorage } from "./ShardStorage.js" - -/** - * @since 1.0.0 - * @category models - */ -export class ShardManager extends Context.Tag("@effect/cluster/ShardManager")]> - > - /** - * Get a stream of sharding events emit by the shard manager. - */ - readonly shardingEvents: ( - address: Option.Option - ) => Effect.Effect, RunnerNotRegistered, Scope> - /** - * Register a new runner with the cluster. - */ - readonly register: (runner: Runner) => Effect.Effect - /** - * Unregister a runner from the cluster. - */ - readonly unregister: (address: RunnerAddress) => Effect.Effect - /** - * Rebalance shards assigned to runners within the cluster. - */ - readonly rebalance: Effect.Effect - /** - * Notify the cluster of an unhealthy runner. - */ - readonly notifyUnhealthyRunner: (address: RunnerAddress) => Effect.Effect - /** - * Check and repot on the health of all runners in the cluster. 
- */ - readonly checkRunnerHealth: Effect.Effect -}>() {} - -/** - * @since 1.0.0 - * @category Config - */ -export class Config extends Context.Tag("@effect/cluster/ShardManager/Config")() { - /** - * @since 1.0.0 - */ - static readonly defaults: Config["Type"] = { - rebalanceDebounce: Duration.seconds(3), - rebalanceInterval: Duration.seconds(20), - rebalanceRetryInterval: Duration.seconds(10), - rebalanceRate: 2 / 100, - persistRetryCount: 100, - persistRetryInterval: Duration.seconds(3), - runnerHealthCheckInterval: Duration.minutes(1), - runnerPingTimeout: Duration.seconds(3) - } -} - -/** - * @since 1.0.0 - * @category Config - */ -export const configConfig: Config_.Config = Config_.all({ - rebalanceDebounce: Config_.duration("rebalanceDebounce").pipe( - Config_.withDefault(Config.defaults.rebalanceDebounce), - Config_.withDescription("The duration to wait before rebalancing shards after a change.") - ), - rebalanceInterval: Config_.duration("rebalanceInterval").pipe( - Config_.withDefault(Config.defaults.rebalanceInterval), - Config_.withDescription("The interval on which regular rebalancing of shards will occur.") - ), - rebalanceRetryInterval: Config_.duration("rebalanceRetryInterval").pipe( - Config_.withDefault(Config.defaults.rebalanceRetryInterval), - Config_.withDescription( - "The interval on which rebalancing of shards which failed to be rebalanced will be retried." - ) - ), - rebalanceRate: Config_.number("rebalanceRate").pipe( - Config_.withDefault(Config.defaults.rebalanceRate), - Config_.withDescription("The maximum ratio of shards to rebalance at once.") - ), - persistRetryCount: Config_.integer("persistRetryCount").pipe( - Config_.withDefault(Config.defaults.persistRetryCount), - Config_.withDescription("The number of times persistence of runners will be retried if it fails.") - ), - persistRetryInterval: Config_.duration("persistRetryInterval").pipe( - Config_.withDefault(Config.defaults.persistRetryInterval), - Config_.withDescription("The interval on which persistence of runners will be retried if it fails.") - ), - runnerHealthCheckInterval: Config_.duration("runnerHealthCheckInterval").pipe( - Config_.withDefault(Config.defaults.runnerHealthCheckInterval), - Config_.withDescription("The interval on which runner health will be checked.") - ), - runnerPingTimeout: Config_.duration("runnerPingTimeout").pipe( - Config_.withDefault(Config.defaults.runnerPingTimeout), - Config_.withDescription("The length of time to wait for a runner to respond to a ping.") - ) -}) - -/** - * @since 1.0.0 - * @category Config - */ -export const configFromEnv: Effect.Effect = configConfig.pipe( - Effect.withConfigProvider( - ConfigProvider.fromEnv().pipe( - ConfigProvider.constantCase - ) - ) -) - -/** - * @since 1.0.0 - * @category Config - */ -export const layerConfig = (config?: Partial | undefined): Layer.Layer => - Layer.succeed(Config, { - ...Config.defaults, - ...config - }) - -/** - * @since 1.0.0 - * @category Config - */ -export const layerConfigFromEnv = (config?: Partial | undefined): Layer.Layer => - Layer.effect(Config, config ? Effect.map(configFromEnv, (env) => ({ ...env, ...config })) : configFromEnv) - -/** - * Represents a client which can be used to communicate with the - * `ShardManager`. - * - * @since 1.0.0 - * @category Client - */ -export class ShardManagerClient - extends Context.Tag("@effect/cluster/ShardManager/ShardManagerClient")) => Effect.Effect - /** - * Unregister a runner from the cluster. 
- */ - readonly unregister: (address: RunnerAddress) => Effect.Effect - /** - * Notify the cluster of an unhealthy runner. - */ - readonly notifyUnhealthyRunner: (address: RunnerAddress) => Effect.Effect - /** - * Get all shard assignments. - */ - readonly getAssignments: Effect.Effect< - Iterable]> - > - /** - * Get a stream of sharding events emit by the shard manager. - */ - readonly shardingEvents: ( - address: Option.Option - ) => Effect.Effect, never, Scope> - /** - * Get the current time on the shard manager. - */ - readonly getTime: Effect.Effect - }>() -{} - -/** - * @since 1.0.0 - * @category models - */ -export const ShardingEventSchema = Schema.Union( - Schema.TaggedStruct("StreamStarted", {}), - Schema.TaggedStruct("ShardsAssigned", { - address: RunnerAddress, - shards: Schema.Array(ShardId) - }), - Schema.TaggedStruct("ShardsUnassigned", { - address: RunnerAddress, - shards: Schema.Array(ShardId) - }), - Schema.TaggedStruct("RunnerRegistered", { - address: RunnerAddress - }), - Schema.TaggedStruct("RunnerUnregistered", { - address: RunnerAddress - }) -) satisfies Schema.Schema - -/** - * The messaging protocol for the `ShardManager`. - * - * @since 1.0.0 - * @category Rpcs - */ -export class Rpcs extends RpcGroup.make( - Rpc.make("Register", { - payload: { runner: Runner }, - success: MachineId.MachineId - }), - Rpc.make("Unregister", { - payload: { address: RunnerAddress } - }), - Rpc.make("NotifyUnhealthyRunner", { - payload: { address: RunnerAddress } - }), - Rpc.make("GetAssignments", { - success: Schema.Array(Schema.Tuple(ShardId, Schema.Option(RunnerAddress))) - }), - Rpc.make("ShardingEvents", { - payload: { address: Schema.Option(RunnerAddress) }, - success: ShardingEventSchema, - error: RunnerNotRegistered, - stream: true - }), - Rpc.make("GetTime", { - success: Schema.Number - }) -) {} - -/** - * @since 1.0.0 - * @category models - */ -export type ShardingEvent = Data.TaggedEnum<{ - StreamStarted: {} - ShardsAssigned: { - address: RunnerAddress - shards: ReadonlyArray - } - ShardsUnassigned: { - address: RunnerAddress - shards: ReadonlyArray - } - RunnerRegistered: { address: RunnerAddress } - RunnerUnregistered: { address: RunnerAddress } -}> - -/** - * @since 1.0.0 - * @category models - */ -export const ShardingEvent = Data.taggedEnum() - -/** - * @since 1.0.0 - * @category Client - */ -export const makeClientLocal = Effect.gen(function*() { - const config = yield* ShardingConfig - const clock = yield* Effect.clock - - const groups = new Set() - const shards = MutableHashMap.empty>() - - let machineId = 0 - - return ShardManagerClient.of({ - register: (_, groupsToAdd) => - Effect.sync(() => { - for (const group of groupsToAdd) { - if (groups.has(group)) continue - groups.add(group) - for (let n = 1; n <= config.shardsPerGroup; n++) { - MutableHashMap.set(shards, makeShardId(group, n), config.runnerAddress) - } - } - return MachineId.make(++machineId) - }), - unregister: () => Effect.void, - notifyUnhealthyRunner: () => Effect.void, - getAssignments: Effect.succeed(shards), - shardingEvents: Effect.fnUntraced(function*(_address) { - const mailbox = yield* Mailbox.make() - yield* mailbox.offer(ShardingEvent.StreamStarted()) - return mailbox - }), - getTime: clock.currentTimeMillis - }) -}) - -/** - * @since 1.0.0 - * @category Client - */ -export const makeClientRpc: Effect.Effect< - ShardManagerClient["Type"], - never, - ShardingConfig | RpcClient.Protocol | Scope -> = Effect.gen(function*() { - const config = yield* ShardingConfig - const client = yield* 
RpcClient.make(Rpcs, { - spanPrefix: "ShardManagerClient", - disableTracing: true - }) - - return ShardManagerClient.of({ - register: (address, groups) => - client.Register({ runner: Runner.make({ address, version: config.serverVersion, groups }) }).pipe( - Effect.orDie - ), - unregister: (address) => Effect.orDie(client.Unregister({ address })), - notifyUnhealthyRunner: (address) => Effect.orDie(client.NotifyUnhealthyRunner({ address })), - getAssignments: Effect.orDie(client.GetAssignments()), - shardingEvents: (address) => - Mailbox.make().pipe( - Effect.tap(Effect.fnUntraced( - function*(mailbox) { - const events = yield* client.ShardingEvents({ address }, { asMailbox: true }) - const take = Effect.orDie(events.takeAll) - while (true) { - mailbox.unsafeOfferAll((yield* take)[0]) - } - }, - (effect, mb) => Mailbox.into(effect, mb), - Effect.forkScoped - )) - ), - getTime: Effect.orDie(client.GetTime()) - }) -}) - -/** - * @since 1.0.0 - * @category Client - */ -export const layerClientLocal: Layer.Layer< - ShardManagerClient, - never, - ShardingConfig -> = Layer.effect(ShardManagerClient, makeClientLocal) - -/** - * @since 1.0.0 - * @category Client - */ -export const layerClientRpc: Layer.Layer< - ShardManagerClient, - never, - ShardingConfig | RpcClientProtocol -> = Layer.scoped(ShardManagerClient, makeClientRpc).pipe( - Layer.provide(Layer.scoped( - RpcClient.Protocol, - Effect.gen(function*() { - const config = yield* ShardingConfig - const clientProtocol = yield* RpcClientProtocol - return yield* clientProtocol(config.shardManagerAddress) - }) - )) -) - -/** - * @since 1.0.0 - * @category Constructors - */ -export const make = Effect.gen(function*() { - const storage = yield* ShardStorage - const runnersApi = yield* Runners - const runnerHealthApi = yield* RunnerHealth - const clock = yield* Effect.clock - const config = yield* Config - const shardingConfig = yield* ShardingConfig - - const state = yield* Effect.orDie(State.fromStorage(shardingConfig.shardsPerGroup)) - const scope = yield* Effect.scope - const events = yield* PubSub.unbounded() - - function updateRunnerMetrics() { - ClusterMetrics.runners.unsafeUpdate(MutableHashMap.size(state.allRunners), []) - } - - function updateShardMetrics() { - const stats = state.shardStats - for (const [address, shardCount] of stats.perRunner) { - ClusterMetrics.assignedShards.unsafeUpdate( - shardCount, - [MetricLabel.make("address", address)] - ) - } - ClusterMetrics.unassignedShards.unsafeUpdate(stats.unassigned, []) - } - updateShardMetrics() - - function withRetry(effect: Effect.Effect): Effect.Effect { - return effect.pipe( - Effect.retry({ - schedule: Schedule.spaced(config.persistRetryCount), - times: config.persistRetryCount - }), - Effect.ignore - ) - } - - const persistRunners = Effect.unsafeMakeSemaphore(1).withPermits(1)(withRetry( - Effect.suspend(() => - storage.saveRunners( - Iterable.map(state.allRunners, ([address, runner]) => [address, runner.runner]) - ) - ) - )) - - const persistAssignments = Effect.unsafeMakeSemaphore(1).withPermits(1)(withRetry( - Effect.suspend(() => storage.saveAssignments(state.assignments)) - )) - - const notifyUnhealthyRunner = Effect.fnUntraced(function*(address: RunnerAddress) { - if (!MutableHashMap.has(state.allRunners, address)) return - - if (!(yield* runnerHealthApi.isAlive(address))) { - yield* Effect.logWarning(`Runner at address '${address.toString()}' is not alive`) - yield* unregister(address) - } - }) - - function updateShardsState( - shards: Iterable, - address: Option.Option - ): 
Effect.Effect { - return Effect.suspend(() => { - if (Option.isSome(address) && !MutableHashMap.has(state.allRunners, address.value)) { - return Effect.fail(new RunnerNotRegistered({ address: address.value })) - } - state.addAssignments(shards, address) - return Effect.void - }) - } - - const getAssignments = Effect.sync(() => state.assignments) - - let machineId = 0 - const register = Effect.fnUntraced(function*(runner: Runner) { - yield* Effect.logInfo(`Registering runner ${Runner.pretty(runner)}`) - - const current = MutableHashMap.get(state.allRunners, runner.address).pipe( - Option.filter((r) => r.runner.version === runner.version) - ) - if (Option.isSome(current)) { - return MachineId.make(++machineId) - } - - state.addRunner(runner, clock.unsafeCurrentTimeMillis()) - updateRunnerMetrics() - yield* PubSub.publish(events, ShardingEvent.RunnerRegistered({ address: runner.address })) - yield* Effect.forkIn(persistRunners, scope) - yield* Effect.forkIn(rebalance, scope) - return MachineId.make(++machineId) - }) - - const unregister = Effect.fnUntraced(function*(address: RunnerAddress) { - if (!MutableHashMap.has(state.allRunners, address)) return - - yield* Effect.logInfo("Unregistering runner at address:", address) - const unassignments = Arr.empty() - for (const [shard, runner] of state.assignments) { - if (Option.isSome(runner) && Equal.equals(runner.value, address)) { - unassignments.push(shard) - } - } - state.addAssignments(unassignments, Option.none()) - state.removeRunner(address) - updateRunnerMetrics() - - if (unassignments.length > 0) { - yield* PubSub.publish(events, ShardingEvent.RunnerUnregistered({ address })) - } - - yield* Effect.forkIn(persistRunners, scope) - yield* Effect.forkIn(rebalance, scope) - }) - - let rebalancing = false - let rebalanceDeferred: Deferred.Deferred | undefined - const rebalanceFibers = yield* FiberSet.make() - - const rebalance = Effect.withFiberRuntime((fiber) => { - if (!rebalancing) { - rebalancing = true - return rebalanceLoop - } - if (!rebalanceDeferred) { - rebalanceDeferred = Deferred.unsafeMake(fiber.id()) - } - return Deferred.await(rebalanceDeferred) - }) - - const rebalanceLoop: Effect.Effect = Effect.suspend(() => { - const deferred = rebalanceDeferred - rebalanceDeferred = undefined - return runRebalance.pipe( - deferred ? 
Effect.intoDeferred(deferred) : identity, - Effect.onExit(() => { - if (!rebalanceDeferred) { - rebalancing = false - return Effect.void - } - return Effect.forkIn(rebalanceLoop, scope) - }) - ) - }) - - const runRebalance = Effect.gen(function*() { - yield* Effect.sleep(config.rebalanceDebounce) - - if (state.shards.size === 0) { - yield* Effect.logDebug("No shards to rebalance") - return - } - - // Determine which shards to assign and unassign - const assignments = MutableHashMap.empty>() - const unassignments = MutableHashMap.empty>() - const changes = MutableHashSet.empty() - for (const group of state.shards.keys()) { - const [groupAssignments, groupUnassignments, groupChanges] = decideAssignmentsForShards(state, group) - for (const [address, shards] of groupAssignments) { - addAllNested(assignments, address, Array.from(shards, (id) => makeShardId(group, id))) - } - for (const [address, shards] of groupUnassignments) { - addAllNested(unassignments, address, Array.from(shards, (id) => makeShardId(group, id))) - } - for (const address of groupChanges) { - MutableHashSet.add(changes, address) - } - } - - yield* Effect.logDebug(`Rebalancing shards`) - - if (MutableHashSet.size(changes) === 0) return - - yield* Metric.increment(ClusterMetrics.rebalances) - - // Ping runners first and remove unhealthy ones - const failedRunners = MutableHashSet.empty() - for (const address of changes) { - yield* FiberSet.run( - rebalanceFibers, - runnersApi.ping(address).pipe( - Effect.timeout(config.runnerPingTimeout), - Effect.catchAll(() => { - MutableHashSet.add(failedRunners, address) - MutableHashMap.remove(assignments, address) - MutableHashMap.remove(unassignments, address) - return Effect.void - }) - ) - ) - } - yield* FiberSet.awaitEmpty(rebalanceFibers) - - const failedUnassignments = new Set() - for (const [address, shards] of unassignments) { - yield* FiberSet.run( - rebalanceFibers, - updateShardsState(shards, Option.none()).pipe( - Effect.matchEffect({ - onFailure: () => { - MutableHashSet.add(failedRunners, address) - for (const shard of shards) { - failedUnassignments.add(shard) - } - // Remove failed runners from the assignments - MutableHashMap.remove(assignments, address) - return Effect.void - }, - onSuccess: () => - PubSub.publish(events, ShardingEvent.ShardsUnassigned({ address, shards: Array.from(shards) })) - }) - ) - ) - } - yield* FiberSet.awaitEmpty(rebalanceFibers) - - // Remove failed shard unassignments from the assignments - MutableHashMap.forEach(assignments, (shards, address) => { - for (const shard of failedUnassignments) { - MutableHashSet.remove(shards, shard) - } - if (MutableHashSet.size(shards) === 0) { - MutableHashMap.remove(assignments, address) - } - }) - - // Perform the assignments - for (const [address, shards] of assignments) { - yield* FiberSet.run( - rebalanceFibers, - updateShardsState(shards, Option.some(address)).pipe( - Effect.matchEffect({ - onFailure: () => { - MutableHashSet.add(failedRunners, address) - return Effect.void - }, - onSuccess: () => - PubSub.publish(events, ShardingEvent.ShardsAssigned({ address, shards: Array.from(shards) })) - }) - ) - ) - } - yield* FiberSet.awaitEmpty(rebalanceFibers) - - updateShardMetrics() - - const wereFailures = MutableHashSet.size(failedRunners) > 0 - if (wereFailures) { - // Check if the failing runners are still reachable - yield* Effect.forEach(failedRunners, notifyUnhealthyRunner, { discard: true }).pipe( - Effect.forkIn(scope) - ) - yield* Effect.logWarning("Failed to rebalance runners: ", failedRunners) - 
} - - if (wereFailures) { - // Try rebalancing again later if there were any failures - yield* Clock.sleep(config.rebalanceRetryInterval).pipe( - Effect.zipRight(rebalance), - Effect.forkIn(scope) - ) - } - - yield* persistAssignments - }).pipe(Effect.withSpan("ShardManager.rebalance", { captureStackTrace: false })) - - const checkRunnerHealth: Effect.Effect = Effect.suspend(() => - Effect.forEach(MutableHashMap.keys(state.allRunners), notifyUnhealthyRunner, { - concurrency: 10, - discard: true - }) - ) - - yield* Effect.addFinalizer(() => - persistAssignments.pipe( - Effect.catchAllCause((cause) => Effect.logWarning("Failed to persist assignments on shutdown", cause)), - Effect.zipRight(persistRunners.pipe( - Effect.catchAllCause((cause) => Effect.logWarning("Failed to persist runners on shutdown", cause)) - )) - ) - ) - - yield* Effect.forkIn(persistRunners, scope) - - // Start a regular cluster rebalance at the configured interval - yield* rebalance.pipe( - Effect.andThen(Effect.sleep(config.rebalanceInterval)), - Effect.forever, - Effect.forkIn(scope) - ) - - yield* checkRunnerHealth.pipe( - Effect.andThen(Effect.sleep(config.runnerHealthCheckInterval)), - Effect.forever, - Effect.forkIn(scope) - ) - - yield* Effect.gen(function*() { - const queue = yield* PubSub.subscribe(events) - while (true) { - yield* Effect.logInfo("Shard manager event:", yield* Queue.take(queue)) - } - }).pipe(Effect.forkIn(scope)) - - yield* Effect.logInfo("Shard manager initialized") - - return ShardManager.of({ - getAssignments, - shardingEvents: (address) => { - if (Option.isNone(address)) { - return PubSub.subscribe(events) - } - return Effect.tap(PubSub.subscribe(events), () => { - const isRegistered = MutableHashMap.has(state.allRunners, address.value) - if (isRegistered) { - return runnerHealthApi.onConnection(address.value) - } - return Effect.fail(new RunnerNotRegistered({ address: address.value })) - }) - }, - register, - unregister, - rebalance, - notifyUnhealthyRunner, - checkRunnerHealth - }) -}) - -/** - * @since 1.0.0 - * @category layer - */ -export const layer: Layer.Layer< - ShardManager, - never, - ShardStorage | RunnerHealth | Runners | Config | ShardingConfig -> = Layer.scoped(ShardManager, make) - -/** - * @since 1.0.0 - * @category Server - */ -export const layerServerHandlers = Rpcs.toLayer(Effect.gen(function*() { - const shardManager = yield* ShardManager - const clock = yield* Effect.clock - return { - Register: ({ runner }) => shardManager.register(runner), - Unregister: ({ address }) => shardManager.unregister(address), - NotifyUnhealthyRunner: ({ address }) => shardManager.notifyUnhealthyRunner(address), - GetAssignments: () => - Effect.map( - shardManager.getAssignments, - (assignments) => Array.from(assignments) - ), - ShardingEvents: Effect.fnUntraced(function*({ address }) { - const queue = yield* shardManager.shardingEvents(address) - const mailbox = yield* Mailbox.make() - - yield* mailbox.offer(ShardingEvent.StreamStarted()) - - yield* Queue.takeBetween(queue, 1, Number.MAX_SAFE_INTEGER).pipe( - Effect.flatMap((events) => mailbox.offerAll(events)), - Effect.forever, - Effect.forkScoped - ) - - return mailbox - }), - GetTime: () => clock.currentTimeMillis - } -})) - -/** - * @since 1.0.0 - * @category Server - */ -export const layerServer: Layer.Layer< - never, - never, - ShardManager | RpcServer.Protocol -> = RpcServer.layer(Rpcs, { - spanPrefix: "ShardManager", - disableTracing: true -}).pipe(Layer.provide(layerServerHandlers)) diff --git 
a/packages/cluster/src/ShardStorage.ts b/packages/cluster/src/ShardStorage.ts deleted file mode 100644 index 980d86a0919..00000000000 --- a/packages/cluster/src/ShardStorage.ts +++ /dev/null @@ -1,297 +0,0 @@ -/** - * @since 1.0.0 - */ -import * as Arr from "effect/Array" -import * as Context from "effect/Context" -import * as Effect from "effect/Effect" -import * as Layer from "effect/Layer" -import * as MutableHashMap from "effect/MutableHashMap" -import * as Option from "effect/Option" -import type { PersistenceError } from "./ClusterError.js" -import { Runner } from "./Runner.js" -import { RunnerAddress } from "./RunnerAddress.js" -import { ShardId } from "./ShardId.js" - -/** - * Represents a generic interface to the persistent storage required by the - * cluster. - * - * @since 1.0.0 - * @category models - */ -export class ShardStorage extends Context.Tag("@effect/cluster/ShardStorage")]>, - PersistenceError - > - - /** - * Save the current state of shards assignments to runners. - */ - readonly saveAssignments: ( - assignments: Iterable]> - ) => Effect.Effect - - /** - * Get all runners registered with the cluster. - */ - readonly getRunners: Effect.Effect, PersistenceError> - - /** - * Save the current runners registered with the cluster. - */ - readonly saveRunners: (runners: Iterable) => Effect.Effect - - /** - * Try to acquire the given shard ids for processing. - * - * It returns an array of shards it was able to acquire. - */ - readonly acquire: ( - address: RunnerAddress, - shardIds: Iterable - ) => Effect.Effect, PersistenceError> - - /** - * Refresh the locks owned by the given runner. - * - * Locks expire after 15 seconds, so this method should be called every 10 - * seconds to keep the locks alive. - */ - readonly refresh: ( - address: RunnerAddress, - shardIds: Iterable - ) => Effect.Effect, PersistenceError> - - /** - * Release the given shard ids. - */ - readonly release: ( - address: RunnerAddress, - shardId: ShardId - ) => Effect.Effect - - /** - * Release all the shards assigned to the given runner. - */ - readonly releaseAll: (address: RunnerAddress) => Effect.Effect -}>() {} - -/** - * @since 1.0.0 - * @category Encoded - */ -export interface Encoded { - /** - * Get the current assignments of shards to runners. - */ - readonly getAssignments: Effect.Effect< - Array< - readonly [ - shardId: string, - runnerAddress: string | null - ] - >, - PersistenceError - > - - /** - * Save the current state of shards assignments to runners. - */ - readonly saveAssignments: ( - assignments: Array - ) => Effect.Effect - - /** - * Get all runners registered with the cluster. - */ - readonly getRunners: Effect.Effect, PersistenceError> - - /** - * Save the current runners registered with the cluster. - */ - readonly saveRunners: ( - runners: Array - ) => Effect.Effect - - /** - * Acquire the lock on the given shards, returning the shards that were - * successfully locked. - */ - readonly acquire: ( - address: string, - shardIds: ReadonlyArray - ) => Effect.Effect, PersistenceError> - - /** - * Refresh the lock on the given shards, returning the shards that were - * successfully locked. - */ - readonly refresh: ( - address: string, - shardIds: ReadonlyArray - ) => Effect.Effect, PersistenceError> - - /** - * Release the lock on the given shard. - */ - readonly release: ( - address: string, - shardId: string - ) => Effect.Effect - - /** - * Release the lock on all shards for the given runner. 
- */ - readonly releaseAll: (address: string) => Effect.Effect -} - -/** - * @since 1.0.0 - * @category layers - */ -export const makeEncoded = (encoded: Encoded) => - ShardStorage.of({ - getAssignments: Effect.map(encoded.getAssignments, (assignments) => { - const arr = Arr.empty<[ShardId, Option.Option]>() - for (const [shardId, runnerAddress] of assignments) { - arr.push([ - ShardId.fromString(shardId), - runnerAddress === null ? Option.none() : Option.some(decodeRunnerAddress(runnerAddress)) - ]) - } - return arr - }), - saveAssignments: (assignments) => { - const arr = Arr.empty() - for (const [shardId, runnerAddress] of assignments) { - arr.push([ - shardId.toString(), - Option.isNone(runnerAddress) ? null : encodeRunnerAddress(runnerAddress.value) - ]) - } - return encoded.saveAssignments(arr) - }, - getRunners: Effect.gen(function*() { - const runners = yield* encoded.getRunners - const results: Array<[RunnerAddress, Runner]> = [] - for (let i = 0; i < runners.length; i++) { - const [address, runner] = runners[i] - try { - results.push([decodeRunnerAddress(address), Runner.decodeSync(runner)]) - } catch { - // - } - } - return results - }), - saveRunners: (runners) => - Effect.suspend(() => - encoded.saveRunners( - Array.from(runners, ([address, runner]) => [encodeRunnerAddress(address), Runner.encodeSync(runner)]) - ) - ), - acquire: (address, shardIds) => { - const arr = Array.from(shardIds, (id) => id.toString()) - return encoded.acquire(encodeRunnerAddress(address), arr).pipe( - Effect.map((shards) => shards.map(ShardId.fromString)) - ) - }, - refresh: (address, shardIds) => { - const arr = Array.from(shardIds, (id) => id.toString()) - return encoded.refresh(encodeRunnerAddress(address), arr).pipe( - Effect.map((shards) => shards.map(ShardId.fromString)) - ) - }, - release(address, shardId) { - return encoded.release(encodeRunnerAddress(address), shardId.toString()) - }, - releaseAll(address) { - return encoded.releaseAll(encodeRunnerAddress(address)) - } - }) - -/** - * @since 1.0.0 - * @category layers - */ -export const layerNoop: Layer.Layer = Layer.sync( - ShardStorage, - () => { - let acquired: Array = [] - return ShardStorage.of({ - getAssignments: Effect.sync(() => []), - saveAssignments: () => Effect.void, - getRunners: Effect.sync(() => []), - saveRunners: () => Effect.void, - acquire: (_address, shards) => { - acquired = Array.from(shards) - return Effect.succeed(Array.from(shards)) - }, - refresh: () => Effect.sync(() => acquired), - release: () => Effect.void, - releaseAll: () => Effect.void - }) - } -) - -/** - * @since 1.0.0 - * @category constructors - */ -export const makeMemory = Effect.gen(function*() { - const assignments = MutableHashMap.empty>() - const runners = MutableHashMap.empty() - - function saveAssignments(value: Iterable]>) { - return Effect.sync(() => { - for (const [shardId, runnerAddress] of value) { - MutableHashMap.set(assignments, shardId, runnerAddress) - } - }) - } - - function saveRunners(value: Iterable) { - return Effect.sync(() => { - for (const [address, runner] of value) { - MutableHashMap.set(runners, address, runner) - } - }) - } - - let acquired: Array = [] - - return ShardStorage.of({ - getAssignments: Effect.sync(() => Array.from(assignments)), - saveAssignments, - getRunners: Effect.sync(() => Array.from(runners)), - saveRunners, - acquire: (_address, shardIds) => { - acquired = Array.from(shardIds) - return Effect.succeed(Array.from(shardIds)) - }, - refresh: () => Effect.sync(() => acquired), - release: () => Effect.void, - 
releaseAll: () => Effect.void - }) -}) - -/** - * @since 1.0.0 - * @category layers - */ -export const layerMemory: Layer.Layer = Layer.effect(ShardStorage, makeMemory) - -// ------------------------------------------------------------------------------------- -// internal -// ------------------------------------------------------------------------------------- - -const encodeRunnerAddress = (runnerAddress: RunnerAddress) => `${runnerAddress.host}:${runnerAddress.port}` - -const decodeRunnerAddress = (runnerAddress: string): RunnerAddress => { - const [host, port] = runnerAddress.split(":") - return new RunnerAddress({ host, port: Number(port) }) -} diff --git a/packages/cluster/src/Sharding.ts b/packages/cluster/src/Sharding.ts index fb6aa5a2589..9a66e837ae6 100644 --- a/packages/cluster/src/Sharding.ts +++ b/packages/cluster/src/Sharding.ts @@ -7,41 +7,36 @@ import { type FromServer, RequestId } from "@effect/rpc/RpcMessage" import * as Arr from "effect/Array" import * as Cause from "effect/Cause" import * as Context from "effect/Context" -import * as Deferred from "effect/Deferred" import type { DurationInput } from "effect/Duration" import * as Effect from "effect/Effect" +import * as Either from "effect/Either" import * as Equal from "effect/Equal" -import * as Exit from "effect/Exit" import * as Fiber from "effect/Fiber" -import * as FiberHandle from "effect/FiberHandle" import * as FiberMap from "effect/FiberMap" import * as FiberRef from "effect/FiberRef" +import * as FiberSet from "effect/FiberSet" import { constant } from "effect/Function" import * as HashMap from "effect/HashMap" -import * as Iterable from "effect/Iterable" +import * as HashRing from "effect/HashRing" import * as Layer from "effect/Layer" import * as MutableHashMap from "effect/MutableHashMap" import * as MutableHashSet from "effect/MutableHashSet" import * as MutableRef from "effect/MutableRef" import * as Option from "effect/Option" -import * as Predicate from "effect/Predicate" import * as PubSub from "effect/PubSub" import * as Schedule from "effect/Schedule" import * as Scope from "effect/Scope" import * as Stream from "effect/Stream" import type { MailboxFull, PersistenceError } from "./ClusterError.js" -import { - AlreadyProcessingMessage, - EntityNotAssignedToRunner, - EntityNotManagedByRunner, - RunnerUnavailable -} from "./ClusterError.js" -import * as ClusterError from "./ClusterError.js" +import { AlreadyProcessingMessage, EntityNotAssignedToRunner } from "./ClusterError.js" +import * as ClusterMetrics from "./ClusterMetrics.js" import { Persisted, Uninterruptible } from "./ClusterSchema.js" import * as ClusterSchema from "./ClusterSchema.js" import type { CurrentAddress, CurrentRunnerAddress, Entity, HandlersFrom } from "./Entity.js" -import { EntityAddress } from "./EntityAddress.js" -import { EntityId } from "./EntityId.js" +import type { EntityAddress } from "./EntityAddress.js" +import { make as makeEntityAddress } from "./EntityAddress.js" +import type { EntityId } from "./EntityId.js" +import { make as makeEntityId } from "./EntityId.js" import * as Envelope from "./Envelope.js" import * as EntityManager from "./internal/entityManager.js" import { EntityReaper } from "./internal/entityReaper.js" @@ -51,13 +46,15 @@ import { ResourceMap } from "./internal/resourceMap.js" import * as Message from "./Message.js" import * as MessageStorage from "./MessageStorage.js" import * as Reply from "./Reply.js" +import { Runner } from "./Runner.js" import type { RunnerAddress } from "./RunnerAddress.js" 
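// ---------------------------------------------------------------------------
// Editor's illustration (standalone TypeScript, not part of the patch): both
// the new RunnerStorage and the deleted ShardStorage above serialize runner
// addresses as a `host:port` string via encodeRunnerAddress. A sketch of the
// round-trip, with a simplified address shape and hypothetical names:
interface Address {
  readonly host: string
  readonly port: number
}

const encodeAddress = (address: Address): string => `${address.host}:${address.port}`

const decodeAddress = (encoded: string): Address => {
  // split on the last ":" so the port always parses; the deleted ShardStorage
  // used a plain split(":"), which assumes the host itself contains no ":"
  // (true for hostnames and IPv4 literals, not for raw IPv6)
  const i = encoded.lastIndexOf(":")
  return { host: encoded.slice(0, i), port: Number(encoded.slice(i + 1)) }
}

console.log(decodeAddress(encodeAddress({ host: "10.0.0.5", port: 34431 })))
// => { host: "10.0.0.5", port: 34431 }
// ---------------------------------------------------------------------------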
+import * as RunnerHealth from "./RunnerHealth.js" import { Runners } from "./Runners.js" -import { ShardId } from "./ShardId.js" +import { RunnerStorage } from "./RunnerStorage.js" +import type { ShardId } from "./ShardId.js" +import { make as makeShardId } from "./ShardId.js" import { ShardingConfig } from "./ShardingConfig.js" import { EntityRegistered, type ShardingRegistrationEvent, SingletonRegistered } from "./ShardingRegistrationEvent.js" -import { ShardManagerClient } from "./ShardManager.js" -import { ShardStorage } from "./ShardStorage.js" import { SingletonAddress } from "./SingletonAddress.js" import * as Snowflake from "./Snowflake.js" @@ -78,6 +75,16 @@ export class Sharding extends Context.Tag("@effect/cluster/Sharding") ShardId + /** + * Returns `true` if the specified `shardId` is assigned to this runner. + */ + readonly hasShardId: (shardId: ShardId) => boolean + + /** + * Generate a Snowflake ID that is unique to this runner. + */ + readonly getSnowflake: Effect.Effect + /** * Returns `true` if sharding is shutting down, `false` otherwise. */ @@ -94,7 +101,7 @@ export class Sharding extends Context.Tag("@effect/cluster/Sharding") RpcClient.RpcClient.From< Rpcs, - MailboxFull | AlreadyProcessingMessage | PersistenceError | EntityNotManagedByRunner + MailboxFull | AlreadyProcessingMessage | PersistenceError > > @@ -134,7 +141,7 @@ export class Sharding extends Context.Tag("@effect/cluster/Sharding")) => Effect.Effect< void, - EntityNotManagedByRunner | EntityNotAssignedToRunner | MailboxFull | AlreadyProcessingMessage + EntityNotAssignedToRunner | MailboxFull | AlreadyProcessingMessage > /** @@ -145,15 +152,17 @@ export class Sharding extends Context.Tag("@effect/cluster/Sharding") Effect.Effect< void, - EntityNotManagedByRunner | MailboxFull | AlreadyProcessingMessage | PersistenceError + MailboxFull | AlreadyProcessingMessage | PersistenceError > /** * Notify sharding that a message has been persisted to storage. 
*/ - readonly notify: (message: Message.Incoming) => Effect.Effect< + readonly notify: (message: Message.Incoming, options?: { + readonly waitUntilRead?: boolean | undefined + }) => Effect.Effect< void, - EntityNotManagedByRunner | EntityNotAssignedToRunner | AlreadyProcessingMessage + EntityNotAssignedToRunner | AlreadyProcessingMessage > /** @@ -180,20 +189,25 @@ interface EntityManagerState { readonly entity: Entity readonly scope: Scope.CloseableScope readonly manager: EntityManager.EntityManager + closed: boolean } const make = Effect.gen(function*() { const config = yield* ShardingConfig - const runners = yield* Runners - const shardManager = yield* ShardManagerClient + const runnersService = yield* Runners + const runnerHealth = yield* RunnerHealth.RunnerHealth const snowflakeGen = yield* Snowflake.Generator const shardingScope = yield* Effect.scope const isShutdown = MutableRef.make(false) + const fiberSet = yield* FiberSet.make() + const runFork = yield* FiberSet.runtime(fiberSet)().pipe( + Effect.mapInputContext((context: Context.Context) => Context.omit(Scope.Scope)(context)) + ) const storage = yield* MessageStorage.MessageStorage const storageEnabled = storage !== MessageStorage.noop - const shardStorage = yield* ShardStorage + const runnerStorage = yield* RunnerStorage const entityManagers = new Map() @@ -212,7 +226,7 @@ const make = Effect.gen(function*() { function getShardId(entityId: EntityId, group: string): ShardId { const id = Math.abs(hashString(entityId) % config.shardsPerGroup) + 1 - return ShardId.make({ group, id }, { disableValidation: true }) + return makeShardId(group, id) } function isEntityOnLocalShards(address: EntityAddress): boolean { @@ -220,17 +234,23 @@ const make = Effect.gen(function*() { } // --- Shard acquisition --- + // + // Responsible for acquiring and releasing shards from RunnerStorage. + // + // This should be shutdown last, when all entities have been shutdown, to + // allow them to move to another runner. 
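// ---------------------------------------------------------------------------
// Editor's illustration (standalone TypeScript, not part of the patch): the
// shard-acquisition loop below follows an acquire/refresh/release lock
// lifecycle against RunnerStorage. A sketch with an in-memory lock table
// standing in for the database advisory locks; all names are hypothetical:
type Shard = string

class LockTable {
  private readonly owners = new Map<Shard, string>()

  // grant each requested shard unless another runner currently holds it,
  // returning only the shards that were won (mirrors RunnerStorage.acquire)
  acquire(runner: string, shards: Iterable<Shard>): Array<Shard> {
    const won: Array<Shard> = []
    for (const shard of shards) {
      const owner = this.owners.get(shard)
      if (owner === undefined || owner === runner) {
        this.owners.set(shard, runner)
        won.push(shard)
      }
    }
    return won
  }

  // report which of the given shards the runner still holds; anything absent
  // from the result has been lost and must move to the releasing set
  refresh(runner: string, shards: Iterable<Shard>): Array<Shard> {
    return Array.from(shards).filter((shard) => this.owners.get(shard) === runner)
  }

  release(runner: string, shard: Shard): void {
    if (this.owners.get(shard) === runner) this.owners.delete(shard)
  }
}

const locks = new LockTable()
const held = locks.acquire("runner-a", ["default:1", "default:2"])
// a second runner can only win shards runner-a does not hold:
console.log(locks.acquire("runner-b", ["default:2", "default:3"])) // ["default:3"]
// a refresh that dropped a shard would tell runner-a to release and stop serving it:
console.log(locks.refresh("runner-a", held)) // ["default:1", "default:2"]
// ---------------------------------------------------------------------------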
+ const releasingShards = MutableHashSet.empty() if (Option.isSome(config.runnerAddress)) { const selfAddress = config.runnerAddress.value yield* Scope.addFinalizerExit(shardingScope, () => { // the locks expire over time, so if this fails we ignore it - return Effect.ignore(shardStorage.releaseAll(selfAddress)) + return Effect.ignore(runnerStorage.releaseAll(selfAddress)) }) - const releasingShards = MutableHashSet.empty() yield* Effect.gen(function*() { activeShardsLatch.unsafeOpen() + while (true) { yield* activeShardsLatch.await activeShardsLatch.unsafeClose() @@ -241,6 +261,12 @@ const make = Effect.gen(function*() { MutableHashSet.remove(acquiredShards, shardId) MutableHashSet.add(releasingShards, shardId) } + + if (MutableHashSet.size(releasingShards) > 0) { + yield* Effect.forkIn(syncSingletons, shardingScope) + yield* releaseShards + } + // if a shard has been assigned to this runner, we acquire it const unacquiredShards = MutableHashSet.empty() for (const shardId of selfShards) { @@ -248,60 +274,64 @@ const make = Effect.gen(function*() { MutableHashSet.add(unacquiredShards, shardId) } - if (MutableHashSet.size(releasingShards) > 0) { - yield* Effect.forkIn(syncSingletons, shardingScope) - yield* releaseShards - } - if (MutableHashSet.size(unacquiredShards) === 0) { continue } - const acquired = yield* shardStorage.acquire(selfAddress, unacquiredShards) + const acquired = yield* runnerStorage.acquire(selfAddress, unacquiredShards) yield* Effect.ignore(storage.resetShards(acquired)) for (const shardId of acquired) { + if (MutableHashSet.has(releasingShards, shardId) || !MutableHashSet.has(selfShards, shardId)) { + continue + } MutableHashSet.add(acquiredShards, shardId) } if (acquired.length > 0) { yield* storageReadLatch.open yield* Effect.forkIn(syncSingletons, shardingScope) + + // update metrics + ClusterMetrics.shards.unsafeUpdate(BigInt(MutableHashSet.size(acquiredShards)), []) } yield* Effect.sleep(1000) activeShardsLatch.unsafeOpen() } }).pipe( Effect.catchAllCause((cause) => Effect.logWarning("Could not acquire/release shards", cause)), - Effect.forever, + Effect.repeat(Schedule.spaced(config.entityMessagePollInterval)), Effect.annotateLogs({ package: "@effect/cluster", module: "Sharding", fiber: "Shard acquisition loop", runner: selfAddress }), - Effect.interruptible, Effect.forkIn(shardingScope) ) - // refresh the shard locks every 4s + // refresh the shard locks every `shardLockRefreshInterval` yield* Effect.suspend(() => - shardStorage.refresh(selfAddress, [ + runnerStorage.refresh(selfAddress, [ ...acquiredShards, ...releasingShards ]) ).pipe( Effect.flatMap((acquired) => { for (const shardId of acquiredShards) { - if (!acquired.some((_) => _[Equal.symbol](shardId))) { + if (!acquired.includes(shardId)) { MutableHashSet.remove(acquiredShards, shardId) MutableHashSet.add(releasingShards, shardId) } } - return MutableHashSet.size(releasingShards) > 0 ? - Effect.andThen( - Effect.forkIn(syncSingletons, shardingScope), - releaseShards - ) : - Effect.void + for (let i = 0; i < acquired.length; i++) { + const shardId = acquired[i] + if (!MutableHashSet.has(selfShards, shardId)) { + MutableHashSet.remove(acquiredShards, shardId) + MutableHashSet.add(releasingShards, shardId) + } + } + return MutableHashSet.size(releasingShards) > 0 + ? 
activeShardsLatch.open + : Effect.void }), Effect.retry({ times: 5, @@ -312,8 +342,8 @@ const make = Effect.gen(function*() { Effect.andThen(clearSelfShards) ) ), - Effect.schedule(Schedule.fixed(4000)), - Effect.interruptible, + Effect.repeat(Schedule.fixed(config.shardLockRefreshInterval)), + Effect.forever, Effect.forkIn(shardingScope) ) @@ -328,86 +358,41 @@ const make = Effect.gen(function*() { (state) => state.manager.interruptShard(shardId), { concurrency: "unbounded", discard: true } ).pipe( - Effect.andThen(shardStorage.release(selfAddress, shardId)), + Effect.andThen(runnerStorage.release(selfAddress, shardId)), Effect.annotateLogs({ runner: selfAddress }), - Effect.andThen(() => { + Effect.flatMap(() => { MutableHashSet.remove(releasingShards, shardId) + return storage.unregisterShardReplyHandlers(shardId) }) ), { concurrency: "unbounded", discard: true } ) - ).pipe(Effect.andThen(activeShardsLatch.open)) + ) + ) + + // open the shard latch every poll interval + yield* activeShardsLatch.open.pipe( + Effect.delay(config.entityMessagePollInterval), + Effect.forever, + Effect.forkIn(shardingScope) ) } - const clearSelfShards = Effect.suspend(() => { + const clearSelfShards = Effect.sync(() => { MutableHashSet.clear(selfShards) - return activeShardsLatch.open + activeShardsLatch.unsafeOpen() }) - // --- Singletons --- - - const singletons = new Map>>() - const singletonFibers = yield* FiberMap.make() - const withSingletonLock = Effect.unsafeMakeSemaphore(1).withPermits(1) - - const registerSingleton: Sharding["Type"]["registerSingleton"] = Effect.fnUntraced( - function*(name, run, options) { - const shardGroup = options?.shardGroup ?? "default" - const address = new SingletonAddress({ - shardId: getShardId(EntityId.make(name), shardGroup), - name - }) - - let map = singletons.get(address.shardId) - if (!map) { - map = MutableHashMap.empty() - singletons.set(address.shardId, map) - } - if (MutableHashMap.has(map, address)) { - return yield* Effect.dieMessage(`Singleton '${name}' is already registered`) - } - - const context = yield* Effect.context() - const wrappedRun = run.pipe( - Effect.locally(FiberRef.currentLogAnnotations, HashMap.empty()), - Effect.andThen(Effect.never), - Effect.scoped, - Effect.provide(context), - Effect.orDie, - Effect.interruptible - ) as Effect.Effect - MutableHashMap.set(map, address, wrappedRun) - - yield* PubSub.publish(events, SingletonRegistered({ address })) - - // start if we are on the right shard - if (MutableHashSet.has(acquiredShards, address.shardId)) { - yield* Effect.logDebug("Starting singleton", address) - yield* FiberMap.run(singletonFibers, address, wrappedRun) - } - }, - withSingletonLock - ) - - const syncSingletons = withSingletonLock(Effect.gen(function*() { - for (const [shardId, map] of singletons) { - for (const [address, run] of map) { - const running = FiberMap.unsafeHas(singletonFibers, address) - const shouldBeRunning = MutableHashSet.has(acquiredShards, shardId) - if (running && !shouldBeRunning) { - yield* Effect.logDebug("Stopping singleton", address) - internalInterruptors.add(yield* Effect.fiberId) - yield* FiberMap.remove(singletonFibers, address) - } else if (!running && shouldBeRunning) { - yield* Effect.logDebug("Starting singleton", address) - yield* FiberMap.run(singletonFibers, address, run) - } - } - } - })) - // --- Storage inbox --- + // + // Responsible for reading unprocessed messages from storage and sending them + // to the appropriate entity manager. 
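// ---------------------------------------------------------------------------
// Editor's illustration (standalone TypeScript, not part of the patch): the
// storage inbox below is a latch-driven read loop. The latch is opened either
// by a notification that a message was persisted or by a poll timer, and the
// loop closes it before each read so nothing is missed. A sketch with a
// single-consumer latch in place of Effect.makeLatch; names are hypothetical:
class Latch {
  private open = false
  private wake: (() => void) | undefined

  unsafeOpen(): void {
    this.open = true
    this.wake?.()
    this.wake = undefined
  }

  unsafeClose(): void {
    this.open = false
  }

  // single consumer: only the read loop ever awaits this latch
  await_(): Promise<void> {
    return this.open ? Promise.resolve() : new Promise((resolve) => {
      this.wake = () => resolve()
    })
  }
}

const readLatch = new Latch()

// whoever persists a message opens the latch for an immediate read...
const notifyMessagePersisted = (): void => readLatch.unsafeOpen()

// ...and a timer re-opens it periodically so messages persisted by other
// runners are still picked up (mirrors the poll-interval behaviour)
setInterval(notifyMessagePersisted, 5000)

async function readLoop(unprocessed: () => Promise<Array<string>>): Promise<void> {
  while (true) {
    await readLatch.await_()
    // close before reading so a notification arriving mid-read re-opens it
    readLatch.unsafeClose()
    for (const message of await unprocessed()) {
      // dispatch to the owning entity manager, skipping messages for shards
      // this runner no longer holds
      console.log("dispatch", message)
    }
  }
}
// ---------------------------------------------------------------------------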
+ // + // This should be shutdown before shard acquisition, to ensure no messages are + // being processed before the shards are released. + // + // It should also be shutdown after the entity managers, to ensure interrupt + // & ack envelopes can still be processed. const storageReadLatch = yield* Effect.makeLatch(true) const openStorageReadLatch = constant(storageReadLatch.open) @@ -415,13 +400,6 @@ const make = Effect.gen(function*() { const storageReadLock = Effect.unsafeMakeSemaphore(1) const withStorageReadLock = storageReadLock.withPermits(1) - let storageAlreadyProcessed = (_message: Message.IncomingRequest) => true - - // keep track of the last sent request ids to avoid duplicates - // we only keep the last 30 sets to avoid memory leaks - const sentRequestIds = new Set() - const sentRequestIdSets = new Set>() - if (storageEnabled && Option.isSome(config.runnerAddress)) { const selfAddress = config.runnerAddress.value @@ -429,17 +407,87 @@ const make = Effect.gen(function*() { yield* Effect.logDebug("Starting") yield* Effect.addFinalizer(() => Effect.logDebug("Shutting down")) - sentRequestIds.clear() - sentRequestIdSets.clear() + let index = 0 + let messages: Array> = [] + const removableNotifications = new Set() + const resetAddresses = MutableHashSet.empty() - storageAlreadyProcessed = (message: Message.IncomingRequest) => { - if (!sentRequestIds.has(message.envelope.requestId)) { - return false + const processMessages = Effect.whileLoop({ + while: () => index < messages.length, + step: () => index++, + body: () => send + }) + + const send = Effect.catchAllCause( + Effect.suspend(() => { + const message = messages[index] + // if we are shutting down, we don't accept new requests + if (message._tag === "IncomingRequest" && isShutdown.current) { + if (isShutdown.current) { + return Effect.void + } + } + const address = message.envelope.address + if (!MutableHashSet.has(acquiredShards, address.shardId)) { + return Effect.void + } + const state = entityManagers.get(address.entityType) + if (!state) { + // reset address in the case that the entity is slow to register + MutableHashSet.add(resetAddresses, address) + return Effect.void + } else if (state.closed) { + return Effect.void + } + + const isProcessing = state.manager.isProcessingFor(message) + + // If the message might affect a currently processing request, we + // send it to the entity manager to be processed. + if (message._tag === "IncomingEnvelope" && isProcessing) { + return state.manager.send(message) + } else if (isProcessing) { + return Effect.void + } else if (message._tag === "IncomingRequest" && pendingNotifications.has(message.envelope.requestId)) { + const entry = pendingNotifications.get(message.envelope.requestId)! + pendingNotifications.delete(message.envelope.requestId) + removableNotifications.delete(entry) + entry.resume(Effect.void) + } + + // If the entity was resuming in another fiber, we add the message + // id to the unprocessed set. 
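+ // (entities enter resumption after a MailboxFull, and are replayed from storage)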
+ const resumptionState = MutableHashMap.get(entityResumptionState, address) + if (Option.isSome(resumptionState)) { + resumptionState.value.unprocessed.add(message.envelope.requestId) + if (message.envelope._tag === "Interrupt") { + resumptionState.value.interrupts.set(message.envelope.requestId, message as Message.IncomingEnvelope) + } + return Effect.void + } + return state.manager.send(message) + }), + (cause) => { + const message = messages[index] + const error = Cause.failureOrCause(cause) + // if we get a defect, then update storage + if (Either.isRight(error)) { + if (Cause.isInterrupted(cause)) { + return Effect.void + } + return Effect.ignore(storage.saveReply(Reply.ReplyWithContext.fromDefect({ + id: snowflakeGen.unsafeNext(), + requestId: message.envelope.requestId, + defect: Cause.squash(cause) + }))) + } + if (error.left._tag === "MailboxFull") { + // MailboxFull can only happen for requests, so this cast is safe + return resumeEntityFromStorage(message as Message.IncomingRequest) + } + return Effect.void } - const state = entityManagers.get(message.envelope.address.entityType) - if (!state) return true - return !state.manager.isProcessingFor(message, { excludeReplies: true }) - } + ) while (true) { // wait for the next poll interval, or if we get notified of a change @@ -454,110 +502,47 @@ const make = Effect.gen(function*() { // acquired. yield* storageReadLock.take(1) - const messages = yield* storage.unprocessedMessages(acquiredShards) - const currentSentRequestIds = new Set() - sentRequestIdSets.add(currentSentRequestIds) - - const send = Effect.catchAllCause( - Effect.suspend(() => { - const message = messages[index] - if (message._tag === "IncomingRequest") { - if (sentRequestIds.has(message.envelope.requestId)) { - return Effect.void - } - sentRequestIds.add(message.envelope.requestId) - currentSentRequestIds.add(message.envelope.requestId) - } - const address = message.envelope.address - if (!MutableHashSet.has(acquiredShards, address.shardId)) { - return Effect.void - } - const state = entityManagers.get(address.entityType) - if (!state) { - if (message._tag === "IncomingRequest") { - return Effect.orDie(message.respond(Reply.ReplyWithContext.fromDefect({ - id: snowflakeGen.unsafeNext(), - requestId: message.envelope.requestId, - defect: new EntityNotManagedByRunner({ address }) - }))) - } - return Effect.void - } + entityManagers.forEach((state) => state.manager.clearProcessed()) + if (pendingNotifications.size > 0) { + pendingNotifications.forEach((entry) => removableNotifications.add(entry)) + } - const isProcessing = state.manager.isProcessingFor(message) + messages = yield* storage.unprocessedMessages(acquiredShards) + index = 0 + yield* processMessages - // If the message might affect a currently processing request, we - // send it to the entity manager to be processed. - if (message._tag === "IncomingEnvelope" && isProcessing) { - return state.manager.send(message) - } else if (isProcessing) { - return Effect.void - } - - // If the entity was resuming in another fiber, we add the message - // id to the unprocessed set. 
- const resumptionState = MutableHashMap.get(entityResumptionState, address) - if (Option.isSome(resumptionState)) { - resumptionState.value.unprocessed.add(message.envelope.requestId) - if (message.envelope._tag === "Interrupt") { - resumptionState.value.interrupts.set(message.envelope.requestId, message as Message.IncomingEnvelope) - } - return Effect.void - } - return state.manager.send(message) - }), - (cause) => { - const message = messages[index] - const error = Cause.failureOption(cause) - // if we get a defect, then update storage - if (Option.isNone(error)) { - if (Cause.isInterrupted(cause)) { - return Effect.void - } - return storage.saveReply(Reply.ReplyWithContext.fromDefect({ - id: snowflakeGen.unsafeNext(), - requestId: message.envelope.requestId, - defect: Cause.squash(cause) - })) - } - if (error.value._tag === "MailboxFull") { - // MailboxFull can only happen for requests, so this cast is safe - return resumeEntityFromStorage(message as Message.IncomingRequest) - } - return Effect.void + if (removableNotifications.size > 0) { + removableNotifications.forEach(({ message, resume }) => { + pendingNotifications.delete(message.envelope.requestId) + resume(Effect.fail(new EntityNotAssignedToRunner({ address: message.envelope.address }))) + }) + removableNotifications.clear() + } + if (MutableHashSet.size(resetAddresses) > 0) { + for (const address of resetAddresses) { + yield* Effect.logWarning("Could not find entity manager for address, retrying").pipe( + Effect.annotateLogs({ address }) + ) + yield* Effect.forkIn(storage.resetAddress(address), shardingScope) } - ) - - let index = 0 - yield* Effect.whileLoop({ - while: () => index < messages.length, - step: () => index++, - body: constant(send) - }) + MutableHashSet.clear(resetAddresses) + } // let the resuming entities check if they are done yield* storageReadLock.release(1) - - while (sentRequestIdSets.size > 30) { - const oldest = Iterable.unsafeHead(sentRequestIdSets) - sentRequestIdSets.delete(oldest) - for (const id of oldest) { - sentRequestIds.delete(id) - } - } } }).pipe( Effect.scoped, Effect.ensuring(storageReadLock.releaseAll), Effect.catchAllCause((cause) => Effect.logWarning("Could not read messages from storage", cause)), - Effect.repeat(Schedule.spaced(config.entityMessagePollInterval)), + Effect.forever, Effect.annotateLogs({ package: "@effect/cluster", module: "Sharding", fiber: "Storage read loop", runner: selfAddress }), - Effect.interruptible, + Effect.withUnhandledErrorLogLevel(Option.none()), Effect.forkIn(shardingScope) ) @@ -565,7 +550,6 @@ const make = Effect.gen(function*() { yield* storageReadLatch.open.pipe( Effect.delay(config.entityMessagePollInterval), Effect.forever, - Effect.interruptible, Effect.forkIn(shardingScope) ) @@ -623,7 +607,7 @@ const make = Effect.gen(function*() { const sendWithRetry: Effect.Effect< void, - EntityNotManagedByRunner | EntityNotAssignedToRunner + EntityNotAssignedToRunner > = Effect.catchTags( Effect.suspend(() => { if (!MutableHashSet.has(acquiredShards, address.shardId)) { @@ -671,7 +655,7 @@ const make = Effect.gen(function*() { while: (e) => e._tag === "PersistenceError", schedule: Schedule.spaced(config.entityMessagePollInterval) }), - Effect.catchAllCause((cause) => Effect.logError("Could not resume unprocessed messages", cause)), + Effect.catchAllCause((cause) => Effect.logDebug("Could not resume unprocessed messages", cause)), (effect, address) => Effect.annotateLogs(effect, { package: "@effect/cluster", @@ -685,107 +669,132 @@ const make = 
Effect.gen(function*() { effect, Effect.sync(() => MutableHashMap.remove(entityResumptionState, address)) ), - Effect.interruptible, + Effect.withUnhandledErrorLogLevel(Option.none()), Effect.forkIn(shardingScope) ) } // --- Sending messages --- - const sendLocal = | Message.Incoming>( - message: M - ): Effect.Effect< - void, - | EntityNotAssignedToRunner - | EntityNotManagedByRunner - | MailboxFull - | AlreadyProcessingMessage - | (M extends Message.Incoming ? never : PersistenceError) - > => - Effect.suspend(() => { + const sendLocal = | Message.Incoming>(message: M) => + Effect.suspend(function loop(): Effect.Effect< + void, + | EntityNotAssignedToRunner + | MailboxFull + | AlreadyProcessingMessage + | (M extends Message.Incoming ? never : PersistenceError) + > { const address = message.envelope.address if (!isEntityOnLocalShards(address)) { return Effect.fail(new EntityNotAssignedToRunner({ address })) } const state = entityManagers.get(address.entityType) if (!state) { - return Effect.fail(new EntityNotManagedByRunner({ address })) + return Effect.flatMap(waitForEntityManager(address.entityType), loop) + } else if (state.closed || (isShutdown.current && message._tag === "IncomingRequest")) { + // if we are shutting down, we don't accept new requests + return Effect.fail(new EntityNotAssignedToRunner({ address })) } return message._tag === "IncomingRequest" || message._tag === "IncomingEnvelope" ? state.manager.send(message) : - runners.sendLocal({ + runnersService.sendLocal({ message, send: state.manager.sendLocal, simulateRemoteSerialization: config.simulateRemoteSerialization }) as any }) + type PendingNotification = { + resume: (_: Effect.Effect) => void + readonly message: Message.IncomingRequest + } + const pendingNotifications = new Map() const notifyLocal = | Message.Incoming>( message: M, - discard: boolean + discard: boolean, + options?: { + readonly waitUntilRead?: boolean | undefined + } ) => - Effect.suspend( - (): Effect.Effect< - void, - | EntityNotManagedByRunner - | EntityNotAssignedToRunner - | AlreadyProcessingMessage - | (M extends Message.Incoming ? never : PersistenceError) - > => { - const address = message.envelope.address - if (!entityManagers.has(address.entityType)) { - return Effect.fail(new EntityNotManagedByRunner({ address })) - } + Effect.suspend(function loop(): Effect.Effect< + void, + | EntityNotAssignedToRunner + | AlreadyProcessingMessage + | (M extends Message.Incoming ? never : PersistenceError) + > { + const address = message.envelope.address + const state = entityManagers.get(address.entityType) + if (!state) { + return Effect.flatMap(waitForEntityManager(address.entityType), loop) + } else if (state.closed || !isEntityOnLocalShards(address)) { + return Effect.fail(new EntityNotAssignedToRunner({ address })) + } - const isLocal = isEntityOnLocalShards(address) - const notify = storageEnabled - ? openStorageReadLatch - : () => Effect.dieMessage("Sharding.notifyLocal: storage is disabled") + const isLocal = isEntityOnLocalShards(address) + const notify = storageEnabled + ? 
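+ // with storage enabled, a notification only needs to wake the storage read loop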
openStorageReadLatch + : () => Effect.die("Sharding.notifyLocal: storage is disabled") - if (message._tag === "IncomingRequest" || message._tag === "IncomingEnvelope") { - if (message._tag === "IncomingRequest" && storageAlreadyProcessed(message)) { - return Effect.fail(new AlreadyProcessingMessage({ address, envelopeId: message.envelope.requestId })) - } else if (!isLocal) { - return Effect.fail(new EntityNotAssignedToRunner({ address })) - } - return notify() + if (message._tag === "IncomingRequest" || message._tag === "IncomingEnvelope") { + if (!isLocal) { + return Effect.fail(new EntityNotAssignedToRunner({ address })) + } else if ( + message._tag === "IncomingRequest" && state.manager.isProcessingFor(message, { excludeReplies: true }) + ) { + return Effect.fail(new AlreadyProcessingMessage({ address, envelopeId: message.envelope.requestId })) + } else if (message._tag === "IncomingRequest" && options?.waitUntilRead) { + if (!storageEnabled) return notify() + return Effect.async((resume) => { + let entry = pendingNotifications.get(message.envelope.requestId) + if (entry) { + const prevResume = entry.resume + entry.resume = (effect) => { + prevResume(effect) + resume(effect) + } + return + } + entry = { resume, message } + pendingNotifications.set(message.envelope.requestId, entry) + storageReadLatch.unsafeOpen() + }) } - - return runners.notifyLocal({ message, notify, discard, storageOnly: !isLocal }) as any + return notify() } - ) - const isTransientError = Predicate.or(RunnerUnavailable.is, EntityNotAssignedToRunner.is) + return runnersService.notifyLocal({ message, notify, discard, storageOnly: !isLocal }) as any + }) + function sendOutgoing( message: Message.Outgoing, discard: boolean, retries?: number ): Effect.Effect< void, - EntityNotManagedByRunner | MailboxFull | AlreadyProcessingMessage | PersistenceError + MailboxFull | AlreadyProcessingMessage | PersistenceError > { return Effect.catchIf( Effect.suspend(() => { const address = message.envelope.address - const maybeRunner = MutableHashMap.get(shardAssignments, address.shardId) const isPersisted = Context.get(message.rpc.annotations, Persisted) if (isPersisted && !storageEnabled) { - return Effect.dieMessage("Sharding.sendOutgoing: Persisted messages require MessageStorage") + return Effect.die("Sharding.sendOutgoing: Persisted messages require MessageStorage") } + const maybeRunner = MutableHashMap.get(shardAssignments, address.shardId) const runnerIsLocal = Option.isSome(maybeRunner) && isLocalRunner(maybeRunner.value) if (isPersisted) { return runnerIsLocal ? notifyLocal(message, discard) - : runners.notify({ address: maybeRunner, message, discard }) + : runnersService.notify({ address: maybeRunner, message, discard }) } else if (Option.isNone(maybeRunner)) { return Effect.fail(new EntityNotAssignedToRunner({ address })) } return runnerIsLocal ? 
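// the target shard is assigned to this runner, so deliver locally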
sendLocal(message) - : runners.send({ address: maybeRunner.value, message }) + : runnersService.send({ address: maybeRunner.value, message }) }), - isTransientError, + (error) => error._tag === "EntityNotAssignedToRunner" || error._tag === "RunnerUnavailable", (error) => { if (retries === 0) { return Effect.die(error) @@ -795,158 +804,156 @@ const make = Effect.gen(function*() { ) } - const reset: Sharding["Type"]["reset"] = Effect.fnUntraced( - function*(requestId) { - yield* storage.clearReplies(requestId) - sentRequestIds.delete(requestId) - }, - Effect.matchCause({ + const reset: Sharding["Type"]["reset"] = (requestId) => + Effect.matchCause(storage.clearReplies(requestId), { onSuccess: () => true, onFailure: () => false }) - ) - - // --- Shard Manager sync --- - const shardManagerTimeoutFiber = yield* FiberHandle.make().pipe( - Scope.extend(shardingScope) - ) - const startShardManagerTimeout = FiberHandle.run( - shardManagerTimeoutFiber, - Effect.flatMap(Effect.sleep(config.shardManagerUnavailableTimeout), () => { - MutableHashMap.clear(shardAssignments) - return clearSelfShards - }), - { onlyIfMissing: true } - ) - const stopShardManagerTimeout = FiberHandle.clear(shardManagerTimeoutFiber) + // --- RunnerStorage sync --- + // + // This is responsible for syncing the local view of runners and shard + // assignments with RunnerStorage. + // + // It should be shutdown after the clients, so that they can still get correct + // shard assignments for outgoing messages (they could still be in use by + // entities that are shutting down). + + const selfRunner = Option.isSome(config.runnerAddress) ? + new Runner({ + address: config.runnerAddress.value, + groups: config.shardGroups, + weight: config.runnerShardWeight + }) : + undefined + + let allRunners = MutableHashMap.empty() + let healthyRunnerCount = 0 + + // update metrics + if (selfRunner) { + ClusterMetrics.runners.unsafeUpdate(BigInt(1), []) + ClusterMetrics.runnersHealthy.unsafeUpdate(BigInt(1), []) + } - // Every time the link to the shard manager is lost, we re-register the runner - // and re-subscribe to sharding events yield* Effect.gen(function*() { - yield* Effect.logDebug("Registering with shard manager") - if (!isShutdown.current && Option.isSome(config.runnerAddress)) { - const machineId = yield* shardManager.register(config.runnerAddress.value, config.shardGroups) - yield* snowflakeGen.setMachineId(machineId) - } + const hashRings = new Map>() + let nextRunners = MutableHashMap.empty() + const healthyRunners = MutableHashSet.empty() + + while (true) { + // Ensure the current runner is registered + if (selfRunner && !isShutdown.current && !MutableHashMap.has(allRunners, selfRunner)) { + yield* Effect.logDebug("Registering runner", selfRunner) + const machineId = yield* runnerStorage.register(selfRunner, true) + yield* snowflakeGen.setMachineId(machineId) + } - yield* stopShardManagerTimeout + const runners = yield* runnerStorage.getRunners + let changed = false + for (let i = 0; i < runners.length; i++) { + const [runner, healthy] = runners[i] + MutableHashMap.set(nextRunners, runner, healthy) + const wasHealthy = MutableHashSet.has(healthyRunners, runner) + if (!healthy || wasHealthy) { + if (healthy === wasHealthy || !wasHealthy) { + // no change + MutableHashMap.remove(allRunners, runner) + } + continue + } + changed = true + MutableHashSet.add(healthyRunners, runner) + MutableHashMap.remove(allRunners, runner) + for (let j = 0; j < runner.groups.length; j++) { + const group = runner.groups[j] + let ring = 
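+ // one consistent-hash ring per shard group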
hashRings.get(group) + if (!ring) { + ring = HashRing.make() + hashRings.set(group, ring) + } + HashRing.add(ring, runner.address, { weight: runner.weight }) + } + } - yield* Effect.logDebug("Subscribing to sharding events") - const mailbox = yield* shardManager.shardingEvents(config.runnerAddress) - const startedLatch = yield* Deferred.make() + // Remove runners that are no longer present or healthy + MutableHashMap.forEach(allRunners, (_, runner) => { + changed = true + MutableHashMap.remove(allRunners, runner) + MutableHashSet.remove(healthyRunners, runner) + runFork(runnersService.onRunnerUnavailable(runner.address)) + for (let i = 0; i < runner.groups.length; i++) { + HashRing.remove(hashRings.get(runner.groups[i])!, runner.address) + } + }) - const eventsFiber = yield* Effect.gen(function*() { - while (true) { - const [events, done] = yield* mailbox.takeAll - if (done) return - for (const event of events) { - yield* Effect.logDebug("Received sharding event", event) - - switch (event._tag) { - case "StreamStarted": { - yield* Deferred.done(startedLatch, Exit.void) - break - } - case "ShardsAssigned": { - for (const shard of event.shards) { - MutableHashMap.set(shardAssignments, shard, event.address) - } - if (!MutableRef.get(isShutdown) && isLocalRunner(event.address)) { - for (const shardId of event.shards) { - if (MutableHashSet.has(selfShards, shardId)) continue - MutableHashSet.add(selfShards, shardId) - } - yield* activeShardsLatch.open - } - break - } - case "ShardsUnassigned": { - for (const shard of event.shards) { - MutableHashMap.remove(shardAssignments, shard) - } - if (isLocalRunner(event.address)) { - for (const shard of event.shards) { - MutableHashSet.remove(selfShards, shard) - } - yield* activeShardsLatch.open + // swap allRunners and nextRunners + const prevRunners = allRunners + allRunners = nextRunners + nextRunners = prevRunners + healthyRunnerCount = MutableHashSet.size(healthyRunners) + + // Ensure the current runner is registered + if (selfRunner && !isShutdown.current && !MutableHashMap.has(allRunners, selfRunner)) { + continue + } + + // Recompute shard assignments if the set of healthy runners has changed. + if (changed) { + MutableHashSet.clear(selfShards) + hashRings.forEach((ring, group) => { + const newAssignments = HashRing.getShards(ring, config.shardsPerGroup) + for (let i = 0; i < config.shardsPerGroup; i++) { + const shard = makeShardId(group, i + 1) + if (newAssignments) { + const runner = newAssignments[i] + MutableHashMap.set(shardAssignments, shard, runner) + if (isLocalRunner(runner)) { + MutableHashSet.add(selfShards, shard) } - break - } - case "RunnerUnregistered": { - if (!isLocalRunner(event.address)) break - return yield* Effect.fail(new ClusterError.RunnerNotRegistered({ address: event.address })) + } else { + MutableHashMap.remove(shardAssignments, shard) } } + }) + yield* Effect.logDebug("New shard assignments", selfShards) + activeShardsLatch.unsafeOpen() + + // update metrics + if (selfRunner) { + ClusterMetrics.runnersHealthy.unsafeUpdate( + BigInt(MutableHashSet.has(healthyRunners, selfRunner) ? 
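+ // 1 while this runner is still in the healthy set, 0 otherwise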
1 : 0), + [] + ) } } - }).pipe( - Effect.intoDeferred(startedLatch), - Effect.zipRight(Effect.dieMessage("Shard manager event stream down")), - Effect.forkScoped - ) - - // Wait for the stream to be established - yield* Deferred.await(startedLatch) - // perform a full sync every config.refreshAssignmentsInterval - const syncFiber = yield* syncAssignments.pipe( - Effect.andThen(Effect.sleep(config.refreshAssignmentsInterval)), - Effect.forever, - Effect.forkScoped - ) + if (selfRunner && MutableHashSet.size(healthyRunners) === 0) { + yield* Effect.logWarning("No healthy runners available") + // to prevent a deadlock, we will mark the current node as healthy to + // start the health check singleton again + yield* runnerStorage.setRunnerHealth(selfRunner.address, true) + } - return yield* Fiber.joinAll([eventsFiber, syncFiber]) + yield* Effect.sleep(config.refreshAssignmentsInterval) + } }).pipe( - Effect.scoped, Effect.catchAllCause((cause) => Effect.logDebug(cause)), - Effect.zipRight(startShardManagerTimeout), - Effect.repeat( - Schedule.exponential(1000).pipe( - Schedule.union(Schedule.spaced(10_000)) - ) - ), + Effect.repeat(Schedule.spaced(1000)), Effect.annotateLogs({ package: "@effect/cluster", module: "Sharding", - fiber: "ShardManager sync", + fiber: "RunnerStorage sync", runner: config.runnerAddress }), - Effect.interruptible, Effect.forkIn(shardingScope) ) - const syncAssignments = Effect.gen(function*() { - const assignments = yield* shardManager.getAssignments - yield* Effect.logDebug("Received shard assignments", assignments) - - for (const [shardId, runner] of assignments) { - if (Option.isNone(runner)) { - MutableHashMap.remove(shardAssignments, shardId) - MutableHashSet.remove(selfShards, shardId) - continue - } - - MutableHashMap.set(shardAssignments, shardId, runner.value) - - if (!isLocalRunner(runner.value)) { - MutableHashSet.remove(selfShards, shardId) - continue - } - if (MutableRef.get(isShutdown) || MutableHashSet.has(selfShards, shardId)) { - continue - } - MutableHashSet.add(selfShards, shardId) - } - - yield* activeShardsLatch.open - }) - // --- Clients --- type ClientRequestEntry = { readonly rpc: Rpc.AnyWithProps - readonly context: Context.Context + readonly services: Context.Context lastChunkId?: Snowflake.Snowflake } const clientRequests = new Map() @@ -955,7 +962,7 @@ const make = Effect.gen(function*() { Entity, (entityId: string) => RpcClient.RpcClient< any, - MailboxFull | AlreadyProcessingMessage | EntityNotManagedByRunner + MailboxFull | AlreadyProcessingMessage >, never > = yield* ResourceMap.make(Effect.fnUntraced(function*(entity: Entity) { @@ -967,7 +974,7 @@ const make = Effect.gen(function*() { flatten: true, onFromClient(options): Effect.Effect< void, - MailboxFull | AlreadyProcessingMessage | EntityNotManagedByRunner | PersistenceError + MailboxFull | AlreadyProcessingMessage | PersistenceError > { const address = Context.unsafeGet(options.context, ClientAddressTag) switch (options.message._tag) { @@ -979,7 +986,7 @@ const make = Effect.gen(function*() { if (!options.discard) { const entry: ClientRequestEntry = { rpc: rpc as any, - context: fiber.currentContext + services: fiber.currentContext } clientRequests.set(id, entry) respond = makeClientRespond(entry, client.write) @@ -1065,8 +1072,8 @@ const make = Effect.gen(function*() { ) return (entityId: string) => { - const id = EntityId.make(entityId) - const address = ClientAddressTag.context(EntityAddress.make({ + const id = makeEntityId(entityId) + const address = 
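+ // resolve the shard for this entity id and capture the full address as client context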
ClientAddressTag.context(makeEntityAddress({ shardId: getShardId(id, entity.getShardGroup(entityId as EntityId)), entityId: id, entityType: entity.type @@ -1100,7 +1107,7 @@ const make = Effect.gen(function*() { const makeClient = (entity: Entity): Effect.Effect< ( entityId: string - ) => RpcClient.RpcClient.From + ) => RpcClient.RpcClient.From > => clients.get(entity) as any const clientRespondDiscard = (_reply: Reply.Reply) => Effect.void @@ -1132,14 +1139,88 @@ const make = Effect.gen(function*() { } } + // --- Singletons --- + + const singletons = new Map>>() + const singletonFibers = yield* FiberMap.make() + const withSingletonLock = Effect.unsafeMakeSemaphore(1).withPermits(1) + + const registerSingleton: Sharding["Type"]["registerSingleton"] = Effect.fnUntraced( + function*(name, run, options) { + const shardGroup = options?.shardGroup ?? "default" + const address = new SingletonAddress({ + shardId: getShardId(makeEntityId(name), shardGroup), + name + }) + + let map = singletons.get(address.shardId) + if (!map) { + map = MutableHashMap.empty() + singletons.set(address.shardId, map) + } + if (MutableHashMap.has(map, address)) { + return yield* Effect.die(`Singleton '${name}' is already registered`) + } + + const context = yield* Effect.context() + const wrappedRun = run.pipe( + Effect.locally(FiberRef.currentLogAnnotations, HashMap.empty()), + Effect.andThen(Effect.never), + Effect.scoped, + Effect.provide(context), + Effect.orDie, + Effect.interruptible + ) as Effect.Effect + MutableHashMap.set(map, address, wrappedRun) + + yield* PubSub.publish(events, SingletonRegistered({ address })) + + // start if we are on the right shard + if (MutableHashSet.has(acquiredShards, address.shardId)) { + yield* Effect.logDebug("Starting singleton", address) + yield* FiberMap.run(singletonFibers, address, wrappedRun) + } + }, + withSingletonLock + ) + + const syncSingletons = withSingletonLock(Effect.gen(function*() { + for (const [shardId, map] of singletons) { + for (const [address, run] of map) { + const running = FiberMap.unsafeHas(singletonFibers, address) + const shouldBeRunning = MutableHashSet.has(acquiredShards, shardId) + if (running && !shouldBeRunning) { + yield* Effect.logDebug("Stopping singleton", address) + internalInterruptors.add(Option.getOrThrow(Fiber.getCurrentFiber()).id()) + yield* FiberMap.remove(singletonFibers, address) + } else if (!running && shouldBeRunning) { + yield* Effect.logDebug("Starting singleton", address) + yield* FiberMap.run(singletonFibers, address, run) + } + } + } + ClusterMetrics.singletons.unsafeUpdate( + BigInt(yield* FiberMap.size(singletonFibers)), + [] + ) + })) + // --- Entities --- const context = yield* Effect.context() const reaper = yield* EntityReaper + const entityManagerLatches = new Map() + const registerEntity: Sharding["Type"]["registerEntity"] = Effect.fnUntraced( function*(entity, build, options) { if (Option.isNone(config.runnerAddress) || entityManagers.has(entity.type)) return const scope = yield* Scope.make() + yield* Scope.addFinalizer( + scope, + Effect.sync(() => { + state.closed = true + }) + ) const manager = yield* EntityManager.make(entity, build, { ...options, storage, @@ -1152,11 +1233,22 @@ const make = Effect.gen(function*() { Context.add(Snowflake.Generator, snowflakeGen) )) ) as Effect.Effect - entityManagers.set(entity.type, { + const state: EntityManagerState = { entity, scope, + closed: false, manager - }) + } + + // register entities while storage is idle + // this ensures message order is preserved + yield* 
withStorageReadLock(Effect.sync(() => { + entityManagers.set(entity.type, state) + if (entityManagerLatches.has(entity.type)) { + entityManagerLatches.get(entity.type)!.unsafeOpen() + entityManagerLatches.delete(entity.type) + } + })) yield* PubSub.publish(events, EntityRegistered({ entity })) } @@ -1176,29 +1268,67 @@ const make = Effect.gen(function*() { ) ) - // --- Finalization --- + const waitForEntityManager = (entityType: string) => { + let latch = entityManagerLatches.get(entityType) + if (!latch) { + latch = Effect.unsafeMakeLatch() + entityManagerLatches.set(entityType, latch) + } + return latch.await + } - if (Option.isSome(config.runnerAddress)) { - const selfAddress = config.runnerAddress.value - // Unregister runner from shard manager when scope is closed - yield* Scope.addFinalizer( - shardingScope, - Effect.gen(function*() { - yield* Effect.logDebug("Unregistering runner from shard manager", selfAddress) - yield* shardManager.unregister(selfAddress).pipe( - Effect.catchAllCause((cause) => Effect.logError("Error calling unregister with shard manager", cause)) + // --- Runner health checks --- + + if (selfRunner) { + const checkRunner = ([runner, healthy]: [Runner, boolean]) => + Effect.flatMap(runnerHealth.isAlive(runner.address), (isAlive) => { + if (healthy === isAlive) return Effect.void + if (isAlive) { + healthyRunnerCount++ + return Effect.logDebug(`Runner is healthy`, runner).pipe( + Effect.andThen(runnerStorage.setRunnerHealth(runner.address, isAlive)) + ) + } + if (healthyRunnerCount <= 1) { + // never mark the last runner as unhealthy, to prevent a deadlock + return Effect.void + } + healthyRunnerCount-- + return Effect.logDebug(`Runner is unhealthy`, runner).pipe( + Effect.andThen(runnerStorage.setRunnerHealth(runner.address, isAlive)) ) - yield* clearSelfShards }) + + yield* registerSingleton( + "effect/cluster/Sharding/RunnerHealth", + Effect.gen(function*() { + while (true) { + // Skip health checks if we are the only runner + if (MutableHashMap.size(allRunners) > 1) { + yield* Effect.forEach(allRunners, checkRunner, { discard: true, concurrency: 10 }) + } + yield* Effect.sleep(config.runnerHealthCheckInterval) + } + }).pipe( + Effect.catchAllCause((cause) => Effect.logDebug("Runner health check failed", cause)), + Effect.forever, + Effect.annotateLogs({ + package: "@effect/cluster", + module: "Sharding", + fiber: "Runner health check" + }) + ) ) } + // --- Finalization --- + yield* Scope.addFinalizer( shardingScope, Effect.withFiberRuntime((fiber) => { MutableRef.set(isShutdown, true) internalInterruptors.add(fiber.id()) - return Effect.void + return selfRunner ? 
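+ // best-effort unregister; any shard locks still held will simply expire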
Effect.ignore(runnerStorage.unregister(selfRunner.address)) : Effect.void }) ) @@ -1213,13 +1343,18 @@ const make = Effect.gen(function*() { const sharding = Sharding.of({ getRegistrationEvents, getShardId, + hasShardId(shardId: ShardId) { + if (isShutdown.current) return false + return MutableHashSet.has(acquiredShards, shardId) + }, + getSnowflake: Effect.sync(() => snowflakeGen.unsafeNext()), isShutdown: Effect.sync(() => MutableRef.get(isShutdown)), registerEntity, registerSingleton, makeClient, send: sendLocal, sendOutgoing: (message, discard) => sendOutgoing(message, discard), - notify: (message) => notifyLocal(message, false), + notify: (message, options) => notifyLocal(message, false, options), activeEntityCount, pollStorage: storageReadLatch.open, reset @@ -1235,8 +1370,8 @@ const make = Effect.gen(function*() { export const layer: Layer.Layer< Sharding, never, - ShardingConfig | Runners | ShardManagerClient | MessageStorage.MessageStorage | ShardStorage -> = Layer.scoped(Sharding, make).pipe( + ShardingConfig | Runners | MessageStorage.MessageStorage | RunnerStorage | RunnerHealth.RunnerHealth +> = Layer.scoped(Sharding)(make).pipe( Layer.provide([Snowflake.layerGenerator, EntityReaper.Default]) ) diff --git a/packages/cluster/src/ShardingConfig.ts b/packages/cluster/src/ShardingConfig.ts index 8333ed66f56..c4bec132d8a 100644 --- a/packages/cluster/src/ShardingConfig.ts +++ b/packages/cluster/src/ShardingConfig.ts @@ -34,9 +34,15 @@ export class ShardingConfig extends Context.Tag("@effect/cluster/ShardingConfig" */ readonly runnerListenAddress: Option.Option /** - * The version of the current runner. + * A number that determines how many shards this runner will be assigned + * relative to other runners. + * + * Defaults to `1`. + * + * A value of `2` means that this runner should be assigned twice as many + * shards as a runner with a weight of `1`. */ - readonly serverVersion: number + readonly runnerShardWeight: number /** * The shard groups that are assigned to this runner. * @@ -50,14 +56,13 @@ export class ShardingConfig extends Context.Tag("@effect/cluster/ShardingConfig" */ readonly shardsPerGroup: number /** - * The address of the shard manager. + * Shard lock refresh interval. */ - readonly shardManagerAddress: RunnerAddress + readonly shardLockRefreshInterval: DurationInput /** - * If the shard manager is unavailable for this duration, all the shard - * assignments will be reset. + * Shard lock expiration duration. */ - readonly shardManagerUnavailableTimeout: DurationInput + readonly shardLockExpiration: DurationInput /** * The default capacity of the mailbox for entities. */ @@ -81,12 +86,19 @@ export class ShardingConfig extends Context.Tag("@effect/cluster/ShardingConfig" * The interval at which to poll for client replies from storage. */ readonly entityReplyPollInterval: DurationInput + /** + * The interval at which to poll for new runners and refresh shard + * assignments. + */ readonly refreshAssignmentsInterval: DurationInput /** * The interval to retry a send if EntityNotAssignedToRunner is returned. */ readonly sendRetryInterval: DurationInput - // readonly unhealthyRunnerReportInterval: Duration.Duration + /** + * The interval at which to check for unhealthy runners and report them + */ + readonly runnerHealthCheckInterval: DurationInput /** * Simulate serialization and deserialization to remote runners for local * entities. 
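For reference, a minimal sketch of the updated configuration surface (hypothetical usage: it assumes `ShardingConfig.layer` accepts a partial override of the `defaults` shown below; the field names come from the diff above):

```ts
import { ShardingConfig } from "@effect/cluster"
import { Duration } from "effect"

// Sketch only: double this runner's share of shards and tighten the lock
// cadence. Keep `shardLockExpiration` comfortably above
// `shardLockRefreshInterval`, mirroring the 10s/35s defaults below.
const ShardingConfigLive = ShardingConfig.layer({
  runnerShardWeight: 2,
  shardLockRefreshInterval: Duration.seconds(5),
  shardLockExpiration: Duration.seconds(20)
})
```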
@@ -103,18 +115,19 @@ const defaultRunnerAddress = RunnerAddress.make({ host: "localhost", port: 34431 export const defaults: ShardingConfig["Type"] = { runnerAddress: Option.some(defaultRunnerAddress), runnerListenAddress: Option.none(), - serverVersion: 1, + runnerShardWeight: 1, shardsPerGroup: 300, - shardManagerAddress: RunnerAddress.make({ host: "localhost", port: 8080 }), - shardManagerUnavailableTimeout: Duration.minutes(10), shardGroups: ["default"], + shardLockRefreshInterval: Duration.seconds(10), + shardLockExpiration: Duration.seconds(35), entityMailboxCapacity: 4096, entityMaxIdleTime: Duration.minutes(1), entityTerminationTimeout: Duration.seconds(15), entityMessagePollInterval: Duration.seconds(10), entityReplyPollInterval: Duration.millis(200), sendRetryInterval: Duration.millis(100), - refreshAssignmentsInterval: Duration.minutes(5), + refreshAssignmentsInterval: Duration.seconds(3), + runnerHealthCheckInterval: Duration.minutes(1), simulateRemoteSerialization: true } @@ -155,9 +168,8 @@ export const config: Config.Config = Config.all({ Config.withDescription("The port to listen on.") ) }).pipe(Config.map((options) => RunnerAddress.make(options)), Config.option), - serverVersion: Config.integer("serverVersion").pipe( - Config.withDefault(defaults.serverVersion), - Config.withDescription("The version of the current runner.") + runnerShardWeight: Config.integer("runnerShardWeight").pipe( + Config.withDefault(defaults.runnerShardWeight) ), shardGroups: Config.array(Config.string("shardGroups")).pipe( Config.withDefault(["default"]), @@ -167,21 +179,13 @@ export const config: Config.Config = Config.all({ Config.withDefault(defaults.shardsPerGroup), Config.withDescription("The number of shards to allocate per shard group.") ), - shardManagerAddress: Config.all({ - host: Config.string("shardManagerHost").pipe( - Config.withDefault(defaults.shardManagerAddress.host), - Config.withDescription("The host of the shard manager.") - ), - port: Config.integer("shardManagerPort").pipe( - Config.withDefault(defaults.shardManagerAddress.port), - Config.withDescription("The port of the shard manager.") - ) - }).pipe(Config.map((options) => RunnerAddress.make(options))), - shardManagerUnavailableTimeout: Config.duration("shardManagerUnavailableTimeout").pipe( - Config.withDefault(defaults.shardManagerUnavailableTimeout), - Config.withDescription( - "If the shard is unavilable for this duration, all the shard assignments will be reset." 
- ) + shardLockRefreshInterval: Config.duration("shardLockRefreshInterval").pipe( + Config.withDefault(defaults.shardLockRefreshInterval), + Config.withDescription("Shard lock refresh interval.") + ), + shardLockExpiration: Config.duration("shardLockExpiration").pipe( + Config.withDefault(defaults.shardLockExpiration), + Config.withDescription("Shard lock expiration duration.") ), entityMailboxCapacity: Config.integer("entityMailboxCapacity").pipe( Config.withDefault(defaults.entityMailboxCapacity), @@ -207,12 +211,16 @@ export const config: Config.Config = Config.all({ ), sendRetryInterval: Config.duration("sendRetryInterval").pipe( Config.withDefault(defaults.sendRetryInterval), - Config.withDescription("The interval to retry a send if EntityNotManagedByRunner is returned.") + Config.withDescription("The interval to retry a send if EntityNotAssignedToRunner is returned.") ), refreshAssignmentsInterval: Config.duration("refreshAssignmentsInterval").pipe( Config.withDefault(defaults.refreshAssignmentsInterval), Config.withDescription("The interval at which to refresh shard assignments.") ), + runnerHealthCheckInterval: Config.duration("runnerHealthCheckInterval").pipe( + Config.withDefault(defaults.runnerHealthCheckInterval), + Config.withDescription("The interval at which to check for unhealthy runners and report them.") + ), simulateRemoteSerialization: Config.boolean("simulateRemoteSerialization").pipe( Config.withDefault(defaults.simulateRemoteSerialization), Config.withDescription("Simulate serialization and deserialization to remote runners for local entities.") diff --git a/packages/cluster/src/Snowflake.ts b/packages/cluster/src/Snowflake.ts index aefaa5f0001..8031ecc1167 100644 --- a/packages/cluster/src/Snowflake.ts +++ b/packages/cluster/src/Snowflake.ts @@ -162,7 +162,7 @@ export const makeGenerator: Effect.Effect = Effect.gen(func // reset sequence if we're in a new millisecond sequence = 0 sequenceAt = now - } else if (sequence >= 1024) { + } else if (sequence >= 4096) { // if we've hit the max sequence for this millisecond, go to the next // millisecond sequenceAt++ diff --git a/packages/cluster/src/SocketRunner.ts b/packages/cluster/src/SocketRunner.ts index 833eb7b225d..615ac24cf30 100644 --- a/packages/cluster/src/SocketRunner.ts +++ b/packages/cluster/src/SocketRunner.ts @@ -7,11 +7,12 @@ import * as RpcServer from "@effect/rpc/RpcServer" import * as Effect from "effect/Effect" import * as Layer from "effect/Layer" import type { MessageStorage } from "./MessageStorage.js" +import type { RunnerHealth } from "./RunnerHealth.js" import type * as Runners from "./Runners.js" import * as RunnerServer from "./RunnerServer.js" +import type * as RunnerStorage from "./RunnerStorage.js" import type * as Sharding from "./Sharding.js" import type { ShardingConfig } from "./ShardingConfig.js" -import type * as ShardStorage from "./ShardStorage.js" const withLogAddress = (layer: Layer.Layer): Layer.Layer => Layer.effectDiscard(Effect.gen(function*() { @@ -37,10 +38,11 @@ export const layer: Layer.Layer< | RpcSerialization.RpcSerialization | SocketServer | MessageStorage - | ShardStorage.ShardStorage + | RunnerStorage.RunnerStorage + | RunnerHealth > = RunnerServer.layerWithClients.pipe( withLogAddress, - Layer.provide(Layer.fresh(RpcServer.layerProtocolSocketServer)) + Layer.provide(RpcServer.layerProtocolSocketServer) ) /** @@ -50,5 +52,5 @@ export const layer: Layer.Layer< export const layerClientOnly: Layer.Layer< Sharding.Sharding | Runners.Runners, never, - 
Runners.RpcClientProtocol | ShardingConfig | MessageStorage + Runners.RpcClientProtocol | ShardingConfig | MessageStorage | RunnerStorage.RunnerStorage > = RunnerServer.layerClientOnly diff --git a/packages/cluster/src/SocketShardManager.ts b/packages/cluster/src/SocketShardManager.ts deleted file mode 100644 index c2879870085..00000000000 --- a/packages/cluster/src/SocketShardManager.ts +++ /dev/null @@ -1,48 +0,0 @@ -/** - * @since 1.0.0 - */ -import { SocketServer } from "@effect/platform/SocketServer" -import type { RpcSerialization } from "@effect/rpc/RpcSerialization" -import * as RpcServer from "@effect/rpc/RpcServer" -import * as Effect from "effect/Effect" -import * as Layer from "effect/Layer" -import * as MessageStorage from "./MessageStorage.js" -import type { RunnerHealth } from "./RunnerHealth.js" -import * as Runners from "./Runners.js" -import type { ShardingConfig } from "./ShardingConfig.js" -import * as ShardManager from "./ShardManager.js" -import type { ShardStorage } from "./ShardStorage.js" - -const withLogAddress = (layer: Layer.Layer): Layer.Layer => - Layer.effectDiscard(Effect.gen(function*() { - const server = yield* SocketServer - const address = server.address._tag === "UnixAddress" - ? server.address.path - : `${server.address.hostname}:${server.address.port}` - yield* Effect.annotateLogs(Effect.logInfo(`Listening on: ${address}`), { - package: "@effect/cluster", - service: "ShardManager" - }) - })).pipe(Layer.provideMerge(layer)) - -/** - * @since 1.0.0 - * @category Layers - */ -export const layer: Layer.Layer< - ShardManager.ShardManager, - never, - | ShardStorage - | SocketServer - | Runners.RpcClientProtocol - | RpcSerialization - | RunnerHealth - | ShardManager.Config - | ShardingConfig -> = ShardManager.layerServer.pipe( - withLogAddress, - Layer.provide(Layer.fresh(RpcServer.layerProtocolSocketServer)), - Layer.provideMerge(ShardManager.layer), - Layer.provide(Runners.layerRpc), - Layer.provide(MessageStorage.layerNoop) -) diff --git a/packages/cluster/src/SqlMessageStorage.ts b/packages/cluster/src/SqlMessageStorage.ts index 5527ce6836d..e2b9a01a8b6 100644 --- a/packages/cluster/src/SqlMessageStorage.ts +++ b/packages/cluster/src/SqlMessageStorage.ts @@ -6,7 +6,6 @@ import * as SqlClient from "@effect/sql/SqlClient" import type { Row } from "@effect/sql/SqlConnection" import type { SqlError } from "@effect/sql/SqlError" import * as Arr from "effect/Array" -import type { DurationInput } from "effect/Duration" import * as Effect from "effect/Effect" import * as Layer from "effect/Layer" import * as Option from "effect/Option" @@ -41,6 +40,7 @@ export const make = Effect.fnUntraced(function*(options?: { ) const messageKindAckChunk = sql.literal(String(messageKind.AckChunk)) + const messageKindInterrupt = sql.literal(String(messageKind.Interrupt)) const replyKindWithExit = sql.literal(String(replyKind.WithExit)) const messagesTable = table("messages") @@ -284,11 +284,11 @@ export const make = Effect.fnUntraced(function*(options?: { ) }) - const fiveMinutesAgo = sql.onDialectOrElse({ - mssql: () => sql.literal(`DATEADD(MINUTE, -5, GETDATE())`), - mysql: () => sql.literal(`NOW() - INTERVAL 5 MINUTE`), - pg: () => sql.literal(`NOW() - INTERVAL '5 minutes'`), - orElse: () => sql.literal(`DATETIME('now', '-5 minute')`) + const tenMinutesAgo = sql.onDialectOrElse({ + mssql: () => sql.literal(`DATEADD(MINUTE, -10, GETDATE())`), + mysql: () => sql.literal(`NOW() - INTERVAL 10 MINUTE`), + pg: () => sql.literal(`NOW() - INTERVAL '10 minutes'`), + orElse: () => 
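+ // sqlite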
sql.literal(`DATETIME('now', '-10 minute')`) }) const sqlNowString = sql.onDialectOrElse({ pg: () => "NOW()", @@ -318,7 +318,7 @@ export const make = Effect.fnUntraced(function*(options?: { AND (kind = ${replyKindWithExit} OR acked = ${sqlFalse}) ) AND m.processed = ${sqlFalse} - AND (m.last_read IS NULL OR m.last_read < ${fiveMinutesAgo}) + AND (m.last_read IS NULL OR m.last_read < ${tenMinutesAgo}) AND (m.deliver_at IS NULL OR m.deliver_at <= ${sql.literal(String(now))}) ORDER BY m.rowid ASC FOR UPDATE @@ -339,7 +339,7 @@ export const make = Effect.fnUntraced(function*(options?: { AND (kind = ${replyKindWithExit} OR acked = ${sqlFalse}) ) AND processed = ${sqlFalse} - AND (m.last_read IS NULL OR m.last_read < ${fiveMinutesAgo}) + AND (m.last_read IS NULL OR m.last_read < ${tenMinutesAgo}) AND (m.deliver_at IS NULL OR m.deliver_at <= ${sql.literal(String(now))}) ORDER BY m.rowid ASC `.unprepared.pipe( @@ -400,7 +400,8 @@ export const make = Effect.fnUntraced(function*(options?: { ) }).pipe( Effect.provideService(SqlClient.SafeIntegers, true), - PersistenceError.refail + PersistenceError.refail, + withTracerDisabled ), saveReply: (reply) => @@ -415,18 +416,23 @@ export const make = Effect.fnUntraced(function*(options?: { ) }).pipe( Effect.asVoid, - PersistenceError.refail + PersistenceError.refail, + withTracerDisabled ), clearReplies: Effect.fnUntraced( function*(requestId) { - yield* sql`DELETE FROM ${repliesTableSql} WHERE request_id = ${String(requestId)}` + yield* sql`DELETE FROM ${repliesTableSql} WHERE request_id = ${String(requestId)} AND kind = 0` + yield* sql`DELETE FROM ${messagesTableSql} WHERE request_id = ${ + String(requestId) + } AND kind = ${messageKindInterrupt}` yield* sql`UPDATE ${messagesTableSql} SET processed = ${sqlFalse}, last_reply_id = NULL, last_read = NULL WHERE request_id = ${ String(requestId) }` }, sql.withTransaction, - PersistenceError.refail + PersistenceError.refail, + withTracerDisabled ), requestIdForPrimaryKey: (primaryKey) => @@ -437,7 +443,8 @@ export const make = Effect.fnUntraced(function*(options?: { ) ), Effect.provideService(SqlClient.SafeIntegers, true), - PersistenceError.refail + PersistenceError.refail, + withTracerDisabled ), repliesFor: (requestIds) => @@ -590,7 +597,6 @@ export const layer: Layer.Layer< */ export const layerWith = (options: { readonly prefix?: string | undefined - readonly replyPollInterval?: DurationInput | undefined }): Layer.Layer => Layer.scoped(MessageStorage.MessageStorage, make(options)).pipe( Layer.provide(Snowflake.layerGenerator) @@ -638,8 +644,7 @@ const migrations = (options?: { last_reply_id BIGINT, last_read DATETIME, deliver_at BIGINT, - UNIQUE (message_id), - FOREIGN KEY (request_id) REFERENCES ${messagesTableSql} (id) ON DELETE CASCADE + UNIQUE (message_id) ) `, mysql: () => @@ -665,8 +670,7 @@ const migrations = (options?: { last_read DATETIME, deliver_at BIGINT, UNIQUE (id), - UNIQUE (message_id), - FOREIGN KEY (request_id) REFERENCES ${messagesTableSql} (id) ON DELETE CASCADE + UNIQUE (message_id) ) `, pg: () => @@ -691,8 +695,7 @@ const migrations = (options?: { last_reply_id BIGINT, last_read TIMESTAMP, deliver_at BIGINT, - UNIQUE (message_id), - FOREIGN KEY (request_id) REFERENCES ${messagesTableSql} (id) ON DELETE CASCADE + UNIQUE (message_id) ) `.pipe(Effect.ignore), orElse: () => @@ -717,8 +720,7 @@ const migrations = (options?: { last_reply_id INTEGER, last_read TEXT, deliver_at INTEGER, - UNIQUE (message_id), - FOREIGN KEY (request_id) REFERENCES ${messagesTableSql} (id) ON DELETE CASCADE + 
UNIQUE (message_id) ) ` }) @@ -790,8 +792,7 @@ const migrations = (options?: { sequence INT, acked BIT NOT NULL DEFAULT 0, CONSTRAINT ${sql(repliesTable + "_one_exit")} UNIQUE (request_id, kind), - CONSTRAINT ${sql(repliesTable + "_sequence")} UNIQUE (request_id, sequence), - FOREIGN KEY (request_id) REFERENCES ${messagesTableSql} (id) ON DELETE CASCADE + CONSTRAINT ${sql(repliesTable + "_sequence")} UNIQUE (request_id, sequence) ) `, mysql: () => @@ -806,8 +807,7 @@ const migrations = (options?: { acked BOOLEAN NOT NULL DEFAULT FALSE, UNIQUE (id), UNIQUE (request_id, kind), - UNIQUE (request_id, sequence), - FOREIGN KEY (request_id) REFERENCES ${messagesTableSql} (id) ON DELETE CASCADE + UNIQUE (request_id, sequence) ) `, pg: () => @@ -821,8 +821,7 @@ const migrations = (options?: { sequence INT, acked BOOLEAN NOT NULL DEFAULT FALSE, UNIQUE (request_id, kind), - UNIQUE (request_id, sequence), - FOREIGN KEY (request_id) REFERENCES ${messagesTableSql} (id) ON DELETE CASCADE + UNIQUE (request_id, sequence) ) `, orElse: () => @@ -836,8 +835,7 @@ const migrations = (options?: { sequence INTEGER, acked BOOLEAN NOT NULL DEFAULT FALSE, UNIQUE (request_id, kind), - UNIQUE (request_id, sequence), - FOREIGN KEY (request_id) REFERENCES ${messagesTableSql} (id) ON DELETE CASCADE + UNIQUE (request_id, sequence) ) ` }) diff --git a/packages/cluster/src/SqlRunnerStorage.ts b/packages/cluster/src/SqlRunnerStorage.ts new file mode 100644 index 00000000000..0c07bb27411 --- /dev/null +++ b/packages/cluster/src/SqlRunnerStorage.ts @@ -0,0 +1,541 @@ +/** + * @since 1.0.0 + */ +import * as SqlClient from "@effect/sql/SqlClient" +import type { SqlError } from "@effect/sql/SqlError" +import type * as Statement from "@effect/sql/Statement" +import * as Arr from "effect/Array" +import * as Duration from "effect/Duration" +import * as Effect from "effect/Effect" +import * as Layer from "effect/Layer" +import * as ScopedRef from "effect/ScopedRef" +import { PersistenceError } from "./ClusterError.js" +import * as RunnerStorage from "./RunnerStorage.js" +import * as ShardId from "./ShardId.js" +import * as ShardingConfig from "./ShardingConfig.js" + +const withTracerDisabled = Effect.withTracerEnabled(false) + +/** + * @since 1.0.0 + * @category Constructors + */ +export const make = Effect.fnUntraced(function*(options: { + readonly prefix?: string | undefined +}) { + const config = yield* ShardingConfig.ShardingConfig + const sql = (yield* SqlClient.SqlClient).withoutTransforms() + const prefix = options?.prefix ?? 
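+ // default namespace: tables become "<prefix>_runners" and "<prefix>_locks"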
"cluster" + const table = (name: string) => `${prefix}_${name}` + const lockConnRef = yield* sql.onDialectOrElse({ + sqlite: () => Effect.void, + + orElse: () => ScopedRef.fromAcquire(sql.reserve) + }) + + const runnersTable = table("runners") + const runnersTableSql = sql(runnersTable) + + // Migrate old tables if they exist + // TODO: Remove in next major version + const hasOldTables = yield* sql`SELECT shard_id FROM ${sql(table("shards"))} LIMIT 1`.pipe( + Effect.isSuccess + ) + if (hasOldTables) { + yield* sql`DROP TABLE ${sql(table("shards"))}`.pipe(Effect.ignore) + yield* sql`DROP TABLE ${runnersTableSql}`.pipe(Effect.ignore) + } + + yield* sql.onDialectOrElse({ + mssql: () => + sql` + IF OBJECT_ID(N'${runnersTableSql}', N'U') IS NULL + CREATE TABLE ${runnersTableSql} ( + machine_id INT IDENTITY PRIMARY KEY, + address VARCHAR(255) NOT NULL, + runner TEXT NOT NULL, + healthy BIT NOT NULL DEFAULT 1, + last_heartbeat DATETIME NOT NULL DEFAULT GETDATE(), + UNIQUE(address) + ) + `, + mysql: () => + sql` + CREATE TABLE IF NOT EXISTS ${runnersTableSql} ( + machine_id INT AUTO_INCREMENT PRIMARY KEY, + address VARCHAR(255) NOT NULL, + runner TEXT NOT NULL, + healthy BOOLEAN NOT NULL DEFAULT TRUE, + last_heartbeat DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + UNIQUE(address) + ) + `, + pg: () => + sql` + CREATE TABLE IF NOT EXISTS ${runnersTableSql} ( + machine_id SERIAL PRIMARY KEY, + address VARCHAR(255) NOT NULL, + runner TEXT NOT NULL, + healthy BOOLEAN NOT NULL DEFAULT TRUE, + last_heartbeat TIMESTAMP NOT NULL DEFAULT NOW(), + UNIQUE(address) + ) + `, + orElse: () => + // sqlite + sql` + CREATE TABLE IF NOT EXISTS ${runnersTableSql} ( + machine_id INTEGER PRIMARY KEY AUTOINCREMENT, + address TEXT NOT NULL, + runner TEXT NOT NULL, + healthy INTEGER NOT NULL DEFAULT 1, + last_heartbeat DATETIME NOT NULL DEFAULT (CURRENT_TIMESTAMP), + UNIQUE(address) + ) + ` + }) + + const locksTable = table("locks") + const locksTableSql = sql(locksTable) + + yield* sql.onDialectOrElse({ + mssql: () => + sql` + IF OBJECT_ID(N'${locksTableSql}', N'U') IS NULL + CREATE TABLE ${locksTableSql} ( + shard_id VARCHAR(50) PRIMARY KEY, + address VARCHAR(255) NOT NULL, + acquired_at DATETIME NOT NULL + ) + `, + mysql: () => Effect.void, + pg: () => Effect.void, + orElse: () => + // sqlite + sql` + CREATE TABLE IF NOT EXISTS ${locksTableSql} ( + shard_id TEXT PRIMARY KEY, + address TEXT NOT NULL, + acquired_at DATETIME NOT NULL + ) + ` + }) + + const sqlNowString = sql.onDialectOrElse({ + pg: () => "NOW()", + mysql: () => "NOW()", + mssql: () => "GETDATE()", + orElse: () => "CURRENT_TIMESTAMP" + }) + const sqlNow = sql.literal(sqlNowString) + + const expiresSeconds = sql.literal(Math.ceil(Duration.toSeconds(config.shardLockExpiration)).toString()) + const lockExpiresAt = sql.onDialectOrElse({ + pg: () => sql`${sqlNow} - INTERVAL '${expiresSeconds} seconds'`, + mysql: () => sql`DATE_SUB(${sqlNow}, INTERVAL ${expiresSeconds} SECOND)`, + mssql: () => sql`DATEADD(SECOND, -${expiresSeconds}, ${sqlNow})`, + orElse: () => sql`datetime(${sqlNow}, '-${expiresSeconds} seconds')` + }) + + const encodeBoolean = sql.onDialectOrElse({ + mssql: () => (b: boolean) => (b ? 1 : 0), + sqlite: () => (b: boolean) => (b ? 
1 : 0), + orElse: () => (b: boolean) => b + }) + + // Upsert runner and return machine_id + const insertRunner = sql.onDialectOrElse({ + mssql: () => (address: string, runner: string, healthy: boolean) => + sql` + MERGE ${runnersTableSql} AS target + USING (SELECT ${address} AS address, ${runner} AS runner, ${sqlNow} AS last_heartbeat, ${ + encodeBoolean(healthy) + } AS healthy) AS source + ON target.address = source.address + WHEN MATCHED THEN + UPDATE SET runner = source.runner, last_heartbeat = source.last_heartbeat, healthy = source.healthy + WHEN NOT MATCHED THEN + INSERT (address, runner, last_heartbeat, healthy) + VALUES (source.address, source.runner, source.last_heartbeat, source.healthy) + OUTPUT INSERTED.machine_id; + `.values, + mysql: () => (address: string, runner: string, healthy: boolean) => + sql<{ machine_id: number }>` + INSERT INTO ${runnersTableSql} (address, runner, last_heartbeat, healthy) + VALUES (${address}, ${runner}, ${sqlNow}, ${healthy}) + ON DUPLICATE KEY UPDATE + runner = VALUES(runner), + last_heartbeat = VALUES(last_heartbeat), + healthy = VALUES(healthy); + SELECT machine_id FROM ${runnersTableSql} WHERE address = ${address}; + `.unprepared.pipe( + Effect.map((results: any) => [[results[1][0].machine_id]]) + ), + pg: () => (address: string, runner: string, healthy: boolean) => + sql` + INSERT INTO ${runnersTableSql} (address, runner, last_heartbeat, healthy) + VALUES (${address}, ${runner}, ${sqlNow}, ${healthy}) + ON CONFLICT (address) DO UPDATE + SET runner = EXCLUDED.runner, + last_heartbeat = EXCLUDED.last_heartbeat, + healthy = EXCLUDED.healthy + RETURNING machine_id + `.values, + orElse: () => (address: string, runner: string, healthy: boolean) => + // sqlite + sql` + INSERT INTO ${runnersTableSql} (address, runner, last_heartbeat, healthy) + VALUES (${address}, ${runner}, ${sqlNow}, ${encodeBoolean(healthy)}) + ON CONFLICT(address) DO UPDATE SET + runner = excluded.runner, + last_heartbeat = excluded.last_heartbeat, + healthy = excluded.healthy + RETURNING machine_id; + `.values + }) + + const execWithLockConn = (effect: Statement.Statement): Effect.Effect => { + if (!lockConnRef) return effect + const [query, params] = effect.compile() + return ScopedRef.get(lockConnRef).pipe( + Effect.flatMap((conn) => conn.executeRaw(query, params)), + Effect.onError(() => resetLockConn) + ) + } + const execWithLockConnValues = ( + effect: Statement.Statement + ): Effect.Effect>, SqlError> => { + if (!lockConnRef) return effect.values + const [query, params] = effect.compile() + return ScopedRef.get(lockConnRef).pipe( + Effect.flatMap((conn) => conn.executeValues(query, params)), + Effect.onError(() => resetLockConn) + ) + } + const resetLockConn = sql.onDialectOrElse({ + pg: () => + Effect.gen(function*() { + const conn = yield* ScopedRef.get(lockConnRef!) + yield* Effect.ignore(conn.executeRaw("SELECT pg_advisory_unlock_all()", [])) + yield* Effect.orDie(ScopedRef.set(lockConnRef!, sql.reserve)) + }), + mysql: () => + Effect.gen(function*() { + const conn = yield* ScopedRef.get(lockConnRef!) + yield* Effect.ignore(conn.executeRaw("SELECT RELEASE_ALL_LOCKS()", [])) + yield* Effect.orDie(ScopedRef.set(lockConnRef!, sql.reserve)) + }), + orElse: () => Effect.void + }) + + const acquireLock = sql.onDialectOrElse({ + pg: () => + Effect.fnUntraced(function*(_address: string, shardIds: ReadonlyArray) { + const conn = yield* ScopedRef.get(lockConnRef!) 
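+ // advisory locks are session-scoped, so always go through the reserved connection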
+ const acquiredShardIds: Array = [] + const toAcquire = new Map(shardIds.map((shardId) => [lockNumbers.get(shardId)!, shardId])) + const takenLocks = yield* conn.executeValues( + `SELECT objid FROM pg_locks WHERE locktype = 'advisory' AND granted = true AND pid = pg_backend_pid() ORDER BY objid`, + [] + ) + for (let i = 0; i < takenLocks.length; i++) { + const lockNum = takenLocks[i][0] as number + acquiredShardIds.push(lockNumbersReverse.get(lockNum)!) + toAcquire.delete(lockNum) + } + if (toAcquire.size === 0) { + return acquiredShardIds + } + const results = (yield* conn.executeUnprepared(`SELECT ${pgLocks(toAcquire)}`, [], undefined))[0] as Record< + string, + boolean + > + for (const shardId in results) { + if (results[shardId]) { + acquiredShardIds.push(shardId) + } + } + return acquiredShardIds + }, Effect.onError(() => resetLockConn)), + + mysql: () => + Effect.fnUntraced(function*(_address: string, shardIds: ReadonlyArray) { + const conn = yield* ScopedRef.get(lockConnRef!) + const takenLocks = (yield* conn.executeUnprepared(`SELECT ${allMySqlTakenLocks}`, [], undefined))[0] as Record< + string, + 1 | null + > + const acquiredShardIds: Array = [] + const toAcquire: Array = [] + for (const shardId in takenLocks) { + if (takenLocks[shardId] === 1) { + acquiredShardIds.push(shardId) + } else if (shardIds.includes(shardId)) { + toAcquire.push(shardId) + } + } + if (toAcquire.length === 0) { + return acquiredShardIds + } + const results = (yield* conn.executeUnprepared(`SELECT ${mysqlLocks(toAcquire)}`, [], undefined))[0] as Record< + string, + number + > + for (const shardId in results) { + if (results[shardId] === 1) { + acquiredShardIds.push(shardId) + } + } + return acquiredShardIds + }, Effect.onError(() => resetLockConn)), + + mssql: () => (address: string, shardIds: ReadonlyArray) => { + const values = shardIds.map((shardId) => sql`(${stringLiteral(shardId)}, ${stringLiteral(address)}, ${sqlNow})`) + return sql` + MERGE ${locksTableSql} WITH (HOLDLOCK) AS target + USING (SELECT * FROM (VALUES ${sql.csv(values)})) AS source (shard_id, address, acquired_at) + ON target.shard_id = source.shard_id + WHEN MATCHED AND (target.address = source.address OR DATEDIFF(SECOND, target.acquired_at, ${sqlNow}) > ${expiresSeconds}) THEN + UPDATE SET address = source.address, acquired_at = source.acquired_at + WHEN NOT MATCHED THEN + INSERT (shard_id, address, acquired_at) + VALUES (source.shard_id, source.address, source.acquired_at); + `.pipe( + Effect.andThen(acquiredLocks(address, shardIds)), + sql.withTransaction + ) + }, + + orElse: () => (address: string, shardIds: ReadonlyArray) => { + const values = shardIds.map((shardId) => sql`(${stringLiteral(shardId)}, ${stringLiteral(address)}, ${sqlNow})`) + return sql` + WITH source(shard_id, address, acquired_at) AS (VALUES ${sql.csv(values)}) + INSERT INTO ${locksTableSql} (shard_id, address, acquired_at) + SELECT source.shard_id, source.address, source.acquired_at + FROM source + WHERE NOT EXISTS ( + SELECT 1 FROM ${locksTableSql} + WHERE shard_id = source.shard_id + AND address != ${address} + AND (strftime('%s', ${sqlNow}) - strftime('%s', acquired_at)) <= ${expiresSeconds} + ) + ON CONFLICT(shard_id) DO UPDATE + SET address = ${address}, acquired_at = ${sqlNow} + `.pipe( + Effect.andThen(acquiredLocks(address, shardIds)), + sql.withTransaction + ) + } + }) + + const lockNumbers = new Map() + const lockNumbersReverse = new Map() + for (let i = 0; i < config.shardGroups.length; i++) { + const group = config.shardGroups[i] + const base = (i + 
1) * 1000000 + for (let shard = 1; shard <= config.shardsPerGroup; shard++) { + const shardId = ShardId.make(group, shard).toString() + const lockNum = base + shard + lockNumbers.set(shardId, lockNum) + lockNumbersReverse.set(lockNum, shardId) + } + } + + const lockNames = new Map() + const lockNamesReverse = new Map() + for (let i = 0; i < config.shardGroups.length; i++) { + const group = config.shardGroups[i] + for (let shard = 1; shard <= config.shardsPerGroup; shard++) { + const shardId = ShardId.make(group, shard).toString() + const lockName = `${prefix}.${shardId}` + lockNames.set(shardId, lockName) + lockNamesReverse.set(lockName, shardId) + } + } + + const pgLocks = (shardIdsMap: Map) => + Array.from( + shardIdsMap.entries(), + ([lockNum, shardId]) => `pg_try_advisory_lock(${lockNum}) AS "${shardId}"` + ).join(", ") + + const mysqlLocks = (shardIds: ReadonlyArray) => + shardIds.map((shardId) => `GET_LOCK('${lockNames.get(shardId)!}', 0) AS "${shardId}"`).join(", ") + + const allMySqlTakenLocks = Array.from( + lockNames.entries(), + ([shardId, lockName]) => `IS_USED_LOCK('${lockName}') = CONNECTION_ID() AS "${shardId}"` + ).join(", ") + + const acquiredLocks = (address: string, shardIds: ReadonlyArray) => + sql<{ shard_id: string }>` + SELECT shard_id FROM ${sql(locksTable)} + WHERE address = ${address} + AND acquired_at >= ${lockExpiresAt} + AND shard_id IN ${stringLiteralArr(shardIds)} + `.values.pipe( + Effect.map((rows) => rows.map((row) => row[0] as string)) + ) + + const wrapString = sql.onDialectOrElse({ + mssql: () => (s: string) => `N'${s}'`, + orElse: () => (s: string) => `'${s}'` + }) + const stringLiteral = (s: string) => sql.literal(wrapString(s)) + const stringLiteralArr = (arr: ReadonlyArray) => sql.literal(`(${arr.map(wrapString).join(",")})`) + + const refreshShards = sql.onDialectOrElse({ + pg: () => acquireLock, + mysql: () => acquireLock, + mssql: () => (address: string, shardIds: ReadonlyArray) => + sql` + UPDATE ${locksTableSql} + SET acquired_at = ${sqlNow} + OUTPUT inserted.shard_id + WHERE address = ${address} AND shard_id IN ${stringLiteralArr(shardIds)} + `.pipe(execWithLockConnValues, Effect.map((rows) => rows.map((row) => row[0] as string))), + orElse: () => (address: string, shardIds: ReadonlyArray) => + sql` + UPDATE ${locksTableSql} + SET acquired_at = ${sqlNow} + WHERE address = ${address} AND shard_id IN ${stringLiteralArr(shardIds)} + RETURNING shard_id + `.pipe(execWithLockConnValues, Effect.map((rows) => rows.map((row) => row[0] as string))) + }) + + return RunnerStorage.makeEncoded({ + getRunners: sql`SELECT runner, healthy FROM ${runnersTableSql} WHERE last_heartbeat > ${lockExpiresAt}`.values.pipe( + PersistenceError.refail, + Effect.map(Arr.map(([runner, healthy]) => [String(runner), Boolean(healthy)] as const)), + withTracerDisabled + ), + + register: (address, runner, healthy) => + insertRunner(address, runner, healthy).pipe( + Effect.map((rows: any) => Number(rows[0][0])), + PersistenceError.refail, + withTracerDisabled + ), + + unregister: (address) => + sql`DELETE FROM ${runnersTableSql} WHERE address = ${address} OR last_heartbeat < ${lockExpiresAt}`.pipe( + Effect.asVoid, + PersistenceError.refail, + withTracerDisabled + ), + + setRunnerHealth: (address, healthy) => + sql`UPDATE ${runnersTableSql} SET healthy = ${encodeBoolean(healthy)} WHERE address = ${address}` + .pipe( + Effect.asVoid, + PersistenceError.refail, + withTracerDisabled + ), + + acquire: (address, shardIds) => + acquireLock(address, shardIds).pipe( + 
PersistenceError.refail, + withTracerDisabled + ), + + refresh: (address, shardIds) => + sql`UPDATE ${runnersTableSql} SET last_heartbeat = ${sqlNow} WHERE address = ${address}`.pipe( + execWithLockConn, + shardIds.length > 0 ? + Effect.andThen(refreshShards(address, shardIds)) : + Effect.as([]), + PersistenceError.refail + ), + + release: sql.onDialectOrElse({ + pg: () => + Effect.fnUntraced( + function*(_address, shardId) { + const lockNum = lockNumbers.get(shardId)! + const conn = yield* ScopedRef.get(lockConnRef!) + const release = conn.executeRaw(`SELECT pg_advisory_unlock(${lockNum})`, []) + const check = conn.executeValues( + `SELECT 1 FROM pg_locks WHERE locktype = 'advisory' AND granted = true AND pid = pg_backend_pid() AND objid = ${lockNum}`, + [] + ) + while (true) { + yield* release + const takenLocks = yield* check + if (takenLocks.length === 0) return + } + }, + Effect.onError(() => resetLockConn), + Effect.asVoid, + PersistenceError.refail, + withTracerDisabled + ), + mysql: () => + Effect.fnUntraced( + function*(_address, shardId) { + const conn = yield* ScopedRef.get(lockConnRef!) + const lockName = lockNames.get(shardId)! + const release = conn.executeRaw(`SELECT RELEASE_LOCK('${lockName}')`, []) + const check = conn.executeValues( + `SELECT IS_USED_LOCK('${lockName}') = CONNECTION_ID() AS is_taken`, + [] + ) + while (true) { + yield* release + const takenLocks = yield* check + if (takenLocks.length === 0 || takenLocks[0][0] !== 1) return + } + }, + Effect.onError(() => resetLockConn), + Effect.asVoid, + PersistenceError.refail, + withTracerDisabled + ), + orElse: () => (address, shardId) => + sql`DELETE FROM ${locksTableSql} WHERE address = ${address} AND shard_id = ${shardId}`.pipe( + PersistenceError.refail, + withTracerDisabled + ) + }), + + releaseAll: sql.onDialectOrElse({ + pg: () => (_address) => + sql`SELECT pg_advisory_unlock_all()`.pipe( + execWithLockConn, + Effect.asVoid, + PersistenceError.refail, + withTracerDisabled + ), + mysql: () => (_address) => + sql`SELECT RELEASE_ALL_LOCKS()`.pipe( + execWithLockConn, + Effect.asVoid, + PersistenceError.refail, + withTracerDisabled + ), + orElse: () => (address) => + sql`DELETE FROM ${locksTableSql} WHERE address = ${address}`.pipe( + PersistenceError.refail, + withTracerDisabled + ) + }) + }) +}, withTracerDisabled) + +/** + * @since 1.0.0 + * @category Layers + */ +export const layer: Layer.Layer< + RunnerStorage.RunnerStorage, + SqlError, + SqlClient.SqlClient | ShardingConfig.ShardingConfig +> = Layer.scoped(RunnerStorage.RunnerStorage)(make({})) + +/** + * @since 1.0.0 + * @category Layers + */ +export const layerWith = (options: { + readonly prefix?: string | undefined +}): Layer.Layer => + Layer.scoped(RunnerStorage.RunnerStorage)(make(options)) diff --git a/packages/cluster/src/SqlShardStorage.ts b/packages/cluster/src/SqlShardStorage.ts deleted file mode 100644 index 6506a611c77..00000000000 --- a/packages/cluster/src/SqlShardStorage.ts +++ /dev/null @@ -1,329 +0,0 @@ -/** - * @since 1.0.0 - */ -import * as SqlClient from "@effect/sql/SqlClient" -import type { SqlError } from "@effect/sql/SqlError" -import * as Arr from "effect/Array" -import * as Effect from "effect/Effect" -import * as Layer from "effect/Layer" -import { PersistenceError } from "./ClusterError.js" -import * as ShardStorage from "./ShardStorage.js" - -const withTracerDisabled = Effect.withTracerEnabled(false) - -/** - * @since 1.0.0 - * @category Constructors - */ -export const make = Effect.fnUntraced(function*(options?: { - readonly 
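Note: the new `SqlRunnerStorage` layers above only require a `SqlClient` and the `ShardingConfig`. A minimal wiring sketch (the Postgres settings and the `my_cluster` prefix are illustrative placeholders, not part of this diff):

```ts
import { SqlRunnerStorage } from "@effect/cluster"
import { PgClient } from "@effect/sql-pg"
import { Config, Layer } from "effect"

// Hypothetical connection settings; shard locks are advisory locks tied to a
// reserved session, so connections must not be rotated by a proxy.
const SqlLive = PgClient.layer({
  database: Config.succeed("cluster")
})

// `layerWith` overrides the table-name prefix; plain `layer` defaults to "cluster".
// ShardingConfig must still be provided, e.g. by the NodeClusterSocket layers.
const RunnerStorageLive = SqlRunnerStorage.layerWith({ prefix: "my_cluster" }).pipe(
  Layer.provide(SqlLive)
)
```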
diff --git a/packages/cluster/src/SqlShardStorage.ts b/packages/cluster/src/SqlShardStorage.ts
deleted file mode 100644
index 6506a611c77..00000000000
--- a/packages/cluster/src/SqlShardStorage.ts
+++ /dev/null
@@ -1,329 +0,0 @@
-/**
- * @since 1.0.0
- */
-import * as SqlClient from "@effect/sql/SqlClient"
-import type { SqlError } from "@effect/sql/SqlError"
-import * as Arr from "effect/Array"
-import * as Effect from "effect/Effect"
-import * as Layer from "effect/Layer"
-import { PersistenceError } from "./ClusterError.js"
-import * as ShardStorage from "./ShardStorage.js"
-
-const withTracerDisabled = Effect.withTracerEnabled(false)
-
-/**
- * @since 1.0.0
- * @category Constructors
- */
-export const make = Effect.fnUntraced(function*(options?: {
-  readonly prefix?: string | undefined
-}) {
-  const sql = (yield* SqlClient.SqlClient).withoutTransforms()
-  const prefix = options?.prefix ?? "cluster"
-  const table = (name: string) => `${prefix}_${name}`
-
-  const runnersTable = table("runners")
-  const runnersTableSql = sql(runnersTable)
-
-  yield* sql.onDialectOrElse({
-    mssql: () =>
-      sql`
-        IF OBJECT_ID(N'${runnersTableSql}', N'U') IS NULL
-          CREATE TABLE ${runnersTableSql} (
-            address VARCHAR(255) PRIMARY KEY,
-            runner TEXT NOT NULL
-          )
-      `,
-    mysql: () =>
-      sql`
-        CREATE TABLE IF NOT EXISTS ${runnersTableSql} (
-          address VARCHAR(255) PRIMARY KEY,
-          runner TEXT NOT NULL
-        )
-      `,
-    pg: () =>
-      sql`
-        CREATE TABLE IF NOT EXISTS ${runnersTableSql} (
-          address VARCHAR(255) PRIMARY KEY,
-          runner TEXT NOT NULL
-        )
-      `,
-    orElse: () =>
-      // sqlite
-      sql`
-        CREATE TABLE IF NOT EXISTS ${runnersTableSql} (
-          address TEXT PRIMARY KEY,
-          runner TEXT NOT NULL
-        )
-      `
-  })
-
-  const shardsTable = table("shards")
-  const shardsTableSql = sql(shardsTable)
-
-  yield* sql.onDialectOrElse({
-    mssql: () =>
-      sql`
-        IF OBJECT_ID(N'${shardsTableSql}', N'U') IS NULL
-          CREATE TABLE ${shardsTableSql} (
-            shard_id VARCHAR(50) PRIMARY KEY,
-            address VARCHAR(255)
-          )
-      `,
-    mysql: () =>
-      sql`
-        CREATE TABLE IF NOT EXISTS ${shardsTableSql} (
-          shard_id VARCHAR(50) PRIMARY KEY,
-          address VARCHAR(255)
-        )
-      `,
-    pg: () =>
-      sql`
-        CREATE TABLE IF NOT EXISTS ${shardsTableSql} (
-          shard_id VARCHAR(50) PRIMARY KEY,
-          address VARCHAR(255)
-        )
-      `,
-    orElse: () =>
-      // sqlite
-      sql`
-        CREATE TABLE IF NOT EXISTS ${shardsTableSql} (
-          shard_id TEXT PRIMARY KEY,
-          address TEXT
-        )
-      `
-  })
-
-  const locksTable = table("locks")
-  const locksTableSql = sql(locksTable)
-
-  yield* sql.onDialectOrElse({
-    mssql: () =>
-      sql`
-        IF OBJECT_ID(N'${locksTableSql}', N'U') IS NULL
-          CREATE TABLE ${locksTableSql} (
-            shard_id VARCHAR(50) PRIMARY KEY,
-            address VARCHAR(255) NOT NULL,
-            acquired_at DATETIME NOT NULL
-          )
-      `,
-    mysql: () =>
-      sql`
-        CREATE TABLE IF NOT EXISTS ${locksTableSql} (
-          shard_id VARCHAR(50) PRIMARY KEY,
-          address VARCHAR(255) NOT NULL,
-          acquired_at DATETIME NOT NULL
-        )
-      `,
-    pg: () =>
-      sql`
-        CREATE TABLE IF NOT EXISTS ${locksTableSql} (
-          shard_id VARCHAR(50) PRIMARY KEY,
-          address VARCHAR(255) NOT NULL,
-          acquired_at TIMESTAMP NOT NULL
-        )
-      `,
-    orElse: () =>
-      // sqlite
-      sql`
-        CREATE TABLE IF NOT EXISTS ${locksTableSql} (
-          shard_id TEXT PRIMARY KEY,
-          address TEXT NOT NULL,
-          acquired_at DATETIME NOT NULL
-        )
-      `
-  })
-
-  const sqlNowString = sql.onDialectOrElse({
-    pg: () => "NOW()",
-    mysql: () => "NOW()",
-    mssql: () => "GETDATE()",
-    orElse: () => "CURRENT_TIMESTAMP"
-  })
-  const sqlNow = sql.literal(sqlNowString)
-
-  const lockExpiresAt = sql.onDialectOrElse({
-    pg: () => sql`${sqlNow} - INTERVAL '5 seconds'`,
-    mysql: () => sql`DATE_SUB(${sqlNow}, INTERVAL 5 SECOND)`,
-    mssql: () => sql`DATEADD(SECOND, -5, ${sqlNow})`,
-    orElse: () => sql`datetime(${sqlNow}, '-5 seconds')`
-  })
-
-  const acquireLock = sql.onDialectOrElse({
-    pg: () => (address: string, values: Array<any>) =>
-      sql`
-        INSERT INTO ${locksTableSql} (shard_id, address, acquired_at) VALUES ${sql.csv(values)}
-        ON CONFLICT (shard_id) DO UPDATE
-          SET address = ${address}, acquired_at = ${sqlNow}
-        WHERE ${locksTableSql}.address = ${address}
-          OR ${locksTableSql}.acquired_at < ${lockExpiresAt}
-      `,
-    mysql: () => (_address: string, values: Array<any>) =>
-      sql`
-        INSERT INTO ${locksTableSql} (shard_id, address, acquired_at) VALUES ${sql.csv(values)}
-        ON DUPLICATE KEY UPDATE
-          address = IF(address = VALUES(address) OR acquired_at < ${lockExpiresAt}, VALUES(address), address),
-          acquired_at = IF(address = VALUES(address) OR acquired_at < ${lockExpiresAt}, VALUES(acquired_at), acquired_at)
-      `.unprepared,
-    mssql: () => (_address: string, values: Array<any>) =>
-      sql`
-        MERGE ${locksTableSql} WITH (HOLDLOCK) AS target
-        USING (SELECT * FROM (VALUES ${sql.csv(values)})) AS source (shard_id, address, acquired_at)
-        ON target.shard_id = source.shard_id
-        WHEN MATCHED AND (target.address = source.address OR DATEDIFF(SECOND, target.acquired_at, ${sqlNow}) > 5) THEN
-          UPDATE SET address = source.address, acquired_at = source.acquired_at
-        WHEN NOT MATCHED THEN
-          INSERT (shard_id, address, acquired_at)
-          VALUES (source.shard_id, source.address, source.acquired_at);
-      `,
-    orElse: () => (address: string, values: Array<any>) =>
-      // sqlite
-      sql`
-        WITH source(shard_id, address, acquired_at) AS (VALUES ${sql.csv(values)})
-        INSERT INTO ${locksTableSql} (shard_id, address, acquired_at)
-        SELECT source.shard_id, source.address, source.acquired_at
-        FROM source
-        WHERE NOT EXISTS (
-          SELECT 1 FROM ${locksTableSql}
-          WHERE shard_id = source.shard_id
-            AND address != ${address}
-            AND (strftime('%s', ${sqlNow}) - strftime('%s', acquired_at)) <= 5
-        )
-        ON CONFLICT(shard_id) DO UPDATE
-          SET address = ${address}, acquired_at = ${sqlNow}
-      `
-  })
-
-  const wrapString = sql.onDialectOrElse({
-    mssql: () => (s: string) => `N'${s}'`,
-    orElse: () => (s: string) => `'${s}'`
-  })
-  const wrapStringArr = (arr: ReadonlyArray<string>) => sql.literal(arr.map(wrapString).join(", "))
-
-  const refreshShards = sql.onDialectOrElse({
-    mysql: () => (address: string, shardIds: ReadonlyArray<string>) => {
-      const shardIdsStr = wrapStringArr(shardIds)
-      return sql<ReadonlyArray<{ shard_id: string }>>`
-        UPDATE ${locksTableSql}
-        SET acquired_at = ${sqlNow}
-        WHERE address = ${address} AND shard_id IN (${shardIdsStr});
-        SELECT shard_id FROM ${locksTableSql} WHERE address = ${address} AND shard_id IN (${shardIdsStr})
-      `.unprepared.pipe(
-        Effect.map((rows) => rows[1].map((row) => [row.shard_id]))
-      )
-    },
-    mssql: () => (address: string, shardIds: ReadonlyArray<string>) =>
-      sql`
-        UPDATE ${locksTableSql}
-        SET acquired_at = ${sqlNow}
-        OUTPUT inserted.shard_id
-        WHERE address = ${address} AND shard_id IN (${wrapStringArr(shardIds)})
-      `.values,
-    orElse: () => (address: string, shardIds: ReadonlyArray<string>) =>
-      sql`
-        UPDATE ${locksTableSql}
-        SET acquired_at = ${sqlNow}
-        WHERE address = ${address} AND shard_id IN (${wrapStringArr(shardIds)})
-        RETURNING shard_id
-      `.values
-  })
-
-  return ShardStorage.makeEncoded({
-    getAssignments: sql`SELECT shard_id, address FROM ${shardsTableSql} ORDER BY shard_id`.values.pipe(
-      PersistenceError.refail,
-      withTracerDisabled
-    ) as any,
-
-    saveAssignments: (assignments) => {
-      const remove = sql`DELETE FROM ${shardsTableSql}`
-      if (assignments.length === 0) {
-        return PersistenceError.refail(remove)
-      }
-      const values = assignments.map(([shardId, address]) => sql`(${shardId}, ${address})`)
-      return remove.pipe(
-        Effect.andThen(sql`INSERT INTO ${shardsTableSql} (shard_id, address) VALUES ${sql.csv(values)}`.unprepared),
-        sql.withTransaction,
-        PersistenceError.refail,
-        withTracerDisabled
-      )
-    },
-
-    getRunners: sql`SELECT address, runner FROM ${runnersTableSql}`.values.pipe(
-      PersistenceError.refail,
-      Effect.map(Arr.map(([address, runner]) => [String(address), String(runner)] as const)),
-      withTracerDisabled
-    ),
-
-    saveRunners: (runners) => {
-      const remove = sql`DELETE FROM ${runnersTableSql}`
-      if (runners.length === 0) {
-        return PersistenceError.refail(remove)
-      }
-      const values = runners.map(([address, runner]) => sql`(${address}, ${runner})`)
-      const insert = sql`INSERT INTO ${runnersTableSql} (address, runner) VALUES ${sql.csv(values)}`.unprepared
-      return remove.pipe(
-        Effect.andThen(insert),
-        sql.withTransaction,
-        PersistenceError.refail,
-        withTracerDisabled
-      )
-    },
-
-    acquire: Effect.fnUntraced(
-      function*(address, shardIds) {
-        if (shardIds.length > 0) {
-          const values = shardIds.map((shardId) => sql`(${shardId}, ${address}, ${sqlNow})`)
-          yield* acquireLock(address, values)
-        }
-        const currentLocks = yield* sql<{ shard_id: string }>`
-          SELECT shard_id FROM ${sql(locksTable)}
-          WHERE address = ${address} AND acquired_at >= ${lockExpiresAt}
-        `.values
-        return currentLocks.map((row) => row[0] as string)
-      },
-      sql.withTransaction,
-      PersistenceError.refail,
-      withTracerDisabled
-    ),
-
-    refresh: (address, shardIds) =>
-      shardIds.length === 0
-        ? Effect.succeed([])
-        : refreshShards(address, shardIds).pipe(
-          Effect.map((rows) => rows.map((row) => row[0] as string)),
-          PersistenceError.refail,
-          withTracerDisabled
-        ),
-
-    release: (address, shardId) =>
-      sql`DELETE FROM ${locksTableSql} WHERE address = ${address} AND shard_id = ${shardId}`.pipe(
-        PersistenceError.refail,
-        withTracerDisabled
-      ),
-
-    releaseAll: (address) =>
-      sql`DELETE FROM ${locksTableSql} WHERE address = ${address}`.pipe(
-        PersistenceError.refail,
-        withTracerDisabled
-      )
-  })
-}, withTracerDisabled)
-
-/**
- * @since 1.0.0
- * @category Layers
- */
-export const layer: Layer.Layer<
-  ShardStorage.ShardStorage,
-  SqlError,
-  SqlClient.SqlClient
-> = Layer.effect(ShardStorage.ShardStorage, make())
-
-/**
- * @since 1.0.0
- * @category Layers
- */
-export const layerWith = (options: {
-  readonly prefix?: string | undefined
-}): Layer.Layer<ShardStorage.ShardStorage, SqlError, SqlClient.SqlClient> =>
-  Layer.scoped(ShardStorage.ShardStorage, make(options))
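Where the deleted `SqlShardStorage` above leased shards through timestamped rows, the new `SqlRunnerStorage` pins Postgres and MySQL shard locks to a reserved connection. A sketch of the deterministic advisory-lock keys it derives (mirroring the `lockNumbers` loop in the new module; sample values are illustrative):

```ts
// Each shard group owns a 1_000_000-wide band of advisory-lock keys and each
// shard an offset within it, so every ShardId maps to a stable integer.
const lockNumberFor = (groupIndex: number, shard: number): number =>
  (groupIndex + 1) * 1_000_000 + shard

// e.g. shard 42 of the second group maps to key 2_000_042, acquired with
// `SELECT pg_try_advisory_lock(2000042)` on the reserved session; the lock
// lives and dies with that session, hence the need for stable connections.
console.log(lockNumberFor(1, 42)) // 2000042
```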
"SynchronizedClock" - }), - Effect.forkScoped - ) - - function unsafeCurrentTimeMillis() { - return clock.unsafeCurrentTimeMillis() + driftMillis - } - function unsafeCurrentTimeNanos() { - return clock.unsafeCurrentTimeNanos() + driftNanos - } - - return Clock.Clock.of({ - [Clock.ClockTypeId]: Clock.ClockTypeId, - sleep: clock.sleep, - unsafeCurrentTimeMillis, - unsafeCurrentTimeNanos, - currentTimeMillis: Effect.sync(unsafeCurrentTimeMillis), - currentTimeNanos: Effect.sync(unsafeCurrentTimeNanos) - }) -}) - -/** - * @since 1.0.0 - * @category Layers - */ -export const layer: Layer.Layer< - never, - never, - ShardManagerClient -> = Layer.unwrapScoped(Effect.gen(function*() { - const shardManager = yield* ShardManagerClient - const clock = yield* make(shardManager.getTime) - return Layer.setClock(clock) -})) diff --git a/packages/cluster/src/index.ts b/packages/cluster/src/index.ts index 45dec199dff..3e6fc93ffd4 100644 --- a/packages/cluster/src/index.ts +++ b/packages/cluster/src/index.ts @@ -63,21 +63,11 @@ export * as EntityType from "./EntityType.js" */ export * as Envelope from "./Envelope.js" -/** - * @since 1.0.0 - */ -export * as HttpCommon from "./HttpCommon.js" - /** * @since 1.0.0 */ export * as HttpRunner from "./HttpRunner.js" -/** - * @since 1.0.0 - */ -export * as HttpShardManager from "./HttpShardManager.js" - /** * @since 1.0.0 */ @@ -121,22 +111,17 @@ export * as RunnerServer from "./RunnerServer.js" /** * @since 1.0.0 */ -export * as Runners from "./Runners.js" - -/** - * @since 1.0.0 - */ -export * as ShardId from "./ShardId.js" +export * as RunnerStorage from "./RunnerStorage.js" /** * @since 1.0.0 */ -export * as ShardManager from "./ShardManager.js" +export * as Runners from "./Runners.js" /** * @since 1.0.0 */ -export * as ShardStorage from "./ShardStorage.js" +export * as ShardId from "./ShardId.js" /** * @since 1.0.0 @@ -173,11 +158,6 @@ export * as Snowflake from "./Snowflake.js" */ export * as SocketRunner from "./SocketRunner.js" -/** - * @since 1.0.0 - */ -export * as SocketShardManager from "./SocketShardManager.js" - /** * @since 1.0.0 */ @@ -186,9 +166,4 @@ export * as SqlMessageStorage from "./SqlMessageStorage.js" /** * @since 1.0.0 */ -export * as SqlShardStorage from "./SqlShardStorage.js" - -/** - * @since 1.0.0 - */ -export * as SynchronizedClock from "./SynchronizedClock.js" +export * as SqlRunnerStorage from "./SqlRunnerStorage.js" diff --git a/packages/cluster/src/internal/entityManager.ts b/packages/cluster/src/internal/entityManager.ts index 07923e14bc9..c48e5b92b80 100644 --- a/packages/cluster/src/internal/entityManager.ts +++ b/packages/cluster/src/internal/entityManager.ts @@ -54,6 +54,7 @@ export interface EntityManager { readonly isProcessingFor: (message: Message.Incoming, options?: { readonly excludeReplies?: boolean }) => boolean + readonly clearProcessed: () => void readonly interruptShard: (shardId: ShardId) => Effect.Effect @@ -108,13 +109,15 @@ export const make = Effect.fnUntraced(function*< ) const activeServers = new Map() + const serverCloseLatches = new Map() + const processedRequestIds = new Set() const entities: ResourceMap< EntityAddress, EntityState, EntityNotAssignedToRunner > = yield* ResourceMap.make(Effect.fnUntraced(function*(address: EntityAddress) { - if (yield* options.sharding.isShutdown) { + if (!options.sharding.hasShardId(address.shardId)) { return yield* new EntityNotAssignedToRunner({ address }) } @@ -122,9 +125,13 @@ export const make = Effect.fnUntraced(function*< const endLatch = yield* Effect.makeLatch() 
// on shutdown, reset the storage for the entity - yield* Scope.addFinalizer( + yield* Scope.addFinalizerExit( scope, - Effect.ignore(options.storage.resetAddress(address)) + () => { + serverCloseLatches.get(address)?.unsafeOpen() + serverCloseLatches.delete(address) + return Effect.ignore(options.storage.resetAddress(address)) + } ) const activeRequests: EntityState["activeRequests"] = new Map() @@ -176,6 +183,20 @@ export const make = Effect.fnUntraced(function*< (isShuttingDown || Context.get(request.rpc.annotations, Uninterruptible) || isInterruptIgnore(response.exit.cause)) ) { + if (!isShuttingDown) { + return server.write(0, { + ...request.message.envelope, + id: RequestId(request.message.envelope.requestId), + tag: request.message.envelope.tag as any, + payload: new Request({ + ...request.message.envelope, + lastSentChunk: request.lastSentChunk + } as any) as any + }).pipe( + Effect.forkIn(scope) + ) + } + activeRequests.delete(response.requestId) return options.storage.unregisterReplyHandler(request.message.envelope.requestId) } return retryRespond( @@ -191,6 +212,7 @@ export const make = Effect.fnUntraced(function*< ) ).pipe( Effect.flatMap(() => { + processedRequestIds.add(request.message.envelope.requestId) activeRequests.delete(response.requestId) // ensure that the reaper does not remove the entity as we haven't @@ -304,6 +326,7 @@ export const make = Effect.fnUntraced(function*< scope, Effect.withFiberRuntime((fiber) => { activeServers.delete(address.entityId) + serverCloseLatches.set(address, Effect.unsafeMakeLatch(false)) internalInterruptors.add(fiber.id()) return state.write(0, { _tag: "Eof" }).pipe( Effect.andThen(Effect.interruptible(endLatch.await)), @@ -349,7 +372,7 @@ export const make = Effect.fnUntraced(function*< // one sender for the same request. In this case, the other senders // should resume from storage only. 
let entry = server.activeRequests.get(message.envelope.requestId) - if (entry) { + if (entry || processedRequestIds.has(message.envelope.requestId)) { return Effect.fail( new AlreadyProcessingMessage({ envelopeId: message.envelope.requestId, @@ -417,17 +440,22 @@ export const make = Effect.fnUntraced(function*< const interruptShard = (shardId: ShardId) => Effect.suspend(function loop(): Effect.Effect { - const toInterrupt = new Set() - for (const state of activeServers.values()) { + const toAwait = Arr.empty>() + activeServers.forEach((state) => { if (shardId[Equal.symbol](state.address.shardId)) { - toInterrupt.add(state) + toAwait.push(entities.removeIgnore(state.address)) } - } - if (toInterrupt.size === 0) { + }) + serverCloseLatches.forEach((latch, address) => { + if (shardId[Equal.symbol](address.shardId)) { + toAwait.push(latch.await) + } + }) + if (toAwait.length === 0) { return Effect.void } return Effect.flatMap( - Effect.forEach(toInterrupt, (state) => entities.removeIgnore(state.address), { + Effect.all(toAwait, { concurrency: "unbounded", discard: true }), @@ -440,6 +468,9 @@ export const make = Effect.fnUntraced(function*< return identity({ interruptShard, isProcessingFor(message, options) { + if (options?.excludeReplies !== true && processedRequestIds.has(message.envelope.requestId)) { + return true + } const state = activeServers.get(message.envelope.address.entityId) if (!state) return false const request = state.activeRequests.get(message.envelope.requestId) @@ -450,6 +481,9 @@ export const make = Effect.fnUntraced(function*< } return true }, + clearProcessed() { + processedRequestIds.clear() + }, sendLocal, send: (message) => decodeMessage(message).pipe( diff --git a/packages/cluster/src/internal/shardManager.ts b/packages/cluster/src/internal/shardManager.ts deleted file mode 100644 index 6a34d51f76f..00000000000 --- a/packages/cluster/src/internal/shardManager.ts +++ /dev/null @@ -1,412 +0,0 @@ -import * as Arr from "effect/Array" -import * as Clock from "effect/Clock" -import * as Effect from "effect/Effect" -import { constFalse } from "effect/Function" -import * as MutableHashMap from "effect/MutableHashMap" -import * as MutableHashSet from "effect/MutableHashSet" -import * as Option from "effect/Option" -import type { Runner } from "../Runner.js" -import type { RunnerAddress } from "../RunnerAddress.js" -import { RunnerHealth } from "../RunnerHealth.js" -import { ShardId } from "../ShardId.js" -import { ShardStorage } from "../ShardStorage.js" - -/** @internal */ -export class State { - static fromStorage = Effect.fnUntraced(function*( - shardsPerGroup: number - ) { - const storage = yield* ShardStorage - const runnerHealth = yield* RunnerHealth - - // Fetch registered runners and shard assignments from cluster storage - const storedRunners = yield* storage.getRunners - const storedAssignments = yield* storage.getAssignments - - // Determine which runners are still alive - const deadRunners = Arr.empty() - const aliveRunners = MutableHashMap.empty() - yield* Effect.forEach(storedRunners, ([address, runner]) => - Effect.map(runnerHealth.isAlive(address), (isAlive) => { - if (isAlive) { - MutableHashMap.set(aliveRunners, address, runner) - } else { - deadRunners.push(runner) - } - }), { concurrency: "unbounded", discard: true }) - if (deadRunners.length > 0) { - yield* Effect.logWarning("Ignoring runners that are no longer considered alive:", deadRunners) - } - - // Determine which shards remain unassigned to a runner - const assignedShards = MutableHashMap.empty() - 
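The `entityManager` changes above introduce reply-aware de-duplication: once a request's reply has been persisted, its id enters `processedRequestIds`, and a re-delivered envelope fails with `AlreadyProcessingMessage` rather than re-running the handler. A stripped-down sketch of the idea (not the actual module; ids are modelled here as plain bigints):

```ts
// Remember ids of requests whose replies are already persisted, so that
// duplicate deliveries short-circuit instead of re-executing the handler.
const processed = new Set<bigint>()

const offer = (requestId: bigint, run: () => void): "ran" | "duplicate" => {
  if (processed.has(requestId)) return "duplicate"
  run()
  processed.add(requestId)
  return "ran"
}

// Mirrors the new `clearProcessed` hook: the caller can drop the set wholesale.
const clearProcessed = () => processed.clear()
```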
diff --git a/packages/cluster/src/internal/shardManager.ts b/packages/cluster/src/internal/shardManager.ts
deleted file mode 100644
index 6a34d51f76f..00000000000
--- a/packages/cluster/src/internal/shardManager.ts
+++ /dev/null
@@ -1,412 +0,0 @@
-import * as Arr from "effect/Array"
-import * as Clock from "effect/Clock"
-import * as Effect from "effect/Effect"
-import { constFalse } from "effect/Function"
-import * as MutableHashMap from "effect/MutableHashMap"
-import * as MutableHashSet from "effect/MutableHashSet"
-import * as Option from "effect/Option"
-import type { Runner } from "../Runner.js"
-import type { RunnerAddress } from "../RunnerAddress.js"
-import { RunnerHealth } from "../RunnerHealth.js"
-import { ShardId } from "../ShardId.js"
-import { ShardStorage } from "../ShardStorage.js"
-
-/** @internal */
-export class State {
-  static fromStorage = Effect.fnUntraced(function*(
-    shardsPerGroup: number
-  ) {
-    const storage = yield* ShardStorage
-    const runnerHealth = yield* RunnerHealth
-
-    // Fetch registered runners and shard assignments from cluster storage
-    const storedRunners = yield* storage.getRunners
-    const storedAssignments = yield* storage.getAssignments
-
-    // Determine which runners are still alive
-    const deadRunners = Arr.empty<Runner>()
-    const aliveRunners = MutableHashMap.empty<RunnerAddress, Runner>()
-    yield* Effect.forEach(storedRunners, ([address, runner]) =>
-      Effect.map(runnerHealth.isAlive(address), (isAlive) => {
-        if (isAlive) {
-          MutableHashMap.set(aliveRunners, address, runner)
-        } else {
-          deadRunners.push(runner)
-        }
-      }), { concurrency: "unbounded", discard: true })
-    if (deadRunners.length > 0) {
-      yield* Effect.logWarning("Ignoring runners that are no longer considered alive:", deadRunners)
-    }
-
-    // Determine which shards remain unassigned to a runner
-    const assignedShards = MutableHashMap.empty<ShardId, RunnerAddress>()
-    const invalidAssignments = Arr.empty<[ShardId, RunnerAddress]>()
-    for (const [shard, address] of storedAssignments) {
-      if (Option.isSome(address) && MutableHashMap.has(aliveRunners, address.value)) {
-        MutableHashMap.set(assignedShards, shard, address.value)
-      } else if (Option.isSome(address)) {
-        invalidAssignments.push([shard, address.value])
-      }
-    }
-    if (invalidAssignments.length > 0) {
-      yield* Effect.logWarning(
-        "Ignoring shard assignments for runners that are no longer considered alive: ",
-        invalidAssignments
-      )
-    }
-
-    // Construct the initial state
-    const now = yield* Clock.currentTimeMillis
-    const allRunners = MutableHashMap.empty<RunnerAddress, RunnerWithMetadata>()
-    const runnerState = new Map<string, MutableHashMap.MutableHashMap<RunnerAddress, RunnerWithMetadata>>()
-    for (const [address, runner] of aliveRunners) {
-      const withMetadata = RunnerWithMetadata({ runner, registeredAt: now })
-      MutableHashMap.set(allRunners, address, withMetadata)
-      for (const group of runner.groups) {
-        let groupMap = runnerState.get(group)
-        if (!groupMap) {
-          groupMap = MutableHashMap.empty()
-          runnerState.set(group, groupMap)
-        }
-        MutableHashMap.set(groupMap, address, withMetadata)
-      }
-    }
-
-    const shardState = new Map<string, Map<number, Option.Option<RunnerAddress>>>()
-    for (const group of runnerState.keys()) {
-      const groupMap = new Map<number, Option.Option<RunnerAddress>>()
-      shardState.set(group, groupMap)
-      for (let n = 1; n <= shardsPerGroup; n++) {
-        const shardId = new ShardId({ group, id: n })
-        groupMap.set(n, MutableHashMap.get(assignedShards, shardId))
-      }
-    }
-
-    return new State(allRunners, runnerState, shardState, shardsPerGroup)
-  })
-
-  constructor(
-    readonly allRunners: MutableHashMap.MutableHashMap<RunnerAddress, RunnerWithMetadata>,
-    readonly runners: Map<string, MutableHashMap.MutableHashMap<RunnerAddress, RunnerWithMetadata>>,
-    readonly shards: Map<string, Map<number, Option.Option<RunnerAddress>>>,
-    readonly shardsPerGroup: number
-  ) {
-    this.assignments = MutableHashMap.empty<ShardId, Option.Option<RunnerAddress>>()
-    this.perRunner = new Map<string, MutableHashMap.MutableHashMap<RunnerAddress, Set<number>>>()
-
-    for (const [address, meta] of this.allRunners) {
-      for (const group of meta.runner.groups) {
-        let runnerMap = this.perRunner.get(group)
-        if (!runnerMap) {
-          runnerMap = MutableHashMap.empty<RunnerAddress, Set<number>>()
-          this.perRunner.set(group, runnerMap)
-        }
-        MutableHashMap.set(runnerMap, address, new Set())
-      }
-    }
-
-    for (const [group, groupMap] of this.shards) {
-      const perRunnerMap = this.perRunner.get(group)!
-      for (const [id, address_] of groupMap) {
-        const address = Option.filter(address_, (addr) => MutableHashMap.has(this.allRunners, addr))
-        MutableHashMap.set(this.assignments, new ShardId({ group, id }), address)
-        if (Option.isSome(address)) {
-          Option.getOrUndefined(MutableHashMap.get(perRunnerMap, address.value))?.add(id)
-        }
-      }
-    }
-  }
-
-  readonly assignments: MutableHashMap.MutableHashMap<ShardId, Option.Option<RunnerAddress>>
-  readonly perRunner: Map<string, MutableHashMap.MutableHashMap<RunnerAddress, Set<number>>>
-
-  addGroup(group: string): void {
-    this.runners.set(group, MutableHashMap.empty())
-    const shardMap = new Map<number, Option.Option<RunnerAddress>>()
-    this.shards.set(group, shardMap)
-    for (let n = 1; n <= this.shardsPerGroup; n++) {
-      shardMap.set(n, Option.none())
-      MutableHashMap.set(this.assignments, new ShardId({ group, id: n }), Option.none())
-    }
-
-    const perRunnerMap = MutableHashMap.empty<RunnerAddress, Set<number>>()
-    this.perRunner.set(group, perRunnerMap)
-    for (const [address] of this.allRunners) {
-      MutableHashMap.set(perRunnerMap, address, new Set())
-    }
-  }
-
-  addAssignments(
-    shards: Iterable<ShardId>,
-    address: Option.Option<RunnerAddress>
-  ) {
-    for (const shardId of shards) {
-      const currentAddress = Option.flatten(MutableHashMap.get(this.assignments, shardId))
-      MutableHashMap.set(this.assignments, shardId, address)
-      this.shards.get(shardId.group)?.set(shardId.id, address)
-
-      const perRunner = this.perRunner.get(shardId.group)!
-      if (Option.isSome(currentAddress)) {
-        Option.getOrUndefined(MutableHashMap.get(perRunner, currentAddress.value))?.delete(shardId.id)
-      }
-      if (Option.isSome(address)) {
-        Option.getOrUndefined(MutableHashMap.get(perRunner, address.value))?.add(shardId.id)
-      }
-    }
-  }
-
-  addRunner(runner: Runner, registeredAt: number): void {
-    const withMetadata = RunnerWithMetadata({ runner, registeredAt })
-    MutableHashMap.set(this.allRunners, runner.address, withMetadata)
-    for (const group of runner.groups) {
-      if (!this.runners.has(group)) {
-        this.addGroup(group)
-      }
-      const groupMap = this.runners.get(group)!
-      MutableHashMap.set(groupMap, runner.address, withMetadata)
-      const perRunner = this.perRunner.get(group)!
-      MutableHashMap.set(perRunner, runner.address, new Set())
-    }
-  }
-
-  removeRunner(address: RunnerAddress): void {
-    MutableHashMap.remove(this.allRunners, address)
-    for (const group of this.runners.keys()) {
-      const groupMap = this.runners.get(group)!
-      MutableHashMap.remove(groupMap, address)
-
-      const perRunner = this.perRunner.get(group)!
-      MutableHashMap.remove(perRunner, address)
-    }
-  }
-
-  get maxVersion(): Option.Option<number> {
-    if (MutableHashMap.size(this.allRunners) === 0) return Option.none()
-    let version: number | undefined = undefined
-    for (const [, meta] of this.allRunners) {
-      if (version === undefined || meta.runner.version > version) {
-        version = meta.runner.version
-      }
-    }
-    return Option.some(version!)
-  }
-
-  allRunnersHaveVersion(version: Option.Option<number>): boolean {
-    return version.pipe(
-      Option.map((max) => Arr.every(this.runnerVersions, (version) => version === max)),
-      Option.getOrElse(constFalse)
-    )
-  }
-
-  get shardStats(): {
-    readonly perRunner: Map<string, number>
-    readonly unassigned: number
-  } {
-    const perRunner = new Map<string, number>()
-    let unassigned = 0
-    for (const [, address] of this.assignments) {
-      if (Option.isNone(address)) {
-        unassigned++
-        continue
-      }
-      const runner = address.value.toString()
-      const count = perRunner.get(runner) ?? 0
-      perRunner.set(runner, count + 1)
-    }
-
-    return { perRunner, unassigned }
-  }
-
-  shardsPerRunner(group: string): MutableHashMap.MutableHashMap<RunnerAddress, Set<number>> {
-    const shards = MutableHashMap.empty<RunnerAddress, Set<number>>()
-    const perRunner = this.perRunner.get(group)
-    if (!perRunner || MutableHashMap.isEmpty(perRunner)) return shards
-
-    for (const [address, shardSet] of perRunner) {
-      MutableHashMap.set(shards, address, new Set(shardSet))
-    }
-
-    return shards
-  }
-
-  averageShardsPerRunner(group: string): number {
-    const runnerCount = MutableHashMap.size(this.runners.get(group) ?? MutableHashMap.empty())
-    const shardGroup = this.shards.get(group) ?? new Map()
-    return runnerCount > 0 ? shardGroup.size / runnerCount : 0
-  }
-
-  get allUnassignedShards(): Array<ShardId> {
-    const unassigned: Array<ShardId> = []
-    for (const [shardId, address] of this.assignments) {
-      if (Option.isNone(address)) {
-        unassigned.push(shardId)
-      }
-    }
-    return unassigned
-  }
-
-  unassignedShards(group: string): Array<number> {
-    const shardIds: Array<number> = []
-    const assignments = this.shards.get(group)!
-    for (const [shard, address] of assignments) {
-      if (Option.isNone(address)) {
-        shardIds.push(shard)
-      }
-    }
-    return shardIds
-  }
-
-  private get runnerVersions(): Array<number> {
-    const runnerVersions: Array<number> = []
-    for (const [, meta] of this.allRunners) {
-      runnerVersions.push(meta.runner.version)
-    }
-    return runnerVersions
-  }
-}
-
-/** @internal */
-export interface RunnerWithMetadata {
-  readonly runner: Runner
-  readonly registeredAt: number
-}
-/** @internal */
-export const RunnerWithMetadata = (runner: RunnerWithMetadata): RunnerWithMetadata => runner
-
-/** @internal */
-export function decideAssignmentsForShards(state: State, group: string): readonly [
-  assignments: MutableHashMap.MutableHashMap<RunnerAddress, Set<number>>,
-  unassignments: MutableHashMap.MutableHashMap<RunnerAddress, Set<number>>,
-  changes: MutableHashSet.MutableHashSet<RunnerAddress>
-] {
-  const shardsPerRunner = state.shardsPerRunner(group)
-  const maxVersion = state.maxVersion
-  const shardsToRebalance = state.unassignedShards(group)
-
-  if (state.allRunnersHaveVersion(maxVersion)) {
-    const averageShardsPerRunner = state.averageShardsPerRunner(group)
-    MutableHashMap.forEach(shardsPerRunner, (shards) => {
-      const extraShards = Math.max(0, shards.size - averageShardsPerRunner)
-      const iter = shards.values()
-      for (let i = 0; i < extraShards; i++) {
-        const shard = iter.next()
-        if (shard.done) break
-        shardsToRebalance.push(shard.value)
-      }
-    })
-  }
-
-  return pickNewRunners(shardsToRebalance, state, group, shardsPerRunner, maxVersion)
-}
-
-function pickNewRunners(
-  shardsToRebalance: ReadonlyArray<number>,
-  state: State,
-  group: string,
-  shardsPerRunner: MutableHashMap.MutableHashMap<RunnerAddress, Set<number>>,
-  maybeMaxVersion = state.maxVersion
-): readonly [
-  assignments: MutableHashMap.MutableHashMap<RunnerAddress, Set<number>>,
-  unassignments: MutableHashMap.MutableHashMap<RunnerAddress, Set<number>>,
-  changes: MutableHashSet.MutableHashSet<RunnerAddress>
-] {
-  const addressAssignments = MutableHashMap.empty<RunnerAddress, Set<number>>()
-  const unassignments = MutableHashMap.empty<RunnerAddress, Set<number>>()
-  const changes = MutableHashSet.empty<RunnerAddress>()
-
-  if (Option.isNone(maybeMaxVersion)) {
-    return [addressAssignments, unassignments, changes]
-  }
-  const maxVersion = maybeMaxVersion.value
-
-  const runnerGroup = state.runners.get(group)!
-  const shardsGroup = state.shards.get(group)!
-
-  for (const shardId of shardsToRebalance) {
-    // Find the runner with the fewest assigned shards
-    let candidate: RunnerAddress | undefined
-    let candidateShards: Set<number> | undefined
-
-    for (const [address, shards] of shardsPerRunner) {
-      // Keep only runners with the maximum version
-      const maybeRunnerMeta = MutableHashMap.get(runnerGroup, address)
-      if (Option.isNone(maybeRunnerMeta)) continue
-      const runnerMeta = maybeRunnerMeta.value
-      if (runnerMeta.runner.version !== maxVersion) continue
-
-      // Do not assign to a runner that has unassignments in the same rebalance
-      if (MutableHashMap.has(unassignments, address)) continue
-
-      if (candidate === undefined || shards.size < candidateShards!.size) {
-        candidate = address
-        candidateShards = shards
-      }
-    }
-    if (!candidate || !candidateShards) break
-
-    // If the old runner is the same as the new runner, do nothing
-    const oldRunner = Option.getOrUndefined(shardsGroup.get(shardId) ?? Option.none())
-    if (oldRunner && oldRunner.toString() === candidate.toString()) {
-      continue
-    }
-    const oldShards = oldRunner && Option.getOrUndefined(MutableHashMap.get(shardsPerRunner, oldRunner))
-
-    // If the new runner has one less, as many, or more shards than the
-    // old runner, do not change anything
-    if (oldShards && candidateShards.size + 1 >= oldShards.size) continue
-
-    // Otherwise create a new assignment
-    MutableHashMap.modifyAt(
-      addressAssignments,
-      candidate,
-      Option.match({
-        onNone: () => Option.some(new Set([shardId])),
-        onSome: (shards) => {
-          shards.add(shardId)
-          return Option.some(shards)
-        }
-      })
-    )
-    if (oldRunner) {
-      MutableHashMap.modifyAt(
-        unassignments,
-        oldRunner,
-        Option.match({
-          onNone: () => Option.some(new Set([shardId])),
-          onSome: (shards) => {
-            shards.add(shardId)
-            return Option.some(shards)
-          }
-        })
-      )
-    }
-
-    // Move the shard to the new runner
-    candidateShards.add(shardId)
-    if (oldShards) {
-      oldShards.delete(shardId)
-    }
-
-    // Track changes
-    MutableHashSet.add(changes, candidate)
-    if (oldRunner) MutableHashSet.add(changes, oldRunner)
-  }
-
-  return [addressAssignments, unassignments, changes]
-}
-
-/** @internal */
-export const addAllNested = <K, V>(
-  self: MutableHashMap.MutableHashMap<K, MutableHashSet.MutableHashSet<V>>,
-  key: K,
-  values: Iterable<V>
-) => {
-  const oset = MutableHashMap.get(self, key)
-  if (Option.isSome(oset)) {
-    for (const value of values) {
-      MutableHashSet.add(oset.value, value)
-    }
-  } else {
-    MutableHashMap.set(self, key, MutableHashSet.fromIterable(values))
-  }
-}
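With `ShardManager` and `ShardStorage` removed, the test diff below swaps in `RunnerStorage.layerMemory` plus `RunnerHealth.layerNoop`. The same substitution shown standalone (layer names come from the test changes; the exact `provide`/`provideMerge` split may vary per application):

```ts
import { MessageStorage, RunnerHealth, Runners, RunnerStorage, Sharding, ShardingConfig } from "@effect/cluster"
import { Layer } from "effect"

// In-memory runner storage and a no-op health check replace the removed
// ShardManager.layerClientLocal / ShardStorage.layerMemory pair in tests.
const ShardingTest = Sharding.layer.pipe(
  Layer.provide(Runners.layerNoop),
  Layer.provideMerge(MessageStorage.layerMemory),
  Layer.provide(RunnerStorage.layerMemory),
  Layer.provide(RunnerHealth.layerNoop),
  Layer.provide(ShardingConfig.layer())
)
```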
ShardingConfig.layer({ const TestWorkflowEngine = ClusterWorkflowEngine.layer.pipe( Layer.provideMerge(Sharding.layer), - Layer.provide(ShardManager.layerClientLocal), - Layer.provide(ShardStorage.layerMemory), Layer.provide(Runners.layerNoop), Layer.provideMerge(MessageStorage.layerMemory), + Layer.provide(RunnerStorage.layerMemory), + Layer.provide(RunnerHealth.layerNoop), Layer.provide(TestShardingConfig) ) @@ -509,12 +518,41 @@ const SuspendOnFailureWorkflowLayer = SuspendOnFailureWorkflow.toLayer(Effect.fn }) })) +const CatchWorkflow = Workflow.make({ + name: "CatchWorkflow", + payload: { + id: Schema.String + }, + idempotencyKey(payload) { + return payload.id + } +}) + +const CatchWorkflowLayer = CatchWorkflow.toLayer(Effect.fnUntraced(function*() { + const flags = yield* Flags + yield* Activity.make({ + name: "fail", + execute: Effect.die("boom") + }).pipe( + Effect.catchAllCause((cause) => + Activity.make({ + name: "log", + execute: Effect.suspend(() => { + flags.set("catch", true) + return Effect.log(cause) + }) + }) + ) + ) +})) + const TestWorkflowLayer = EmailWorkflowLayer.pipe( Layer.merge(RaceWorkflowLayer), Layer.merge(DurableRaceWorkflowLayer), Layer.merge(ParentWorkflowLayer), Layer.merge(ChildWorkflowLayer), Layer.merge(SuspendOnFailureWorkflowLayer), + Layer.merge(CatchWorkflowLayer), Layer.provideMerge(Flags.Default), Layer.provideMerge(TestWorkflowEngine) ) diff --git a/packages/cluster/test/MessageStorage.test.ts b/packages/cluster/test/MessageStorage.test.ts index 10dacc967ed..f58130f7a31 100644 --- a/packages/cluster/test/MessageStorage.test.ts +++ b/packages/cluster/test/MessageStorage.test.ts @@ -14,6 +14,7 @@ import { Headers } from "@effect/platform" import { Rpc, RpcSchema } from "@effect/rpc" import { describe, expect, it } from "@effect/vitest" import { Context, Effect, Exit, Layer, Option, PrimaryKey, Schema } from "effect" +import * as TestClock from "effect/TestClock" const MemoryLive = MessageStorage.layerMemory.pipe( Layer.provideMerge(Snowflake.layerGenerator), @@ -79,32 +80,32 @@ describe("MessageStorage", () => { const latch = yield* Effect.makeLatch() const request = yield* makeRequest() yield* storage.saveRequest(request) - yield* storage.registerReplyHandler( + const fiber = yield* storage.registerReplyHandler( new Message.OutgoingRequest({ ...request, respond: () => latch.open - }), - Effect.void - ) + }) + ).pipe(Effect.fork) + yield* TestClock.adjust(1) yield* storage.saveReply(yield* makeReply(request)) yield* latch.await + yield* fiber.await }).pipe(Effect.provide(MemoryLive))) it.effect("unregisterReplyHandler", () => Effect.gen(function*() { const storage = yield* MessageStorage.MessageStorage - const latch = yield* Effect.makeLatch() const request = yield* makeRequest() yield* storage.saveRequest(request) - yield* storage.registerReplyHandler( + const fiber = yield* storage.registerReplyHandler( new Message.OutgoingRequest({ ...request, respond: () => Effect.void - }), - latch.open - ) + }) + ).pipe(Effect.fork) + yield* TestClock.adjust(1) yield* storage.unregisterReplyHandler(request.envelope.requestId) - yield* latch.await + yield* fiber.await }).pipe(Effect.provide(MemoryLive))) }) }) diff --git a/packages/cluster/test/ShardManager.bench.ts b/packages/cluster/test/ShardManager.bench.ts deleted file mode 100644 index fc568359e81..00000000000 --- a/packages/cluster/test/ShardManager.bench.ts +++ /dev/null @@ -1,143 +0,0 @@ -import { - MessageStorage, - Runner, - RunnerAddress, - RunnerHealth, - Runners, - ShardingConfig, - ShardManager, - 
ShardStorage -} from "@effect/cluster" -import { bench, describe, expect } from "@effect/vitest" -import { Array, Data, Effect, Layer, Logger, MutableHashMap, Option, TestClock, TestContext } from "effect" -import { decideAssignmentsForShards, RunnerWithMetadata, State } from "../src/internal/shardManager.js" - -describe("ShardManager", () => { - const shards300 = Array.makeBy( - 300, - (i) => [i + 1, Option.none()] as const - ) - const state30 = makeDefaultState( - MutableHashMap.fromIterable(Array.makeBy(30, (i) => { - const address = RunnerAddress.make(`${i}`, i) - const meta = RunnerWithMetadata({ - runner: Runner.Runner.make({ address, groups: ["default"], version: 1 }), - registeredAt: Date.now() - }) - return [address, meta] as const - })), - new Map(shards300) - ) - - const shards1000 = Array.makeBy( - 1000, - (i) => [i + 1, Option.none()] as const - ) - const state100 = makeDefaultState( - MutableHashMap.fromIterable(Array.makeBy(100, (i) => { - const address = RunnerAddress.make(`${i}`, i) - const meta = RunnerWithMetadata({ - runner: Runner.make({ address, groups: ["default"], version: 1 }), - registeredAt: Date.now() - }) - return [address, meta] as const - })), - new Map(shards1000) - ) - - bench("decideAssignmentsForShards - 30 runners 300 shards", () => { - decideAssignmentsForShards(state30, "default") - }) - - bench("decideAssignmentsForShards - 100 runners 1000 shards", () => { - decideAssignmentsForShards(state100, "default") - }) - - const ShardManagerLive = ShardManager.layer.pipe( - Layer.provide(ShardManager.layerConfig({ - rebalanceDebounce: 0 - })) - ) - const RunnerHealthLive = RunnerHealth.layer.pipe( - Layer.provideMerge(Runners.layerNoop) - ) - const TestLive = ShardManagerLive.pipe( - Layer.provideMerge(Layer.mergeAll( - ShardStorage.layerNoop, - RunnerHealthLive - )), - Layer.provide(ShardingConfig.layer()), - Layer.provide(MessageStorage.layerNoop), - Layer.provideMerge(TestContext.TestContext), - Layer.provideMerge(Logger.remove(Logger.defaultLogger)) - ) - - bench("ShardManager - 50 runners up & down", () => - Effect.gen(function*() { - const manager = yield* ShardManager.ShardManager - - yield* simulate(Array.range(1, 50).map(registerRunner)) - yield* TestClock.adjust("20 seconds") - - const assignments = yield* manager.getAssignments - const values = globalThis.Array.from(assignments, ([, address]) => address) - const allRunnersAssigned = Array.every(values, Option.isSome) - expect(allRunnersAssigned).toBe(true) - - yield* simulate(Array.range(1, 50).map(unregisterRunner)) - yield* TestClock.adjust("1 second") - - const assignments2 = yield* manager.getAssignments - const values2 = globalThis.Array.from(assignments2, ([, address]) => address) - const allRunnersUnassigned = Array.every(values2, Option.isNone) - expect(allRunnersUnassigned).toBe(true) - }).pipe( - Effect.provide(TestLive), - Effect.runPromise - )) -}) - -function registerRunner(n: number) { - const runner = Runner.make({ - address: RunnerAddress.make("server", n), - groups: ["default"], - version: 1 - }) - return SimulationEvent.RegisterRunner({ runner }) -} -function unregisterRunner(n: number) { - const address = RunnerAddress.make("server", n) - return SimulationEvent.UnregisterRunner({ address }) -} - -type SimulationEvent = Data.TaggedEnum<{ - readonly RegisterRunner: { readonly runner: Runner.Runner } - readonly UnregisterRunner: { readonly address: RunnerAddress.RunnerAddress } -}> -const SimulationEvent = Data.taggedEnum() - -const handleEvent = SimulationEvent.$match({ - 
RegisterRunner: ({ runner }) => - ShardManager.ShardManager.pipe( - Effect.flatMap((manager) => manager.register(runner)) - ), - UnregisterRunner: ({ address }) => - ShardManager.ShardManager.pipe( - Effect.flatMap((manager) => manager.unregister(address)) - ) -}) - -function simulate(events: ReadonlyArray) { - return Effect.forEach(events, handleEvent, { discard: true }) -} - -const makeDefaultState = ( - runners: MutableHashMap.MutableHashMap, - shards: Map> -) => - new State( - runners, - new Map([["default", runners]]), - new Map([["default", shards]]), - shards.size - ) diff --git a/packages/cluster/test/ShardManager.test.ts b/packages/cluster/test/ShardManager.test.ts deleted file mode 100644 index 057f68531d1..00000000000 --- a/packages/cluster/test/ShardManager.test.ts +++ /dev/null @@ -1,357 +0,0 @@ -import type { ShardId } from "@effect/cluster" -import { - MessageStorage, - Runner, - RunnerAddress, - RunnerHealth, - Runners, - ShardingConfig, - ShardManager, - ShardStorage -} from "@effect/cluster" -import { describe, expect, it } from "@effect/vitest" -import { Array, Data, Effect, Fiber, Iterable, Layer, MutableHashMap, Option, pipe, TestClock } from "effect" -import { decideAssignmentsForShards, RunnerWithMetadata, State } from "../src/internal/shardManager.js" - -const runner1 = RunnerWithMetadata({ - runner: Runner.make({ address: RunnerAddress.make("1", 1), groups: ["default"], version: 1 }), - registeredAt: Number.MIN_SAFE_INTEGER -}) -const runner2 = RunnerWithMetadata({ - runner: Runner.make({ address: RunnerAddress.make("2", 2), groups: ["default", "custom"], version: 1 }), - registeredAt: Number.MIN_SAFE_INTEGER -}) -const runner3 = RunnerWithMetadata({ - runner: Runner.make({ address: RunnerAddress.make("3", 3), groups: ["default", "custom"], version: 1 }), - registeredAt: Number.MIN_SAFE_INTEGER -}) - -describe("ShardManager", () => { - describe("Rebalancing", () => { - it("should rebalance unbalanced assignments", () => { - const state = makeDefaultState( - MutableHashMap.make( - [runner1.runner.address, runner1], - [runner2.runner.address, runner2] - ), - new Map([ - [1, Option.some(runner1.runner.address)], - [2, Option.some(runner1.runner.address)] - ]) - ) - const [assignments, unassignments] = decideAssignmentsForShards(state, "default") - expect(MutableHashMap.has(assignments, runner2.runner.address)).toBe(true) - expect(MutableHashMap.size(assignments)).toBe(1) - expect(MutableHashMap.has(unassignments, runner1.runner.address)).toBe(true) - expect(MutableHashMap.size(unassignments)).toBe(1) - }) - - it("should not rebalance to runners with an older version", () => { - const oldRunner2 = RunnerWithMetadata({ - runner: Runner.make({ address: runner2.runner.address, groups: ["default"], version: 0 }), - registeredAt: runner2.registeredAt - }) - const state = makeDefaultState( - MutableHashMap.make( - [runner1.runner.address, runner1], - [runner2.runner.address, oldRunner2] - ), - new Map([ - [1, Option.some(runner1.runner.address)], - [2, Option.some(runner1.runner.address)] - ]) - ) - const [assignments, unassignments] = decideAssignmentsForShards(state, "default") - expect(MutableHashMap.size(assignments) === 0).toBe(true) - expect(MutableHashMap.size(unassignments) === 0).toBe(true) - }) - - it("should not rebalance when already well-balanced", () => { - const state = makeDefaultState( - MutableHashMap.make( - [runner1.runner.address, runner1], - [runner2.runner.address, runner2] - ), - new Map([ - [1, Option.some(runner1.runner.address)], - [2, 
Option.some(runner2.runner.address)] - ]) - ) - const [assignments, unassignments] = decideAssignmentsForShards(state, "default") - expect(MutableHashMap.isEmpty(assignments)).toBe(true) - expect(MutableHashMap.isEmpty(unassignments)).toBe(true) - }) - - it("should not rebalance when there is only a one-shard difference", () => { - const state = makeDefaultState( - MutableHashMap.make( - [runner1.runner.address, runner1], - [runner2.runner.address, runner2] - ), - new Map([ - [1, Option.some(runner1.runner.address)], - [2, Option.some(runner1.runner.address)], - [3, Option.some(runner2.runner.address)] - ]) - ) - const [assignments, unassignments] = decideAssignmentsForShards(state, "default") - expect(MutableHashMap.isEmpty(assignments)).toBe(true) - expect(MutableHashMap.isEmpty(unassignments)).toBe(true) - }) - - it("should rebalance when there is more than a one-shard difference", () => { - const state = makeDefaultState( - MutableHashMap.make( - [runner1.runner.address, runner1], - [runner2.runner.address, runner2] - ), - new Map([ - [1, Option.some(runner1.runner.address)], - [2, Option.some(runner1.runner.address)], - [3, Option.some(runner1.runner.address)], - [4, Option.some(runner2.runner.address)] - ]) - ) - const [assignments, unassignments] = decideAssignmentsForShards(state, "default") - expect(MutableHashMap.has(assignments, runner2.runner.address)).toBe(true) - expect(MutableHashMap.size(assignments)).toBe(1) - expect(MutableHashMap.has(unassignments, runner1.runner.address)).toBe(true) - expect(MutableHashMap.size(unassignments)).toBe(1) - }) - - it("should pick the runner with less shards", () => { - const state = makeDefaultState( - MutableHashMap.make( - [runner1.runner.address, runner1], - [runner2.runner.address, runner2], - [runner3.runner.address, runner3] - ), - new Map([ - [1, Option.some(runner1.runner.address)], - [2, Option.some(runner1.runner.address)], - [3, Option.some(runner2.runner.address)] - ]) - ) - const [assignments, unassignments] = decideAssignmentsForShards(state, "default") - expect(MutableHashMap.has(assignments, runner3.runner.address)).toBe(true) - expect(MutableHashMap.size(assignments)).toBe(1) - expect(MutableHashMap.has(unassignments, runner1.runner.address)).toBe(true) - expect(MutableHashMap.size(unassignments)).toBe(1) - }) - - it("should not rebalance if there are no runners", () => { - const state = makeDefaultState( - MutableHashMap.empty(), - new Map([ - [1, Option.some(runner1.runner.address)] - ]) - ) - const [assignments, unassignments] = decideAssignmentsForShards(state, "default") - expect(MutableHashMap.isEmpty(assignments)).toBe(true) - expect(MutableHashMap.isEmpty(unassignments)).toBe(true) - }) - }) - - describe.concurrent("Simulation", () => { - const ShardManagerLive = ShardManager.layer.pipe( - Layer.provide(ShardManager.layerConfig({ - rebalanceDebounce: 500, - rebalanceInterval: "20 seconds" - })) - ) - const RunnerHealthLive = RunnerHealth.layer.pipe( - Layer.provideMerge(Runners.layerNoop) - ) - const TestLive = ShardManagerLive.pipe( - Layer.provideMerge(Layer.mergeAll( - ShardStorage.layerNoop, - RunnerHealthLive - )), - Layer.provide([ - MessageStorage.layerNoop, - ShardingConfig.layer({ - shardGroups: ["custom"] - }) - ]) - ) - - it.effect("should successfully scale up", () => - Effect.gen(function*() { - const manager = yield* ShardManager.ShardManager - - // Setup 20 runners first - yield* simulate(Array.range(1, 20).map(registerRunner)) - - // Check that all runners are assigned and have 15 shards each - const 
assignments = yield* manager.getAssignments - const values = globalThis.Array.from(assignments, ([, address]) => address) - const allRunnersAssigned = Array.every(values, Option.isSome) - expect(allRunnersAssigned).toBe(true) - let shardsPerRunner = getShardsPerRunner(assignments, "default") - expect(shardsPerRunner.every((shards) => shards.length === 15)).toBe(true) - shardsPerRunner = getShardsPerRunner(assignments, "custom") - expect(shardsPerRunner.every((shards) => shards.length === 30)).toBe(true) - - // Setup another 5 runners - yield* simulate(Array.range(21, 25).map(registerRunner)) - yield* TestClock.adjust("20 seconds") - - // Check that each of the new runners received 12 shards - const assignments2 = yield* manager.getAssignments.pipe( - Effect.map(Iterable.filter(([shardId, address]) => - shardId.group === "default" && Option.isSome(address) && address.value.port > 20 - )) - ) - const shardsPerRunner2 = getShardsPerRunner(assignments2, "default") - expect(shardsPerRunner2.every((shards) => - shards.length === 12 - )).toBe(true) - }).pipe(Effect.provide(TestLive)), 10_000) - - it.effect("should successfully scale down", () => - Effect.gen(function*() { - const manager = yield* ShardManager.ShardManager - - // Setup 25 runners - yield* simulate(Array.range(1, 25).map(registerRunner)) - - // Check that all runners are assigned and have 12 shards each - const assignments = yield* manager.getAssignments - const values = globalThis.Array.from(assignments, ([, address]) => address) - const allRunnersAssigned = Array.every(values, Option.isSome) - expect(allRunnersAssigned).toBe(true) - let shardsPerRunner = getShardsPerRunner(assignments, "default") - expect(shardsPerRunner.every((shards) => shards.length === 12)).toBe(true) - shardsPerRunner = getShardsPerRunner(assignments, "custom") - expect(shardsPerRunner.every((shards) => shards.length === 25)).toBe(true) - - // Remove 5 runners - yield* simulate(Array.range(21, 25).map(unregisterRunner)) - yield* TestClock.adjust("1 second") - - // Check that all shards have already been rebalanced - const assignments2 = yield* manager.getAssignments - const allRunnersUnassigned = pipe( - Array.fromIterable(assignments2), - Array.every(([, address]) => Option.isSome(address) && address.value.port <= 20) - ) - expect(allRunnersUnassigned).toBe(true) - const shardsPerRunner2 = getShardsPerRunner(assignments2, "default") - expect(shardsPerRunner2.every((shards) => shards.length === 15)).toBe(true) - }).pipe(Effect.provide(TestLive)), 10_000) - - it.effect("should save state to storage when restarted", () => - Effect.gen(function*() { - const setup = Effect.gen(function*() { - const storage = yield* ShardStorage.ShardStorage - - yield* simulate(Array.range(1, 10).map(registerRunner)) - yield* TestClock.adjust("20 seconds") - - // Wait for the forked daemon fibers to do their work - yield* Effect.iterate(Array.empty<[ShardId.ShardId, Option.Option<RunnerAddress.RunnerAddress>]>(), { - while: Array.isEmptyArray, - body: () => storage.getAssignments - }) - yield* Effect.iterate(Array.empty<[RunnerAddress.RunnerAddress, Runner.Runner]>(), { - while: Array.isEmptyArray, - body: () => storage.getRunners - }) - // Simulate a non-persistent storage restart - yield* storage.saveAssignments([]) - yield* storage.saveRunners([]) - }).pipe( - Effect.provide( - ShardManagerLive.pipe( - Layer.provide(RunnerHealthLive), - Layer.provide([MessageStorage.layerNoop, ShardingConfig.layer()]) - ) - ) - ) - - const test = Effect.gen(function*() { - const storage = yield* ShardStorage.ShardStorage
- const shutdownAssignments = yield* storage.getAssignments - const shutdownRunners = yield* storage.getRunners - // ShardManager should have saved its state to persistent storage - // as part of shutdown procedures - expect(shutdownAssignments.length === 0).toBe(false) - expect(Array.isEmptyArray(shutdownRunners)).toBe(false) - }) - - yield* setup - yield* test - }).pipe(Effect.provide(ShardStorage.layerMemory)), 10_000) - }) -}) - -function registerRunner(n: number) { - const runner = Runner.make({ - address: RunnerAddress.make("server", n), - groups: n % 2 === 0 ? ["default", "custom"] : ["default"], - version: 1 - }) - return SimulationEvent.RegisterRunner({ runner }) -} -function unregisterRunner(n: number) { - const address = RunnerAddress.make("server", n) - return SimulationEvent.UnregisterRunner({ address }) -} - -function getShardsPerRunner( - assignments: Iterable]>, - group: string -) { - const shardsPerRunner = MutableHashMap.empty>() - for (const [shard, address] of assignments) { - if (shard.group !== group || Option.isNone(address)) continue - MutableHashMap.modifyAt( - shardsPerRunner, - address.value, - Option.match({ - onNone: () => Option.some(Array.of(shard.id)), - onSome: (shards) => Option.some(Array.append(shards, shard.id)) - }) - ) - } - return MutableHashMap.values(shardsPerRunner) -} - -type SimulationEvent = Data.TaggedEnum<{ - readonly RegisterRunner: { readonly runner: Runner.Runner } - readonly UnregisterRunner: { readonly address: RunnerAddress.RunnerAddress } -}> -const SimulationEvent = Data.taggedEnum() - -const handleEvent = SimulationEvent.$match({ - RegisterRunner: ({ runner }) => - ShardManager.ShardManager.pipe( - Effect.flatMap((manager) => manager.register(runner)) - ), - UnregisterRunner: ({ address }) => - ShardManager.ShardManager.pipe( - Effect.flatMap((manager) => manager.unregister(address)) - ) -}) - -const simulate = Effect.fnUntraced(function*(events: ReadonlyArray) { - const fiber = yield* Effect.forEach(events, handleEvent, { concurrency: events.length, discard: true }).pipe( - Effect.fork - ) - yield* TestClock.adjust(1) - // Wait for shard manager to debounce rebalancing - yield* TestClock.adjust(500) - yield* TestClock.adjust(500) - yield* Fiber.join(fiber) -}) - -const makeDefaultState = ( - runners: MutableHashMap.MutableHashMap, - shards: Map> -) => - new State( - runners, - new Map([["default", runners]]), - new Map([["default", shards]]), - shards.size - ) diff --git a/packages/cluster/test/Sharding.test.ts b/packages/cluster/test/Sharding.test.ts index 24259fdc311..c35215a21e1 100644 --- a/packages/cluster/test/Sharding.test.ts +++ b/packages/cluster/test/Sharding.test.ts @@ -2,10 +2,9 @@ import { MessageStorage, RunnerAddress, Runners, + RunnerStorage, Sharding, ShardingConfig, - ShardManager, - ShardStorage, Snowflake } from "@effect/cluster" import { assert, describe, expect, it } from "@effect/vitest" @@ -24,6 +23,7 @@ import { Stream, TestClock } from "effect" +import * as RunnerHealth from "../src/RunnerHealth.js" import { TestEntity, TestEntityNoState, TestEntityState, User } from "./TestEntity.js" describe.concurrent("Sharding", () => { @@ -91,13 +91,12 @@ describe.concurrent("Sharding", () => { yield* Effect.gen(function*() { const makeClient = yield* TestEntity.client - yield* TestClock.adjust(1) const client = makeClient("1") const fiber = yield* client.NeverVolatile().pipe(Effect.fork) yield* TestClock.adjust(1) const config = yield* ShardingConfig.ShardingConfig ;(config as any).runnerAddress = 
Option.some(RunnerAddress.make("localhost", 1234)) - setTimeout(() => { + fiber.currentScheduler.scheduleTask(() => { fiber.unsafeInterruptAsFork(FiberId.none) Effect.runFork(testClock.adjust(30000)) }, 0) @@ -365,10 +364,7 @@ describe.concurrent("Sharding", () => { // add response yield* state.messages.offer(void 0) - yield* Effect.gen(function*() { - // Let the shards get assigned and storage poll - yield* TestClock.adjust(5000) - }).pipe( + yield* TestClock.adjust(5000).pipe( Effect.provide(EnvLayer), Effect.scoped ) @@ -500,17 +496,6 @@ describe.concurrent("Sharding", () => { Layer.provide(MessageStorage.layerNoop) )))) - it.scoped("EntityNotManagedByRunner", () => - Effect.gen(function*() { - yield* TestClock.adjust(1) - const makeClient = yield* TestEntity.client - const client = makeClient("1") - const error = yield* client.GetUser({ id: 123 }).pipe( - Effect.flip - ) - expect(error._tag).toEqual("EntityNotManagedByRunner") - }).pipe(Effect.provide(TestShardingWithoutEntities))) - it.scoped("restart on defect", () => Effect.gen(function*() { yield* TestClock.adjust(1) @@ -533,8 +518,8 @@ const TestShardingConfig = ShardingConfig.layer({ const TestShardingWithoutState = TestEntityNoState.pipe( Layer.provideMerge(Sharding.layer), - Layer.provide(ShardManager.layerClientLocal), - Layer.provide(ShardStorage.layerMemory) + Layer.provide(RunnerStorage.layerMemory), + Layer.provide(RunnerHealth.layerNoop) // Layer.provide(Logger.minimumLogLevel(LogLevel.All)), // Layer.provideMerge(Logger.pretty) ) @@ -552,11 +537,3 @@ const TestSharding = TestShardingWithoutStorage.pipe( Layer.provideMerge(MessageStorage.layerMemory), Layer.provide(TestShardingConfig) ) - -const TestShardingWithoutEntities = Sharding.layer.pipe( - Layer.provide(ShardManager.layerClientLocal), - Layer.provide(ShardStorage.layerMemory), - Layer.provide(Runners.layerNoop), - Layer.provideMerge(MessageStorage.layerMemory), - Layer.provide(TestShardingConfig) -) diff --git a/packages/cluster/test/SqlMessageStorage.test.ts b/packages/cluster/test/SqlMessageStorage.test.ts index 5474b8d23a3..d50e471f654 100644 --- a/packages/cluster/test/SqlMessageStorage.test.ts +++ b/packages/cluster/test/SqlMessageStorage.test.ts @@ -174,15 +174,16 @@ describe("SqlMessageStorage", () => { const latch = yield* Effect.makeLatch() const request = yield* makeRequest() yield* storage.saveRequest(request) - yield* storage.registerReplyHandler( + const fiber = yield* storage.registerReplyHandler( new Message.OutgoingRequest({ ...request, respond: () => latch.open - }), - Effect.void - ) + }) + ).pipe(Effect.fork) + yield* TestClock.adjust(1) yield* storage.saveReply(yield* makeReply(request)) yield* latch.await + yield* fiber.await })) it.effect("unprocessedMessagesById", () => diff --git a/packages/cluster/test/SqlRunnerStorage.test.ts b/packages/cluster/test/SqlRunnerStorage.test.ts new file mode 100644 index 00000000000..730bdaae1b0 --- /dev/null +++ b/packages/cluster/test/SqlRunnerStorage.test.ts @@ -0,0 +1,82 @@ +import { Runner, RunnerAddress, RunnerStorage, ShardId, SqlRunnerStorage } from "@effect/cluster" +import { FileSystem } from "@effect/platform" +import { NodeFileSystem } from "@effect/platform-node" +import { SqliteClient } from "@effect/sql-sqlite-node" +import { describe, expect, it } from "@effect/vitest" +import { Effect, Layer } from "effect" +import * as ShardingConfig from "../src/ShardingConfig.js" +import { MysqlContainer } from "./fixtures/utils-mysql.js" +import { PgContainer } from "./fixtures/utils-pg.js" + +const 
StorageLive = SqlRunnerStorage.layer + +describe("SqlRunnerStorage", () => { + ;([ + ["pg", Layer.orDie(PgContainer.ClientLive)], + ["mysql", Layer.orDie(MysqlContainer.ClientLive)], + ["sqlite", Layer.orDie(SqliteLayer)] + ] as const).forEach(([label, layer]) => { + it.layer(StorageLive.pipe(Layer.provideMerge(layer), Layer.provide(ShardingConfig.layer())), { + timeout: 60000 + })(label, (it) => { + it.effect("getRunners", () => + Effect.gen(function*() { + const storage = yield* RunnerStorage.RunnerStorage + + const runner = Runner.make({ + address: runnerAddress1, + groups: ["default"], + weight: 1 + }) + const machineId = yield* storage.register(runner, true) + yield* storage.register(runner, true) + expect(machineId).toEqual(1) + expect(yield* storage.getRunners).toEqual([[runner, true]]) + + yield* storage.setRunnerHealth(runnerAddress1, false) + expect(yield* storage.getRunners).toEqual([[runner, false]]) + + yield* storage.unregister(runnerAddress1) + expect(yield* storage.getRunners).toEqual([]) + })) + + it.effect("acquireShards", () => + Effect.gen(function*() { + const storage = yield* RunnerStorage.RunnerStorage + + let acquired = yield* storage.acquire(runnerAddress1, [ + ShardId.make("default", 1), + ShardId.make("default", 2), + ShardId.make("default", 3) + ]) + expect(acquired.map((_) => _.id)).toEqual([1, 2, 3]) + acquired = yield* storage.acquire(runnerAddress1, [ + ShardId.make("default", 1), + ShardId.make("default", 2), + ShardId.make("default", 3) + ]) + expect(acquired.map((_) => _.id)).toEqual([1, 2, 3]) + + const refreshed = yield* storage.refresh(runnerAddress1, [ + ShardId.make("default", 1), + ShardId.make("default", 2), + ShardId.make("default", 3) + ]) + expect(refreshed.map((_) => _.id)).toEqual([1, 2, 3]) + + // smoke test release + yield* storage.release(runnerAddress1, ShardId.make("default", 2)) + })) + }) + }) +}) + +const runnerAddress1 = RunnerAddress.make("localhost", 1234) + +const SqliteLayer = Effect.gen(function*() { + const fs = yield* FileSystem.FileSystem + const dir = yield* fs.makeTempDirectoryScoped() + return SqliteClient.layer({ + filename: dir + "/test.db" + }) +}).pipe(Layer.unwrapScoped, Layer.provide(NodeFileSystem.layer)) diff --git a/packages/cluster/test/SqlShardStorage.test.ts b/packages/cluster/test/SqlShardStorage.test.ts deleted file mode 100644 index d31b4ade544..00000000000 --- a/packages/cluster/test/SqlShardStorage.test.ts +++ /dev/null @@ -1,104 +0,0 @@ -import { Runner, RunnerAddress, ShardId, ShardStorage, SqlShardStorage } from "@effect/cluster" -import { FileSystem } from "@effect/platform" -import { NodeFileSystem } from "@effect/platform-node" -import { SqliteClient } from "@effect/sql-sqlite-node" -import { describe, expect, it } from "@effect/vitest" -import { Effect, Equal, Layer, MutableHashSet, Option } from "effect" -import { MysqlContainer } from "./fixtures/utils-mysql.js" -import { PgContainer } from "./fixtures/utils-pg.js" - -const StorageLive = SqlShardStorage.layer - -describe("SqlMessageStorage", () => { - ;([ - ["pg", Layer.orDie(PgContainer.ClientLive)], - ["mysql", Layer.orDie(MysqlContainer.ClientLive)], - ["sqlite", Layer.orDie(SqliteLayer)] - ] as const).forEach(([label, layer]) => { - it.layer(StorageLive.pipe(Layer.provideMerge(layer)), { - timeout: 60000 - })(label, (it) => { - it.effect("saveRunners", () => - Effect.gen(function*() { - const storage = yield* ShardStorage.ShardStorage - - yield* storage.saveRunners([[ - runnerAddress1, - Runner.make({ - address: runnerAddress1, - groups: 
["default"], - version: 1 - }) - ]]) - expect(yield* storage.getRunners).toEqual([[ - runnerAddress1, - Runner.make({ - address: runnerAddress1, - groups: ["default"], - version: 1 - }) - ]]) - }).pipe(Effect.repeatN(2))) - - it.effect("saveAssignments", () => - Effect.gen(function*() { - const storage = yield* ShardStorage.ShardStorage - - yield* storage.saveAssignments([ - [ShardId.make("default", 1), Option.some(runnerAddress1)], - [ShardId.make("default", 2), Option.none()] - ]) - expect(Equal.equals( - yield* storage.getAssignments, - MutableHashSet.fromIterable([ - [ShardId.make("default", 1), Option.some(runnerAddress1)], - [ShardId.make("default", 2), Option.none()] - ]) - )) - }).pipe(Effect.repeatN(2))) - - it.effect("acquireShards", () => - Effect.gen(function*() { - const storage = yield* ShardStorage.ShardStorage - - let acquired = yield* storage.acquire(runnerAddress1, [ - ShardId.make("default", 1), - ShardId.make("default", 2), - ShardId.make("default", 3) - ]) - expect(acquired.map((_) => _.id)).toEqual([1, 2, 3]) - acquired = yield* storage.acquire(runnerAddress1, [ - ShardId.make("default", 1), - ShardId.make("default", 2), - ShardId.make("default", 3) - ]) - expect(acquired.map((_) => _.id)).toEqual([1, 2, 3]) - - const refreshed = yield* storage.refresh(runnerAddress1, [ - ShardId.make("default", 1), - ShardId.make("default", 2), - ShardId.make("default", 3) - ]) - expect(refreshed.map((_) => _.id)).toEqual([1, 2, 3]) - - acquired = yield* storage.acquire(runnerAddress2, [ - ShardId.make("default", 1), - ShardId.make("default", 2), - ShardId.make("default", 3) - ]) - expect(acquired).toEqual([]) - })) - }) - }) -}) - -const runnerAddress1 = RunnerAddress.make("localhost", 1234) -const runnerAddress2 = RunnerAddress.make("localhost", 1235) - -const SqliteLayer = Effect.gen(function*() { - const fs = yield* FileSystem.FileSystem - const dir = yield* fs.makeTempDirectoryScoped() - return SqliteClient.layer({ - filename: dir + "/test.db" - }) -}).pipe(Layer.unwrapScoped, Layer.provide(NodeFileSystem.layer)) diff --git a/packages/effect/src/Effect.ts b/packages/effect/src/Effect.ts index f99123f7046..b1e897d82b1 100644 --- a/packages/effect/src/Effect.ts +++ b/packages/effect/src/Effect.ts @@ -13946,6 +13946,11 @@ export declare namespace Service { * @category Models */ export namespace fn { + /** + * @since 3.19.0 + * @category Models + */ + export type Return = Generator>, A, any> /** * @since 3.11.0 * @category Models diff --git a/packages/effect/src/Graph.ts b/packages/effect/src/Graph.ts index fdf5c69eca0..59f1cf509c0 100644 --- a/packages/effect/src/Graph.ts +++ b/packages/effect/src/Graph.ts @@ -14,18 +14,6 @@ import type { Pipeable } from "./Pipeable.js" import { pipeArguments } from "./Pipeable.js" import type { Mutable } from "./Types.js" -/** - * Safely get a value from a Map, returning an Option. - * Uses explicit key presence check with map.has() for better safety. - * @internal - */ -const getMapSafe = (map: Map, key: K): Option.Option => { - if (map.has(key)) { - return Option.some(map.get(key)!) - } - return Option.none() -} - /** * Unique identifier for Graph instances. * @@ -224,6 +212,23 @@ const ProtoGraph = { } } +// ============================================================================= +// Errors +// ============================================================================= + +/** + * Error thrown when a graph operation fails. 
+ * + * @since 3.18.0 + * @category errors + */ +export class GraphError extends Data.TaggedError("GraphError")<{ + readonly message: string +}> {} + +/** @internal */ +const missingNode = (node: number) => new GraphError({ message: `Node ${node} does not exist` }) + // ============================================================================= // Constructors // ============================================================================= @@ -494,7 +499,7 @@ export const addNode = ( export const getNode = ( graph: Graph | MutableGraph, nodeIndex: NodeIndex -): Option.Option => getMapSafe(graph.nodes, nodeIndex) +): Option.Option => graph.nodes.has(nodeIndex) ? Option.some(graph.nodes.get(nodeIndex)!) : Option.none() /** * Checks if a node with the given index exists in the graph. @@ -1145,10 +1150,10 @@ export const addEdge = ( ): EdgeIndex => { // Validate that both nodes exist if (!mutable.nodes.has(source)) { - throw new Error(`Source node ${source} does not exist`) + throw missingNode(source) } if (!mutable.nodes.has(target)) { - throw new Error(`Target node ${target} does not exist`) + throw missingNode(target) } const edgeIndex = mutable.nextEdgeIndex @@ -1158,26 +1163,26 @@ export const addEdge = ( mutable.edges.set(edgeIndex, edgeData) // Update adjacency lists - const sourceAdjacency = getMapSafe(mutable.adjacency, source) - if (Option.isSome(sourceAdjacency)) { - sourceAdjacency.value.push(edgeIndex) + const sourceAdjacency = mutable.adjacency.get(source) + if (sourceAdjacency !== undefined) { + sourceAdjacency.push(edgeIndex) } - const targetReverseAdjacency = getMapSafe(mutable.reverseAdjacency, target) - if (Option.isSome(targetReverseAdjacency)) { - targetReverseAdjacency.value.push(edgeIndex) + const targetReverseAdjacency = mutable.reverseAdjacency.get(target) + if (targetReverseAdjacency !== undefined) { + targetReverseAdjacency.push(edgeIndex) } // For undirected graphs, add reverse connections if (mutable.type === "undirected") { - const targetAdjacency = getMapSafe(mutable.adjacency, target) - if (Option.isSome(targetAdjacency)) { - targetAdjacency.value.push(edgeIndex) + const targetAdjacency = mutable.adjacency.get(target) + if (targetAdjacency !== undefined) { + targetAdjacency.push(edgeIndex) } - const sourceReverseAdjacency = getMapSafe(mutable.reverseAdjacency, source) - if (Option.isSome(sourceReverseAdjacency)) { - sourceReverseAdjacency.value.push(edgeIndex) + const sourceReverseAdjacency = mutable.reverseAdjacency.get(source) + if (sourceReverseAdjacency !== undefined) { + sourceReverseAdjacency.push(edgeIndex) } } @@ -1224,17 +1229,17 @@ export const removeNode = ( const edgesToRemove: Array = [] // Get outgoing edges - const outgoingEdges = getMapSafe(mutable.adjacency, nodeIndex) - if (Option.isSome(outgoingEdges)) { - for (const edge of outgoingEdges.value) { + const outgoingEdges = mutable.adjacency.get(nodeIndex) + if (outgoingEdges !== undefined) { + for (const edge of outgoingEdges) { edgesToRemove.push(edge) } } // Get incoming edges - const incomingEdges = getMapSafe(mutable.reverseAdjacency, nodeIndex) - if (Option.isSome(incomingEdges)) { - for (const edge of incomingEdges.value) { + const incomingEdges = mutable.reverseAdjacency.get(nodeIndex) + if (incomingEdges !== undefined) { + for (const edge of incomingEdges) { edgesToRemove.push(edge) } } @@ -1293,45 +1298,45 @@ const removeEdgeInternal = ( edgeIndex: EdgeIndex ): boolean => { // Get edge data - const edge = getMapSafe(mutable.edges, edgeIndex) - if (Option.isNone(edge)) { + const edge = 
mutable.edges.get(edgeIndex) + if (edge === undefined) { return false // Edge doesn't exist, no mutation occurred } - const { source, target } = edge.value + const { source, target } = edge // Remove from adjacency lists - const sourceAdjacency = getMapSafe(mutable.adjacency, source) - if (Option.isSome(sourceAdjacency)) { - const index = sourceAdjacency.value.indexOf(edgeIndex) + const sourceAdjacency = mutable.adjacency.get(source) + if (sourceAdjacency !== undefined) { + const index = sourceAdjacency.indexOf(edgeIndex) if (index !== -1) { - sourceAdjacency.value.splice(index, 1) + sourceAdjacency.splice(index, 1) } } - const targetReverseAdjacency = getMapSafe(mutable.reverseAdjacency, target) - if (Option.isSome(targetReverseAdjacency)) { - const index = targetReverseAdjacency.value.indexOf(edgeIndex) + const targetReverseAdjacency = mutable.reverseAdjacency.get(target) + if (targetReverseAdjacency !== undefined) { + const index = targetReverseAdjacency.indexOf(edgeIndex) if (index !== -1) { - targetReverseAdjacency.value.splice(index, 1) + targetReverseAdjacency.splice(index, 1) } } // For undirected graphs, remove reverse connections if (mutable.type === "undirected") { - const targetAdjacency = getMapSafe(mutable.adjacency, target) - if (Option.isSome(targetAdjacency)) { - const index = targetAdjacency.value.indexOf(edgeIndex) + const targetAdjacency = mutable.adjacency.get(target) + if (targetAdjacency !== undefined) { + const index = targetAdjacency.indexOf(edgeIndex) if (index !== -1) { - targetAdjacency.value.splice(index, 1) + targetAdjacency.splice(index, 1) } } - const sourceReverseAdjacency = getMapSafe(mutable.reverseAdjacency, source) - if (Option.isSome(sourceReverseAdjacency)) { - const index = sourceReverseAdjacency.value.indexOf(edgeIndex) + const sourceReverseAdjacency = mutable.reverseAdjacency.get(source) + if (sourceReverseAdjacency !== undefined) { + const index = sourceReverseAdjacency.indexOf(edgeIndex) if (index !== -1) { - sourceReverseAdjacency.value.splice(index, 1) + sourceReverseAdjacency.splice(index, 1) } } } @@ -1375,7 +1380,7 @@ const removeEdgeInternal = ( export const getEdge = ( graph: Graph | MutableGraph, edgeIndex: EdgeIndex -): Option.Option> => getMapSafe(graph.edges, edgeIndex) +): Option.Option> => graph.edges.has(edgeIndex) ? Option.some(graph.edges.get(edgeIndex)!) : Option.none() /** * Checks if an edge exists between two nodes in the graph. 
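The accessor rewrites in this hunk are behavior-preserving: `getNode` and `getEdge` still return `Option`, only the internal `getMapSafe` indirection is gone. A small sketch with illustrative data:

```ts
import { Graph, Option } from "effect"

const graph = Graph.mutate(Graph.directed<string, number>(), (mutable) => {
  const a = Graph.addNode(mutable, "A")
  const b = Graph.addNode(mutable, "B")
  Graph.addEdge(mutable, a, b, 7)
})

console.log(Graph.getNode(graph, 0)) // Option.some("A")
console.log(Graph.getNode(graph, 99)) // Option.none()
// the first edge has index 0; Edge carries source, target and data
console.log(Option.map(Graph.getEdge(graph, 0), (edge) => edge.data)) // Option.some(7)
```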
@@ -1410,15 +1415,15 @@ export const hasEdge = ( source: NodeIndex, target: NodeIndex ): boolean => { - const adjacencyList = getMapSafe(graph.adjacency, source) - if (Option.isNone(adjacencyList)) { + const adjacencyList = graph.adjacency.get(source) + if (adjacencyList === undefined) { return false } // Check if any edge in the adjacency list connects to the target - for (const edgeIndex of adjacencyList.value) { - const edge = getMapSafe(graph.edges, edgeIndex) - if (Option.isSome(edge) && edge.value.target === target) { + for (const edgeIndex of adjacencyList) { + const edge = graph.edges.get(edgeIndex) + if (edge !== undefined && edge.target === target) { return true } } @@ -1493,16 +1498,16 @@ export const neighbors = ( return getUndirectedNeighbors(graph as any, nodeIndex) } - const adjacencyList = getMapSafe(graph.adjacency, nodeIndex) - if (Option.isNone(adjacencyList)) { + const adjacencyList = graph.adjacency.get(nodeIndex) + if (adjacencyList === undefined) { return [] } const result: Array = [] - for (const edgeIndex of adjacencyList.value) { - const edge = getMapSafe(graph.edges, edgeIndex) - if (Option.isSome(edge)) { - result.push(edge.value.target) + for (const edgeIndex of adjacencyList) { + const edge = graph.edges.get(edgeIndex) + if (edge !== undefined) { + result.push(edge.target) } } @@ -1544,19 +1549,19 @@ export const neighborsDirected = ( ? graph.reverseAdjacency : graph.adjacency - const adjacencyList = getMapSafe(adjacencyMap, nodeIndex) - if (Option.isNone(adjacencyList)) { + const adjacencyList = adjacencyMap.get(nodeIndex) + if (adjacencyList === undefined) { return [] } const result: Array = [] - for (const edgeIndex of adjacencyList.value) { - const edge = getMapSafe(graph.edges, edgeIndex) - if (Option.isSome(edge)) { + for (const edgeIndex of adjacencyList) { + const edge = graph.edges.get(edgeIndex) + if (edge !== undefined) { // For incoming direction, we want the source node instead of target const neighborNode = direction === "incoming" - ? edge.value.source - : edge.value.target + ? edge.source + : edge.target result.push(neighborNode) } } @@ -1568,6 +1573,18 @@ export const neighborsDirected = ( // GraphViz Export // ============================================================================= +/** + * Configuration options for GraphViz DOT format generation from graphs. + * + * @since 3.18.0 + * @category models + */ +export interface GraphVizOptions { + readonly nodeLabel?: (data: N) => string + readonly edgeLabel?: (data: E) => string + readonly graphName?: string +} + /** * Exports a graph to GraphViz DOT format for visualization. * @@ -1601,11 +1618,7 @@ export const neighborsDirected = ( */ export const toGraphViz = ( graph: Graph | MutableGraph, - options?: { - readonly nodeLabel?: (data: N) => string - readonly edgeLabel?: (data: E) => string - readonly graphName?: string - } + options?: GraphVizOptions ): string => { const { edgeLabel = (data: E) => String(data), @@ -1636,6 +1649,174 @@ export const toGraphViz = ( return lines.join("\n") } +// ============================================================================= +// Mermaid Export +// ============================================================================= + +/** + * Mermaid node shape types. + * + * @since 3.18.0 + * @category models + */ +export type MermaidNodeShape = + | "rectangle" + | "rounded" + | "circle" + | "diamond" + | "hexagon" + | "stadium" + | "subroutine" + | "cylindrical" + +/** + * Mermaid diagram direction types. 
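On the `GraphVizOptions` extraction above: the inline options object now has a reusable name, with no change for existing call sites. A hedged sketch, assuming the interface keeps the graph's two type parameters:

```ts
import { Graph } from "effect"

const graph = Graph.mutate(Graph.directed<{ name: string }, string>(), (mutable) => {
  const app = Graph.addNode(mutable, { name: "App" })
  const db = Graph.addNode(mutable, { name: "DB" })
  Graph.addEdge(mutable, app, db, "reads")
})

// The options argument can now be referenced by name and reused
const options: Graph.GraphVizOptions<{ name: string }, string> = {
  nodeLabel: (data) => data.name,
  graphName: "deps"
}

console.log(Graph.toGraphViz(graph, options))
```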
+ * + * @since 3.18.0 + * @category models + */ +export type MermaidDirection = "TB" | "TD" | "BT" | "LR" | "RL" + +/** + * Mermaid diagram type. + * + * @since 3.18.0 + * @category models + */ +export type MermaidDiagramType = "flowchart" | "graph" + +/** + * Configuration options for Mermaid diagram generation. + * + * @since 3.18.0 + * @category models + */ +export interface MermaidOptions { + readonly nodeLabel?: (data: N) => string + readonly edgeLabel?: (data: E) => string + readonly diagramType?: MermaidDiagramType + readonly direction?: MermaidDirection + readonly nodeShape?: (data: N) => MermaidNodeShape +} + +/** @internal */ +const escapeMermaidLabel = (label: string): string => { + // Escape special characters for Mermaid using HTML entity codes + // According to: https://mermaid.js.org/syntax/flowchart.html#special-characters-that-break-syntax + return label + .replace(/#/g, "#35;") + .replace(/"/g, "#quot;") + .replace(//g, "#gt;") + .replace(/&/g, "#amp;") + .replace(/\[/g, "#91;") + .replace(/\]/g, "#93;") + .replace(/\{/g, "#123;") + .replace(/\}/g, "#125;") + .replace(/\(/g, "#40;") + .replace(/\)/g, "#41;") + .replace(/\|/g, "#124;") + .replace(/\\/g, "#92;") + .replace(/\n/g, "
") +} + +/** @internal */ +const formatMermaidNode = (nodeId: string, label: string, shape: MermaidNodeShape): string => { + switch (shape) { + case "rectangle": + return `${nodeId}["${label}"]` + case "rounded": + return `${nodeId}("${label}")` + case "circle": + return `${nodeId}(("${label}"))` + case "diamond": + return `${nodeId}{"${label}"}` + case "hexagon": + return `${nodeId}{{"${label}"}}` + case "stadium": + return `${nodeId}(["${label}"])` + case "subroutine": + return `${nodeId}[["${label}"]]` + case "cylindrical": + return `${nodeId}[("${label}")]` + } +} + +/** + * Exports a graph to Mermaid diagram format for visualization. + * + * @example + * ```ts + * import { Graph } from "effect" + * + * const graph = Graph.mutate(Graph.directed(), (mutable) => { + * const app = Graph.addNode(mutable, "App") + * const db = Graph.addNode(mutable, "Database") + * const cache = Graph.addNode(mutable, "Cache") + * Graph.addEdge(mutable, app, db, 1) + * Graph.addEdge(mutable, app, cache, 2) + * }) + * + * const mermaid = Graph.toMermaid(graph) + * console.log(mermaid) + * // flowchart TD + * // 0["App"] + * // 1["Database"] + * // 2["Cache"] + * // 0 -->|"1"| 1 + * // 0 -->|"2"| 2 + * ``` + * + * @since 3.18.0 + * @category utils + */ +export const toMermaid = ( + graph: Graph | MutableGraph, + options?: MermaidOptions +): string => { + // Extract and validate options with defaults + const { + diagramType, + direction = "TD", + edgeLabel = (data: E) => String(data), + nodeLabel = (data: N) => String(data), + nodeShape = () => "rectangle" as const + } = options ?? {} + + // Auto-detect diagram type if not specified + const finalDiagramType = diagramType ?? + (graph.type === "directed" ? "flowchart" : "graph") + + // Generate diagram header + const lines: Array = [] + lines.push(`${finalDiagramType} ${direction}`) + + // Add nodes + for (const [nodeIndex, nodeData] of graph.nodes) { + const nodeId = String(nodeIndex) + const label = escapeMermaidLabel(nodeLabel(nodeData)) + const shape = nodeShape(nodeData) + const formattedNode = formatMermaidNode(nodeId, label, shape) + lines.push(` ${formattedNode}`) + } + + // Add edges + const edgeOperator = finalDiagramType === "flowchart" ? 
"-->" : "---" + for (const [, edgeData] of graph.edges) { + const sourceId = String(edgeData.source) + const targetId = String(edgeData.target) + const label = escapeMermaidLabel(edgeLabel(edgeData.data)) + + if (label) { + lines.push(` ${sourceId} ${edgeOperator}|"${label}"| ${targetId}`) + } else { + lines.push(` ${sourceId} ${edgeOperator} ${targetId}`) + } + } + + return lines.join("\n") +} + // ============================================================================= // Direction Types for Bidirectional Traversal // ============================================================================= @@ -1654,10 +1835,10 @@ export const toGraphViz = ( * }) * * // Follow outgoing edges (normal direction) - * const outgoingNodes = Array.from(Graph.indices(Graph.dfs(graph, { startNodes: [0], direction: "outgoing" }))) + * const outgoingNodes = Array.from(Graph.indices(Graph.dfs(graph, { start: [0], direction: "outgoing" }))) * * // Follow incoming edges (reverse direction) - * const incomingNodes = Array.from(Graph.indices(Graph.dfs(graph, { startNodes: [1], direction: "incoming" }))) + * const incomingNodes = Array.from(Graph.indices(Graph.dfs(graph, { start: [1], direction: "incoming" }))) * ``` * * @since 3.18.0 @@ -1878,13 +2059,13 @@ const getUndirectedNeighbors = ( const neighbors = new Set() // Check edges where this node is the source - const adjacencyList = getMapSafe(graph.adjacency, nodeIndex) - if (Option.isSome(adjacencyList)) { - for (const edgeIndex of adjacencyList.value) { - const edge = getMapSafe(graph.edges, edgeIndex) - if (Option.isSome(edge)) { + const adjacencyList = graph.adjacency.get(nodeIndex) + if (adjacencyList !== undefined) { + for (const edgeIndex of adjacencyList) { + const edge = graph.edges.get(edgeIndex) + if (edge !== undefined) { // For undirected graphs, the neighbor is the other endpoint - const otherNode = edge.value.source === nodeIndex ? edge.value.target : edge.value.source + const otherNode = edge.source === nodeIndex ? edge.target : edge.source neighbors.add(otherNode) } } @@ -2048,12 +2229,12 @@ export const stronglyConnectedComponents = ( scc.push(node) // Use reverse adjacency (transpose graph) - const reverseAdjacency = getMapSafe(graph.reverseAdjacency, node) - if (Option.isSome(reverseAdjacency)) { - for (const edgeIndex of reverseAdjacency.value) { - const edge = getMapSafe(graph.edges, edgeIndex) - if (Option.isSome(edge)) { - const predecessor = edge.value.source + const reverseAdjacency = graph.reverseAdjacency.get(node) + if (reverseAdjacency !== undefined) { + for (const edgeIndex of reverseAdjacency) { + const edge = graph.edges.get(edgeIndex) + if (edge !== undefined) { + const predecessor = edge.source if (!visited.has(predecessor)) { stack.push(predecessor) } @@ -2081,7 +2262,44 @@ export const stronglyConnectedComponents = ( export interface PathResult { readonly path: Array readonly distance: number - readonly edgeWeights: Array + readonly costs: Array +} + +/** + * Configuration for Dijkstra's algorithm. + * + * @since 3.18.0 + * @category models + */ +export interface DijkstraConfig { + source: NodeIndex + target: NodeIndex + cost: (edgeData: E) => number +} + +/** + * Configuration for A* algorithm. + * + * @since 3.18.0 + * @category models + */ +export interface AstarConfig { + source: NodeIndex + target: NodeIndex + cost: (edgeData: E) => number + heuristic: (sourceNodeData: N, targetNodeData: N) => number +} + +/** + * Configuration for Bellman-Ford algorithm. 
+ * + * @since 3.18.0 + * @category models + */ +export interface BellmanFordConfig { + source: NodeIndex + target: NodeIndex + cost: (edgeData: E) => number } /** @@ -2103,7 +2321,7 @@ export interface PathResult { * Graph.addEdge(mutable, b, c, 2) * }) * - * const result = Graph.dijkstra(graph, 0, 2, (edgeData) => edgeData) + * const result = Graph.dijkstra(graph, { source: 0, target: 2, cost: (edgeData) => edgeData }) * if (Option.isSome(result)) { * console.log(result.value.path) // [0, 1, 2] - shortest path A->B->C * console.log(result.value.distance) // 7 - total distance @@ -2115,16 +2333,15 @@ export interface PathResult { */ export const dijkstra = ( graph: Graph | MutableGraph, - source: NodeIndex, - target: NodeIndex, - edgeWeight: (edgeData: E) => number + config: DijkstraConfig ): Option.Option> => { + const { cost, source, target } = config // Validate that source and target nodes exist if (!graph.nodes.has(source)) { - throw new Error(`Source node ${source} does not exist`) + throw missingNode(source) } if (!graph.nodes.has(target)) { - throw new Error(`Target node ${target} does not exist`) + throw missingNode(target) } // Early return if source equals target @@ -2132,7 +2349,7 @@ export const dijkstra = ( return Option.some({ path: [source], distance: 0, - edgeWeights: [] + costs: [] }) } @@ -2181,13 +2398,13 @@ export const dijkstra = ( const currentDistance = distances.get(currentNode)! // Examine all outgoing edges - const adjacencyList = getMapSafe(graph.adjacency, currentNode) - if (Option.isSome(adjacencyList)) { - for (const edgeIndex of adjacencyList.value) { - const edge = getMapSafe(graph.edges, edgeIndex) - if (Option.isSome(edge)) { - const neighbor = edge.value.target - const weight = edgeWeight(edge.value.data) + const adjacencyList = graph.adjacency.get(currentNode) + if (adjacencyList !== undefined) { + for (const edgeIndex of adjacencyList) { + const edge = graph.edges.get(edgeIndex) + if (edge !== undefined) { + const neighbor = edge.target + const weight = cost(edge.data) // Validate non-negative weights if (weight < 0) { @@ -2200,7 +2417,7 @@ export const dijkstra = ( // Relaxation step if (newDistance < neighborDistance) { distances.set(neighbor, newDistance) - previous.set(neighbor, { node: currentNode, edgeData: edge.value.data }) + previous.set(neighbor, { node: currentNode, edgeData: edge.data }) // Add to priority queue if not visited if (!visited.has(neighbor)) { @@ -2220,14 +2437,14 @@ export const dijkstra = ( // Reconstruct path const path: Array = [] - const edgeWeights: Array = [] + const costs: Array = [] let currentNode: NodeIndex | null = target while (currentNode !== null) { path.unshift(currentNode) const prev: { node: NodeIndex; edgeData: E } | null = previous.get(currentNode)! 
if (prev !== null) { - edgeWeights.unshift(prev.edgeData) + costs.unshift(prev.edgeData) currentNode = prev.node } else { currentNode = null @@ -2237,7 +2454,7 @@ export const dijkstra = ( return Option.some({ path, distance: targetDistance, - edgeWeights + costs }) } @@ -2250,7 +2467,7 @@ export const dijkstra = ( export interface AllPairsResult { readonly distances: Map> readonly paths: Map | null>> - readonly edgeWeights: Map>> + readonly costs: Map>> } /** @@ -2282,7 +2499,7 @@ export interface AllPairsResult { */ export const floydWarshall = ( graph: Graph | MutableGraph, - edgeWeight: (edgeData: E) => number + cost: (edgeData: E) => number ): AllPairsResult => { // Get all nodes for Floyd-Warshall algorithm (needs array for nested iteration) const allNodes = Array.from(graph.nodes.keys()) @@ -2307,7 +2524,7 @@ export const floydWarshall = ( // Set edge weights for (const [, edgeData] of graph.edges) { - const weight = edgeWeight(edgeData.data) + const weight = cost(edgeData.data) const i = edgeData.source const j = edgeData.target @@ -2345,19 +2562,19 @@ export const floydWarshall = ( // Build result paths and edge weights const paths = new Map | null>>() - const resultEdgeWeights = new Map>>() + const resultCosts = new Map>>() for (const i of allNodes) { paths.set(i, new Map()) - resultEdgeWeights.set(i, new Map()) + resultCosts.set(i, new Map()) for (const j of allNodes) { if (i === j) { paths.get(i)!.set(j, [i]) - resultEdgeWeights.get(i)!.set(j, []) + resultCosts.get(i)!.set(j, []) } else if (dist.get(i)!.get(j)! === Infinity) { paths.get(i)!.set(j, null) - resultEdgeWeights.get(i)!.set(j, []) + resultCosts.get(i)!.set(j, []) } else { // Reconstruct path iteratively const path: Array = [] @@ -2379,7 +2596,7 @@ export const floydWarshall = ( } paths.get(i)!.set(j, path) - resultEdgeWeights.get(i)!.set(j, weights) + resultCosts.get(i)!.set(j, weights) } } } @@ -2387,7 +2604,7 @@ export const floydWarshall = ( return { distances: dist, paths, - edgeWeights: resultEdgeWeights + costs: resultCosts } } @@ -2414,7 +2631,7 @@ export const floydWarshall = ( * const heuristic = (nodeData: {x: number, y: number}, targetData: {x: number, y: number}) => * Math.abs(nodeData.x - targetData.x) + Math.abs(nodeData.y - targetData.y) * - * const result = Graph.astar(graph, 0, 2, (edgeData) => edgeData, heuristic) + * const result = Graph.astar(graph, { source: 0, target: 2, cost: (edgeData) => edgeData, heuristic }) * if (Option.isSome(result)) { * console.log(result.value.path) // [0, 1, 2] - shortest path * console.log(result.value.distance) // 2 - total distance @@ -2426,17 +2643,15 @@ export const floydWarshall = ( */ export const astar = ( graph: Graph | MutableGraph, - source: NodeIndex, - target: NodeIndex, - edgeWeight: (edgeData: E) => number, - heuristic: (sourceNodeData: N, targetNodeData: N) => number + config: AstarConfig ): Option.Option> => { + const { cost, heuristic, source, target } = config // Validate that source and target nodes exist if (!graph.nodes.has(source)) { - throw new Error(`Source node ${source} does not exist`) + throw missingNode(source) } if (!graph.nodes.has(target)) { - throw new Error(`Target node ${target} does not exist`) + throw missingNode(target) } // Early return if source equals target @@ -2444,13 +2659,13 @@ export const astar = ( return Option.some({ path: [source], distance: 0, - edgeWeights: [] + costs: [] }) } // Get target node data for heuristic calculations - const targetNodeData = getMapSafe(graph.nodes, target) - if 
(Option.isNone(targetNodeData)) { + const targetNodeData = graph.nodes.get(target) + if (targetNodeData === undefined) { throw new Error(`Target node ${target} data not found`) } @@ -2469,9 +2684,9 @@ export const astar = ( } // Calculate initial f-score for source - const sourceNodeData = getMapSafe(graph.nodes, source) - if (Option.isSome(sourceNodeData)) { - const h = heuristic(sourceNodeData.value, targetNodeData.value) + const sourceNodeData = graph.nodes.get(source) + if (sourceNodeData !== undefined) { + const h = heuristic(sourceNodeData, targetNodeData) fScore.set(source, h) } @@ -2508,13 +2723,13 @@ export const astar = ( const currentGScore = gScore.get(currentNode)! // Examine all outgoing edges - const adjacencyList = getMapSafe(graph.adjacency, currentNode) - if (Option.isSome(adjacencyList)) { - for (const edgeIndex of adjacencyList.value) { - const edge = getMapSafe(graph.edges, edgeIndex) - if (Option.isSome(edge)) { - const neighbor = edge.value.target - const weight = edgeWeight(edge.value.data) + const adjacencyList = graph.adjacency.get(currentNode) + if (adjacencyList !== undefined) { + for (const edgeIndex of adjacencyList) { + const edge = graph.edges.get(edgeIndex) + if (edge !== undefined) { + const neighbor = edge.target + const weight = cost(edge.data) // Validate non-negative weights if (weight < 0) { @@ -2528,12 +2743,12 @@ export const astar = ( if (tentativeGScore < neighborGScore) { // Update g-score and previous gScore.set(neighbor, tentativeGScore) - previous.set(neighbor, { node: currentNode, edgeData: edge.value.data }) + previous.set(neighbor, { node: currentNode, edgeData: edge.data }) // Calculate f-score using heuristic - const neighborNodeData = getMapSafe(graph.nodes, neighbor) - if (Option.isSome(neighborNodeData)) { - const h = heuristic(neighborNodeData.value, targetNodeData.value) + const neighborNodeData = graph.nodes.get(neighbor) + if (neighborNodeData !== undefined) { + const h = heuristic(neighborNodeData, targetNodeData) const f = tentativeGScore + h fScore.set(neighbor, f) @@ -2556,14 +2771,14 @@ export const astar = ( // Reconstruct path const path: Array = [] - const edgeWeights: Array = [] + const costs: Array = [] let currentNode: NodeIndex | null = target while (currentNode !== null) { path.unshift(currentNode) const prev: { node: NodeIndex; edgeData: E } | null = previous.get(currentNode)! 
if (prev !== null) { - edgeWeights.unshift(prev.edgeData) + costs.unshift(prev.edgeData) currentNode = prev.node } else { currentNode = null @@ -2573,7 +2788,7 @@ export const astar = ( return Option.some({ path, distance: targetGScore, - edgeWeights + costs }) } @@ -2597,7 +2812,7 @@ export const astar = ( * Graph.addEdge(mutable, a, c, 5) * }) * - * const result = Graph.bellmanFord(graph, 0, 2, (edgeData) => edgeData) + * const result = Graph.bellmanFord(graph, { source: 0, target: 2, cost: (edgeData) => edgeData }) * if (Option.isSome(result)) { * console.log(result.value.path) // [0, 1, 2] - shortest path A->B->C * console.log(result.value.distance) // 2 - total distance @@ -2609,16 +2824,15 @@ export const astar = ( */ export const bellmanFord = ( graph: Graph | MutableGraph, - source: NodeIndex, - target: NodeIndex, - edgeWeight: (edgeData: E) => number + config: BellmanFordConfig ): Option.Option> => { + const { cost, source, target } = config // Validate that source and target nodes exist if (!graph.nodes.has(source)) { - throw new Error(`Source node ${source} does not exist`) + throw missingNode(source) } if (!graph.nodes.has(target)) { - throw new Error(`Target node ${target} does not exist`) + throw missingNode(target) } // Early return if source equals target @@ -2626,7 +2840,7 @@ export const bellmanFord = ( return Option.some({ path: [source], distance: 0, - edgeWeights: [] + costs: [] }) } @@ -2643,7 +2857,7 @@ export const bellmanFord = ( // Collect all edges for relaxation const edges: Array<{ source: NodeIndex; target: NodeIndex; weight: number; edgeData: E }> = [] for (const [, edgeData] of graph.edges) { - const weight = edgeWeight(edgeData.data) + const weight = cost(edgeData.data) edges.push({ source: edgeData.source, target: edgeData.target, @@ -2691,12 +2905,12 @@ export const bellmanFord = ( affectedNodes.add(node) // Add all nodes reachable from this node - const adjacencyList = getMapSafe(graph.adjacency, node) - if (Option.isSome(adjacencyList)) { - for (const edgeIndex of adjacencyList.value) { - const edge = getMapSafe(graph.edges, edgeIndex) - if (Option.isSome(edge)) { - queue.push(edge.value.target) + const adjacencyList = graph.adjacency.get(node) + if (adjacencyList !== undefined) { + for (const edgeIndex of adjacencyList) { + const edge = graph.edges.get(edgeIndex) + if (edge !== undefined) { + queue.push(edge.target) } } } @@ -2717,14 +2931,14 @@ export const bellmanFord = ( // Reconstruct path const path: Array = [] - const edgeWeights: Array = [] + const costs: Array = [] let currentNode: NodeIndex | null = target while (currentNode !== null) { path.unshift(currentNode) const prev: { node: NodeIndex; edgeData: E } | null = previous.get(currentNode)! 
if (prev !== null) { - edgeWeights.unshift(prev.edgeData) + costs.unshift(prev.edgeData) currentNode = prev.node } else { currentNode = null @@ -2734,7 +2948,7 @@ export const bellmanFord = ( return Option.some({ path, distance: targetDistance, - edgeWeights + costs }) } @@ -2756,7 +2970,7 @@ export const bellmanFord = ( * }) * * // Both traversal and element iterators return NodeWalker - * const dfsNodes: Graph.NodeWalker = Graph.dfs(graph, { startNodes: [0] }) + * const dfsNodes: Graph.NodeWalker = Graph.dfs(graph, { start: [0] }) * const allNodes: Graph.NodeWalker = Graph.nodes(graph) * * // Common interface for working with node iterables @@ -2793,7 +3007,7 @@ export class Walker implements Iterable<[T, N]> { * Graph.addEdge(mutable, a, b, 1) * }) * - * const dfs = Graph.dfs(graph, { startNodes: [0] }) + * const dfs = Graph.dfs(graph, { start: [0] }) * * // Map to just the node data * const values = Array.from(dfs.visit((index, data) => data)) @@ -2827,7 +3041,7 @@ export class Walker implements Iterable<[T, N]> { * Graph.addEdge(mutable, a, b, 1) * }) * - * const dfs = Graph.dfs(graph, { startNodes: [0] }) + * const dfs = Graph.dfs(graph, { start: [0] }) * * // Map to just the node data * const values = Array.from(dfs.visit((index, data) => data)) @@ -2879,7 +3093,7 @@ export type EdgeWalker = Walker> * Graph.addEdge(mutable, a, b, 1) * }) * - * const dfs = Graph.dfs(graph, { startNodes: [0] }) + * const dfs = Graph.dfs(graph, { start: [0] }) * const indices = Array.from(Graph.indices(dfs)) * console.log(indices) // [0, 1] * ``` @@ -2902,7 +3116,7 @@ export const indices = (walker: Walker): Iterable => walker.visit * Graph.addEdge(mutable, a, b, 1) * }) * - * const dfs = Graph.dfs(graph, { startNodes: [0] }) + * const dfs = Graph.dfs(graph, { start: [0] }) * const values = Array.from(Graph.values(dfs)) * console.log(values) // ["A", "B"] * ``` @@ -2925,7 +3139,7 @@ export const values = (walker: Walker): Iterable => walker.visit( * Graph.addEdge(mutable, a, b, 1) * }) * - * const dfs = Graph.dfs(graph, { startNodes: [0] }) + * const dfs = Graph.dfs(graph, { start: [0] }) * const entries = Array.from(Graph.entries(dfs)) * console.log(entries) // [[0, "A"], [1, "B"]] * ``` @@ -2937,13 +3151,13 @@ export const entries = (walker: Walker): Iterable<[T, N]> => walker.visit((index, data) => [index, data] as [T, N]) /** - * Configuration options for DFS iterator. + * Configuration for graph search iterators. * * @since 3.18.0 * @category models */ -export interface DfsConfig { - readonly startNodes?: Array +export interface SearchConfig { + readonly start?: Array readonly direction?: Direction } @@ -2966,7 +3180,7 @@ export interface DfsConfig { * }) * * // Start from a specific node - * const dfs1 = Graph.dfs(graph, { startNodes: [0] }) + * const dfs1 = Graph.dfs(graph, { start: [0] }) * for (const nodeIndex of Graph.indices(dfs1)) { * console.log(nodeIndex) // Traverses in DFS order: 0, 1, 2 * } @@ -2981,21 +3195,21 @@ export interface DfsConfig { */ export const dfs = ( graph: Graph | MutableGraph, - config: DfsConfig = {} + config: SearchConfig = {} ): NodeWalker => { - const startNodes = config.startNodes ?? [] + const start = config.start ?? [] const direction = config.direction ?? 
"outgoing" // Validate that all start nodes exist - for (const nodeIndex of startNodes) { + for (const nodeIndex of start) { if (!hasNode(graph, nodeIndex)) { - throw new Error(`Start node ${nodeIndex} does not exist`) + throw missingNode(nodeIndex) } } return new Walker((f) => ({ [Symbol.iterator]: () => { - const stack = [...startNodes] + const stack = [...start] const discovered = new Set() const nextMapped = () => { @@ -3008,8 +3222,8 @@ export const dfs = ( discovered.add(current) - const nodeDataOption = getMapSafe(graph.nodes, current) - if (Option.isNone(nodeDataOption)) { + const nodeDataOption = graph.nodes.get(current) + if (nodeDataOption === undefined) { continue } @@ -3021,7 +3235,7 @@ export const dfs = ( } } - return { done: false, value: f(current, nodeDataOption.value) } + return { done: false, value: f(current, nodeDataOption) } } return { done: true, value: undefined } as const @@ -3032,17 +3246,6 @@ export const dfs = ( })) } -/** - * Configuration options for BFS iterator. - * - * @since 3.18.0 - * @category models - */ -export interface BfsConfig { - readonly startNodes?: Array - readonly direction?: Direction -} - /** * Creates a new BFS iterator with optional configuration. * @@ -3062,7 +3265,7 @@ export interface BfsConfig { * }) * * // Start from a specific node - * const bfs1 = Graph.bfs(graph, { startNodes: [0] }) + * const bfs1 = Graph.bfs(graph, { start: [0] }) * for (const nodeIndex of Graph.indices(bfs1)) { * console.log(nodeIndex) // Traverses in BFS order: 0, 1, 2 * } @@ -3077,21 +3280,21 @@ export interface BfsConfig { */ export const bfs = ( graph: Graph | MutableGraph, - config: BfsConfig = {} + config: SearchConfig = {} ): NodeWalker => { - const startNodes = config.startNodes ?? [] + const start = config.start ?? [] const direction = config.direction ?? "outgoing" // Validate that all start nodes exist - for (const nodeIndex of startNodes) { + for (const nodeIndex of start) { if (!hasNode(graph, nodeIndex)) { - throw new Error(`Start node ${nodeIndex} does not exist`) + throw missingNode(nodeIndex) } } return new Walker((f) => ({ [Symbol.iterator]: () => { - const queue = [...startNodes] + const queue = [...start] const discovered = new Set() const nextMapped = () => { @@ -3193,7 +3396,7 @@ export const topo = ( // Validate that all initial nodes exist for (const nodeIndex of initials) { if (!hasNode(graph, nodeIndex)) { - throw new Error(`Initial node ${nodeIndex} does not exist`) + throw missingNode(nodeIndex) } } @@ -3262,17 +3465,6 @@ export const topo = ( })) } -/** - * Configuration options for DFS postorder iterator. - * - * @since 3.18.0 - * @category models - */ -export interface DfsPostOrderConfig { - readonly startNodes?: Array - readonly direction?: Direction -} - /** * Creates a new DFS postorder iterator with optional configuration. * @@ -3293,7 +3485,7 @@ export interface DfsPostOrderConfig { * }) * * // Postorder: children before parents - * const postOrder = Graph.dfsPostOrder(graph, { startNodes: [0] }) + * const postOrder = Graph.dfsPostOrder(graph, { start: [0] }) * for (const node of postOrder) { * console.log(node) // 1, 2, 0 * } @@ -3304,15 +3496,15 @@ export interface DfsPostOrderConfig { */ export const dfsPostOrder = ( graph: Graph | MutableGraph, - config: DfsPostOrderConfig = {} + config: SearchConfig = {} ): NodeWalker => { - const startNodes = config.startNodes ?? [] + const start = config.start ?? [] const direction = config.direction ?? 
"outgoing" // Validate that all start nodes exist - for (const nodeIndex of startNodes) { + for (const nodeIndex of start) { if (!hasNode(graph, nodeIndex)) { - throw new Error(`Start node ${nodeIndex} does not exist`) + throw missingNode(nodeIndex) } } @@ -3323,8 +3515,8 @@ export const dfsPostOrder = ( const finished = new Set() // Initialize stack with start nodes - for (let i = startNodes.length - 1; i >= 0; i--) { - stack.push({ node: startNodes[i], visitedChildren: false }) + for (let i = start.length - 1; i >= 0; i--) { + stack.push({ node: start[i], visitedChildren: false }) } const nextMapped = () => { @@ -3522,10 +3714,10 @@ export const externals = ( let current = nodeIterator.next() while (!current.done) { const [nodeIndex, nodeData] = current.value - const adjacencyList = getMapSafe(adjacencyMap, nodeIndex) + const adjacencyList = adjacencyMap.get(nodeIndex) // Node is external if it has no edges in the specified direction - if (Option.isNone(adjacencyList) || adjacencyList.value.length === 0) { + if (adjacencyList === undefined || adjacencyList.length === 0) { return { done: false, value: f(nodeIndex, nodeData) } } current = nodeIterator.next() diff --git a/packages/effect/src/HashRing.ts b/packages/effect/src/HashRing.ts new file mode 100644 index 00000000000..47f2e8d6a04 --- /dev/null +++ b/packages/effect/src/HashRing.ts @@ -0,0 +1,317 @@ +/** + * @since 3.19.0 + * @experimental + */ +import { dual } from "./Function.js" +import * as Hash from "./Hash.js" +import * as Inspectable from "./Inspectable.js" +import * as Iterable from "./Iterable.js" +import { type Pipeable, pipeArguments } from "./Pipeable.js" +import { hasProperty } from "./Predicate.js" +import * as PrimaryKey from "./PrimaryKey.js" + +const TypeId = "~effect/cluster/HashRing" as const + +/** + * @since 3.19.0 + * @category Models + * @experimental + */ +export interface HashRing
extends Pipeable, Iterable { + readonly [TypeId]: typeof TypeId + readonly baseWeight: number + totalWeightCache: number + readonly nodes: Map + ring: Array<[hash: number, node: string]> +} + +/** + * @since 3.19.0 + * @category Guards + * @experimental + */ +export const isHashRing = (u: unknown): u is HashRing => hasProperty(u, TypeId) + +/** + * @since 3.19.0 + * @category Constructors + * @experimental + */ +export const make = (options?: { + readonly baseWeight?: number | undefined +}): HashRing => { + const self = Object.create(Proto) + self.baseWeight = Math.max(options?.baseWeight ?? 128, 1) + self.totalWeightCache = 0 + self.nodes = new Map() + self.ring = [] + return self +} + +const Proto = { + [TypeId]: TypeId, + [Symbol.iterator](this: HashRing): Iterator { + return Iterable.map(this.nodes.values(), ([n]) => n)[Symbol.iterator]() + }, + pipe() { + return pipeArguments(this, arguments) + }, + ...Inspectable.BaseProto, + toJSON(this: HashRing) { + return { + _id: "HashRing", + baseWeight: this.baseWeight, + nodes: this.ring.map(([, n]) => this.nodes.get(n)![0]) + } + } +} + +/** + * Add new nodes to the ring. If a node already exists in the ring, it + * will be updated. For example, you can use this to update the node's weight. + * + * @since 3.19.0 + * @category Combinators + * @experimental + */ +export const addMany: { + (nodes: Iterable, options?: { + readonly weight?: number | undefined + }): (self: HashRing) => HashRing + (self: HashRing, nodes: Iterable, options?: { + readonly weight?: number | undefined + }): HashRing +} = dual( + (args) => isHashRing(args[0]), + (self: HashRing, nodes: Iterable, options?: { + readonly weight?: number | undefined + }): HashRing => { + const weight = Math.max(options?.weight ?? 1, 0.1) + const keys: Array = [] + let toRemove: Set | undefined + for (const node of nodes) { + const key = PrimaryKey.value(node) + const entry = self.nodes.get(key) + if (entry) { + if (entry[1] === weight) continue + toRemove ??= new Set() + toRemove.add(key) + self.totalWeightCache -= entry[1] + self.totalWeightCache += weight + entry[1] = weight + } else { + self.nodes.set(key, [node, weight]) + self.totalWeightCache += weight + } + keys.push(key) + } + if (toRemove) { + self.ring = self.ring.filter(([, n]) => !toRemove.has(n)) + } + addNodesToRing(self, keys, Math.round(weight * self.baseWeight)) + return self + } +) + +function addNodesToRing(self: HashRing, keys: Array, weight: number) { + for (let i = weight; i > 0; i--) { + for (let j = 0; j < keys.length; j++) { + const key = keys[j] + self.ring.push([ + Hash.string(`${key}:${i}`), + key + ]) + } + } + self.ring.sort((a, b) => a[0] - b[0]) +} + +/** + * Add a new node to the ring. If the node already exists in the ring, it + * will be updated. For example, you can use this to update the node's weight. + * + * @since 3.19.0 + * @category Combinators + * @experimental + */ +export const add: { + (node: A, options?: { + readonly weight?: number | undefined + }): (self: HashRing) => HashRing + (self: HashRing, node: A, options?: { + readonly weight?: number | undefined + }): HashRing +} = dual((args) => isHashRing(args[0]), (self: HashRing, node: A, options?: { + readonly weight?: number | undefined +}): HashRing => addMany(self, [node], options)) + +/** + * Removes the node from the ring. No-op's if the node does not exist. 
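For orientation, a minimal end-to-end sketch of the ring, assuming members only need to implement `PrimaryKey` (the `Node` class here is hypothetical, as are the host names and shard count):

```ts
import { HashRing, PrimaryKey } from "effect"

// Hypothetical member type: anything with a stable primary key works
class Node implements PrimaryKey.PrimaryKey {
  constructor(readonly host: string) {}
  [PrimaryKey.symbol]() {
    return this.host
  }
}

const ring = HashRing.make<Node>().pipe(
  HashRing.add(new Node("runner-1")),
  HashRing.add(new Node("runner-2"), { weight: 2 }) // roughly twice the virtual nodes
)

console.log(HashRing.get(ring, "entity-42")?.host) // stable owner for this key
console.log(HashRing.getShards(ring, 8)?.map((node) => node.host)) // weight-balanced spread
```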
+
+/**
+ * Removes the node from the ring. Does nothing if the node does not exist.
+ *
+ * @since 3.19.0
+ * @category Combinators
+ * @experimental
+ */
+export const remove: {
+  <A extends PrimaryKey.PrimaryKey>(node: A): (self: HashRing<A>) => HashRing<A>
+  <A extends PrimaryKey.PrimaryKey>(self: HashRing<A>, node: A): HashRing<A>
+} = dual(2, <A extends PrimaryKey.PrimaryKey>(self: HashRing<A>, node: A): HashRing<A> => {
+  const key = PrimaryKey.value(node)
+  const entry = self.nodes.get(key)
+  if (entry) {
+    self.nodes.delete(key)
+    self.ring = self.ring.filter(([, n]) => n !== key)
+    self.totalWeightCache -= entry[1]
+  }
+  return self
+})
+
+/**
+ * @since 3.19.0
+ * @category Combinators
+ * @experimental
+ */
+export const has: {
+  <A extends PrimaryKey.PrimaryKey>(node: A): (self: HashRing<A>) => boolean
+  <A extends PrimaryKey.PrimaryKey>(self: HashRing<A>, node: A): boolean
+} = dual(
+  2,
+  <A extends PrimaryKey.PrimaryKey>(self: HashRing<A>, node: A): boolean => self.nodes.has(PrimaryKey.value(node))
+)
+
+/**
+ * Gets the node which should handle the given input. Returns undefined if
+ * the hashring has no elements with weight.
+ *
+ * @since 3.19.0
+ * @category Combinators
+ * @experimental
+ */
+export const get = <A extends PrimaryKey.PrimaryKey>(self: HashRing<A>, input: string): A | undefined => {
+  if (self.ring.length === 0) {
+    return undefined
+  }
+  const index = getIndexForInput(self, Hash.string(input))[0]
+  const node = self.ring[index][1]!
+  return self.nodes.get(node)![0]
+}
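Continuing the sketch above: `get` is a pure function of the ring contents, so routing is stable for a given membership (the input key is illustrative):

```ts
// The same input maps to the same runner on every call, in every process
// that has built an identical ring
const owner = HashRing.get(ring, "entity-42")

// An empty ring has no owner for anything
HashRing.get(HashRing.make<Runner>(), "entity-42") // undefined
```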
+
+/**
+ * Distributes `count` shards across the nodes in the ring, attempting to
+ * balance the number of shards allocated to each node. Returns undefined if
+ * the hashring has no elements with weight.
+ *
+ * @since 3.19.0
+ * @category Combinators
+ * @experimental
+ */
+export const getShards = <A extends PrimaryKey.PrimaryKey>(self: HashRing<A>, count: number): Array<A> | undefined => {
+  if (self.ring.length === 0) {
+    return undefined
+  }
+
+  const shards = new Array<A>(count)
+
+  // for tracking how many shards have been allocated to each node
+  const allocations = new Map<string, number>()
+  // for tracking which shards still need to be allocated
+  const remaining = new Set<number>()
+  // for tracking which nodes have reached the max allocation
+  const exclude = new Set<string>()
+
+  // First pass - allocate the closest nodes, skipping nodes that have reached
+  // max
+  const distances = new Array<[shard: number, node: string, distance: number]>(count)
+  for (let shard = 0; shard < count; shard++) {
+    const hash = (shardHashes[shard] ??= Hash.string(`shard-${shard}`))
+    const [index, distance] = getIndexForInput(self, hash)
+    const node = self.ring[index][1]!
+    distances[shard] = [shard, node, distance]
+    remaining.add(shard)
+  }
+  distances.sort((a, b) => a[2] - b[2])
+  for (let i = 0; i < count; i++) {
+    const [shard, node] = distances[i]
+    if (exclude.has(node)) continue
+    const [value, weight] = self.nodes.get(node)!
+    shards[shard] = value
+    remaining.delete(shard)
+    const nodeCount = (allocations.get(node) ?? 0) + 1
+    allocations.set(node, nodeCount)
+    const maxPerNode = Math.max(1, Math.floor(count * (weight / self.totalWeightCache)))
+    if (nodeCount >= maxPerNode) {
+      exclude.add(node)
+    }
+  }
+
+  // Second pass - allocate any remaining shards, skipping nodes that have
+  // reached max
+  let allAtMax = exclude.size === self.nodes.size
+  remaining.forEach((shard) => {
+    const index = getIndexForInput(self, shardHashes[shard], allAtMax ? undefined : exclude)[0]
+    const node = self.ring[index][1]
+    const [value, weight] = self.nodes.get(node)!
+    shards[shard] = value
+
+    if (allAtMax) return
+    const nodeCount = (allocations.get(node) ?? 0) + 1
+    allocations.set(node, nodeCount)
+    const maxPerNode = Math.max(1, Math.floor(count * (weight / self.totalWeightCache)))
+    if (nodeCount >= maxPerNode) {
+      exclude.add(node)
+      if (exclude.size === self.nodes.size) {
+        allAtMax = true
+      }
+    }
+  })
+
+  return shards
+}
+
+const shardHashes: Array<number> = []
+
+function getIndexForInput(
+  self: HashRing<any>,
+  hash: number,
+  exclude?: ReadonlySet<string> | undefined
+): readonly [index: number, distance: number] {
+  const ring = self.ring
+  const len = ring.length
+
+  let mid: number
+  let lo = 0
+  let hi = len - 1
+
+  while (lo <= hi) {
+    mid = ((lo + hi) / 2) >>> 0
+    if (ring[mid][0] >= hash) {
+      hi = mid - 1
+    } else {
+      lo = mid + 1
+    }
+  }
+  const a = lo === len ? lo - 1 : lo
+  const distA = Math.abs(ring[a][0] - hash)
+  if (exclude === undefined) {
+    const b = lo - 1
+    if (b < 0) {
+      return [a, distA]
+    }
+    const distB = Math.abs(ring[b][0] - hash)
+    return distA <= distB ? [a, distA] : [b, distB]
+  } else if (!exclude.has(ring[a][1])) {
+    return [a, distA]
+  }
+  const range = Math.max(lo, len - lo)
+  for (let i = 1; i < range; i++) {
+    let index = lo - i
+    if (index >= 0 && index < len && !exclude.has(ring[index][1])) {
+      return [index, Math.abs(ring[index][0] - hash)]
+    }
+    index = lo + i
+    if (index >= 0 && index < len && !exclude.has(ring[index][1])) {
+      return [index, Math.abs(ring[index][0] - hash)]
+    }
+  }
+  return [a, distA]
+}
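Rounding out the sketch, `getShards` is the allocation entry point the cluster packages below lean on; the shard count and the expected split are illustrative:

```ts
// Deal 300 shards over the ring: assignments[shardId] is the owning runner.
// With runner-2 at weight 2, expect it to take roughly two shards for every
// one that runner-1 takes.
const assignments = HashRing.getShards(ring, 300)

if (assignments !== undefined) {
  const perRunner = new Map<string, number>()
  for (const runner of assignments) {
    perRunner.set(runner.id, (perRunner.get(runner.id) ?? 0) + 1)
  }
  // e.g. Map { "runner-1" => ~100, "runner-2" => ~200 }
}
```

diff --git a/packages/effect/src/index.ts b/packages/effect/src/index.ts
index f94e0ecbc37..06cfbe97bdb 100644
--- a/packages/effect/src/index.ts
+++ b/packages/effect/src/index.ts
@@ -376,6 +376,12 @@ export * as Hash from "./Hash.js"
  */
 export * as HashMap from "./HashMap.js"
 
+/**
+ * @since 3.19.0
+ * @experimental
+ */
+export * as HashRing from "./HashRing.js"
+
 /**
  * # HashSet
  *
diff --git a/packages/effect/test/Graph.test.ts b/packages/effect/test/Graph.test.ts
index 7bc96468842..42d03a2d6e0 100644
--- a/packages/effect/test/Graph.test.ts
+++ b/packages/effect/test/Graph.test.ts
@@ -1259,7 +1259,7 @@ describe("Graph", () => {
         const nonExistentNode = 999
         Graph.addEdge(mutable, nonExistentNode, nodeB, 42)
       })
-    }).toThrow("Source node 999 does not exist")
+    }).toThrow("Node 999 does not exist")
   })
 
   it("should throw error when target node doesn't exist", () => {
@@ -1269,7 +1269,7 @@
         const nonExistentNode = 999
         Graph.addEdge(mutable, nodeA, nonExistentNode, 42)
       })
-    }).toThrow("Target node 999 does not exist")
+    }).toThrow("Node 999 does not exist")
   })
 })
@@ -1752,6 +1752,328 @@ describe("Graph", () => {
       expect(dot).toContain("\"0\" -- \"3\" [label=\"friends\"];")
     })
   })
+
+  describe("toMermaid", () => {
+    it("should export empty directed graph", () => {
+      const graph = Graph.directed()
+      const mermaid = Graph.toMermaid(graph)
+      expect(mermaid).toBe("flowchart TD")
+    })
+
+    it("should export empty undirected graph", () => {
+      const graph = Graph.undirected()
+      const mermaid = Graph.toMermaid(graph)
+      expect(mermaid).toBe("graph TD")
+    })
+
+    it("should export directed graph with nodes", () => {
+      const graph = Graph.mutate(Graph.directed(), (mutable) => {
+        Graph.addNode(mutable, "Node A")
+        Graph.addNode(mutable, "Node B")
+        Graph.addNode(mutable, "Node C")
+      })
+
+      const mermaid = Graph.toMermaid(graph)
+      expect(mermaid).toContain("flowchart TD")
+      expect(mermaid).toContain("0[\"Node A\"]")
+      expect(mermaid).toContain("1[\"Node B\"]")
+      expect(mermaid).toContain("2[\"Node C\"]")
+    })
+
+    it("should export undirected graph with nodes", () => {
+      const graph =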
Graph.mutate(Graph.undirected(), (mutable) => { + Graph.addNode(mutable, "Alice") + Graph.addNode(mutable, "Bob") + }) + + const mermaid = Graph.toMermaid(graph) + expect(mermaid).toContain("graph TD") + expect(mermaid).toContain("0[\"Alice\"]") + expect(mermaid).toContain("1[\"Bob\"]") + }) + + it("should support all node shapes", () => { + const shapes: Array<[string, Graph.MermaidNodeShape]> = [ + ["rectangle", "rectangle"], + ["rounded", "rounded"], + ["circle", "circle"], + ["diamond", "diamond"], + ["hexagon", "hexagon"], + ["stadium", "stadium"], + ["subroutine", "subroutine"], + ["cylindrical", "cylindrical"] + ] + + shapes.forEach(([shapeName, shapeValue]) => { + const graph = Graph.mutate(Graph.directed(), (mutable) => { + Graph.addNode(mutable, "Test") + }) + + const mermaid = Graph.toMermaid(graph, { + nodeShape: () => shapeValue + }) + + expect(mermaid).toContain("flowchart TD") + + // Test expected shape format + switch (shapeName) { + case "rectangle": + expect(mermaid).toContain("0[\"Test\"]") + break + case "rounded": + expect(mermaid).toContain("0(\"Test\")") + break + case "circle": + expect(mermaid).toContain("0((\"Test\"))") + break + case "diamond": + expect(mermaid).toContain("0{\"Test\"}") + break + case "hexagon": + expect(mermaid).toContain("0{{\"Test\"}}") + break + case "stadium": + expect(mermaid).toContain("0([\"Test\"])") + break + case "subroutine": + expect(mermaid).toContain("0[[\"Test\"]]") + break + case "cylindrical": + expect(mermaid).toContain("0[(\"Test\")]") + break + } + }) + }) + + it("should escape special characters in labels", () => { + const graph = Graph.mutate(Graph.directed(), (mutable) => { + Graph.addNode(mutable, "Node with \"quotes\"") + Graph.addNode(mutable, "Node with [brackets]") + Graph.addNode(mutable, "Node with | pipe") + Graph.addNode(mutable, "Node with \\ backslash") + Graph.addNode(mutable, "Node with \n newline") + }) + + const mermaid = Graph.toMermaid(graph) + + expect(mermaid).toContain("0[\"Node with #quot;quotes#quot;\"]") + expect(mermaid).toContain("1[\"Node with #91;brackets#93;\"]") + expect(mermaid).toContain("2[\"Node with #124; pipe\"]") + expect(mermaid).toContain("3[\"Node with #92; backslash\"]") + expect(mermaid).toContain("4[\"Node with
newline\"]") + }) + + it("should export directed graph with edges", () => { + const graph = Graph.mutate(Graph.directed(), (mutable) => { + const nodeA = Graph.addNode(mutable, "Node A") + const nodeB = Graph.addNode(mutable, "Node B") + const nodeC = Graph.addNode(mutable, "Node C") + Graph.addEdge(mutable, nodeA, nodeB, 1) + Graph.addEdge(mutable, nodeB, nodeC, 2) + Graph.addEdge(mutable, nodeC, nodeA, 3) + }) + + const mermaid = Graph.toMermaid(graph) + expect(mermaid).toContain("flowchart TD") + expect(mermaid).toContain("0[\"Node A\"]") + expect(mermaid).toContain("1[\"Node B\"]") + expect(mermaid).toContain("2[\"Node C\"]") + expect(mermaid).toContain("0 -->|\"1\"| 1") + expect(mermaid).toContain("1 -->|\"2\"| 2") + expect(mermaid).toContain("2 -->|\"3\"| 0") + }) + + it("should export undirected graph with edges", () => { + const graph = Graph.mutate(Graph.undirected(), (mutable) => { + const alice = Graph.addNode(mutable, "Alice") + const bob = Graph.addNode(mutable, "Bob") + const charlie = Graph.addNode(mutable, "Charlie") + Graph.addEdge(mutable, alice, bob, "friends") + Graph.addEdge(mutable, bob, charlie, "colleagues") + }) + + const mermaid = Graph.toMermaid(graph) + expect(mermaid).toContain("graph TD") + expect(mermaid).toContain("0[\"Alice\"]") + expect(mermaid).toContain("1[\"Bob\"]") + expect(mermaid).toContain("2[\"Charlie\"]") + expect(mermaid).toContain("0 ---|\"friends\"| 1") + expect(mermaid).toContain("1 ---|\"colleagues\"| 2") + }) + + it("should handle empty edge labels", () => { + const graph = Graph.mutate(Graph.directed(), (mutable) => { + const nodeA = Graph.addNode(mutable, "A") + const nodeB = Graph.addNode(mutable, "B") + Graph.addEdge(mutable, nodeA, nodeB, "") + }) + + const mermaid = Graph.toMermaid(graph) + expect(mermaid).toContain("0 --> 1") + }) + + it("should support all diagram directions", () => { + const directions: Array = ["TB", "TD", "BT", "RL", "LR"] + + directions.forEach((dir) => { + const graph = Graph.mutate(Graph.directed(), (mutable) => { + Graph.addNode(mutable, "A") + Graph.addNode(mutable, "B") + }) + + const mermaid = Graph.toMermaid(graph, { direction: dir }) + expect(mermaid).toContain(`flowchart ${dir}`) + expect(mermaid).toContain("0[\"A\"]") + expect(mermaid).toContain("1[\"B\"]") + }) + }) + + it("should auto-detect diagram type based on graph type", () => { + // Directed graph should auto-detect as flowchart + const directedGraph = Graph.mutate(Graph.directed(), (mutable) => { + Graph.addNode(mutable, "A") + }) + const directedMermaid = Graph.toMermaid(directedGraph) + expect(directedMermaid).toContain("flowchart TD") + + // Undirected graph should auto-detect as graph + const undirectedGraph = Graph.mutate(Graph.undirected(), (mutable) => { + Graph.addNode(mutable, "A") + }) + const undirectedMermaid = Graph.toMermaid(undirectedGraph) + expect(undirectedMermaid).toContain("graph TD") + }) + + it("should allow manual diagram type override", () => { + // Override directed graph to use 'graph' type + const directedGraph = Graph.mutate(Graph.directed(), (mutable) => { + Graph.addNode(mutable, "A") + }) + const overriddenMermaid = Graph.toMermaid(directedGraph, { + diagramType: "graph" + }) + expect(overriddenMermaid).toContain("graph TD") + + // Override undirected graph to use 'flowchart' type + const undirectedGraph = Graph.mutate(Graph.undirected(), (mutable) => { + Graph.addNode(mutable, "B") + }) + const overriddenFlowchart = Graph.toMermaid(undirectedGraph, { + diagramType: "flowchart" + }) + 
expect(overriddenFlowchart).toContain("flowchart TD") + }) + + it("should combine direction and diagram type options", () => { + const graph = Graph.mutate(Graph.directed(), (mutable) => { + Graph.addNode(mutable, "Test") + }) + + const mermaid = Graph.toMermaid(graph, { + direction: "LR", + diagramType: "graph" + }) + + expect(mermaid).toContain("graph LR") + expect(mermaid).toContain("0[\"Test\"]") + }) + + it("should handle self-loops correctly", () => { + const graph = Graph.mutate(Graph.directed(), (mutable) => { + const nodeA = Graph.addNode(mutable, "A") + Graph.addEdge(mutable, nodeA, nodeA, "self") + }) + + const mermaid = Graph.toMermaid(graph) + expect(mermaid).toContain("flowchart TD") + expect(mermaid).toContain("0[\"A\"]") + expect(mermaid).toContain("0 -->|\"self\"| 0") + }) + + it("should handle multi-edges correctly", () => { + const graph = Graph.mutate(Graph.directed(), (mutable) => { + const nodeA = Graph.addNode(mutable, "A") + const nodeB = Graph.addNode(mutable, "B") + Graph.addEdge(mutable, nodeA, nodeB, 1) + Graph.addEdge(mutable, nodeA, nodeB, 2) + Graph.addEdge(mutable, nodeA, nodeB, 3) + }) + + const mermaid = Graph.toMermaid(graph) + expect(mermaid).toContain("flowchart TD") + expect(mermaid).toContain("0[\"A\"]") + expect(mermaid).toContain("1[\"B\"]") + // Should contain all three edges + expect(mermaid).toContain("0 -->|\"1\"| 1") + expect(mermaid).toContain("0 -->|\"2\"| 1") + expect(mermaid).toContain("0 -->|\"3\"| 1") + }) + + it("should handle disconnected components", () => { + const graph = Graph.mutate(Graph.directed(), (mutable) => { + // Component 1: A -> B + const nodeA = Graph.addNode(mutable, "A") + const nodeB = Graph.addNode(mutable, "B") + Graph.addEdge(mutable, nodeA, nodeB, "A->B") + + // Component 2: C -> D (disconnected) + const nodeC = Graph.addNode(mutable, "C") + const nodeD = Graph.addNode(mutable, "D") + Graph.addEdge(mutable, nodeC, nodeD, "C->D") + + // Isolated node E + Graph.addNode(mutable, "E") + }) + + const mermaid = Graph.toMermaid(graph) + expect(mermaid).toContain("flowchart TD") + expect(mermaid).toContain("0[\"A\"]") + expect(mermaid).toContain("1[\"B\"]") + expect(mermaid).toContain("2[\"C\"]") + expect(mermaid).toContain("3[\"D\"]") + expect(mermaid).toContain("4[\"E\"]") + expect(mermaid).toContain("0 -->|\"A-#gt;B\"| 1") + expect(mermaid).toContain("2 -->|\"C-#gt;D\"| 3") + }) + + it("should handle custom labels with complex data", () => { + interface NodeData { + id: string + value: number + metadata: { type: string } + } + + interface EdgeData { + weight: number + type: string + } + + const graph = Graph.mutate(Graph.directed(), (mutable) => { + const node1 = Graph.addNode(mutable, { + id: "node1", + value: 42, + metadata: { type: "input" } + }) + const node2 = Graph.addNode(mutable, { + id: "node2", + value: 84, + metadata: { type: "processing" } + }) + Graph.addEdge(mutable, node1, node2, { weight: 1.5, type: "data" }) + }) + + const mermaid = Graph.toMermaid(graph, { + nodeLabel: (data) => `${data.id}:${data.value}`, + edgeLabel: (data) => `${data.type}(${data.weight})`, + direction: "LR" + }) + + expect(mermaid).toContain("flowchart LR") + expect(mermaid).toContain("0[\"node1:42\"]") + expect(mermaid).toContain("1[\"node2:84\"]") + expect(mermaid).toContain("0 -->|\"data#40;1.5#41;\"| 1") + }) + }) }) describe("Graph Structure Analysis Algorithms (Phase 5A)", () => { @@ -2024,12 +2346,12 @@ describe("Graph", () => { Graph.addEdge(mutable, nodeB, nodeC, 2) }) - const result = Graph.dijkstra(graph, nodeA!, nodeC!, 
(edge) => edge) + const result = Graph.dijkstra(graph, { source: nodeA!, target: nodeC!, cost: (edge) => edge }) expect(Option.isSome(result)).toBe(true) if (Option.isSome(result)) { expect(result.value.path).toEqual([nodeA!, nodeB!, nodeC!]) expect(result.value.distance).toBe(7) - expect(result.value.edgeWeights).toEqual([5, 2]) + expect(result.value.costs).toEqual([5, 2]) } }) @@ -2046,7 +2368,7 @@ describe("Graph", () => { // No path from A to C }) - const result = Graph.dijkstra(graph, nodeA!, nodeC!, (edge) => edge) + const result = Graph.dijkstra(graph, { source: nodeA!, target: nodeC!, cost: (edge) => edge }) expect(Option.isNone(result)).toBe(true) }) @@ -2057,12 +2379,12 @@ describe("Graph", () => { nodeA = Graph.addNode(mutable, "A") }) - const result = Graph.dijkstra(graph, nodeA!, nodeA!, (edge) => edge) + const result = Graph.dijkstra(graph, { source: nodeA!, target: nodeA!, cost: (edge) => edge }) expect(Option.isSome(result)).toBe(true) if (Option.isSome(result)) { expect(result.value.path).toEqual([nodeA!]) expect(result.value.distance).toBe(0) - expect(result.value.edgeWeights).toEqual([]) + expect(result.value.costs).toEqual([]) } }) @@ -2076,7 +2398,7 @@ describe("Graph", () => { Graph.addEdge(mutable, nodeA, nodeB, -1) }) - expect(() => Graph.dijkstra(graph, nodeA!, nodeB!, (edge) => edge)).toThrow( + expect(() => Graph.dijkstra(graph, { source: nodeA!, target: nodeB!, cost: (edge) => edge })).toThrow( "Dijkstra's algorithm requires non-negative edge weights" ) }) @@ -2084,8 +2406,8 @@ describe("Graph", () => { it("should throw for non-existent nodes", () => { const graph = Graph.directed() - expect(() => Graph.dijkstra(graph, 0, 1, (edge) => edge)).toThrow( - "Source node 0 does not exist" + expect(() => Graph.dijkstra(graph, { source: 0, target: 1, cost: (edge) => edge })).toThrow( + "Node 0 does not exist" ) }) }) @@ -2107,12 +2429,12 @@ describe("Graph", () => { const heuristic = (source: { x: number; y: number }, target: { x: number; y: number }) => Math.abs(source.x - target.x) + Math.abs(source.y - target.y) - const result = Graph.astar(graph, nodeA!, nodeC!, (edge) => edge, heuristic) + const result = Graph.astar(graph, { source: nodeA!, target: nodeC!, cost: (edge) => edge, heuristic }) expect(Option.isSome(result)).toBe(true) if (Option.isSome(result)) { expect(result.value.path).toEqual([nodeA!, nodeB!, nodeC!]) expect(result.value.distance).toBe(2) - expect(result.value.edgeWeights).toEqual([1, 1]) + expect(result.value.costs).toEqual([1, 1]) } }) @@ -2128,7 +2450,7 @@ describe("Graph", () => { const heuristic = (source: { x: number; y: number }, target: { x: number; y: number }) => Math.abs(source.x - target.x) + Math.abs(source.y - target.y) - const result = Graph.astar(graph, 0, 2, (edge) => edge, heuristic) + const result = Graph.astar(graph, { source: 0, target: 2, cost: (edge) => edge, heuristic }) expect(Option.isNone(result)).toBe(true) }) @@ -2140,12 +2462,12 @@ describe("Graph", () => { const heuristic = (source: { x: number; y: number }, target: { x: number; y: number }) => Math.abs(source.x - target.x) + Math.abs(source.y - target.y) - const result = Graph.astar(graph, 0, 0, (edge) => edge, heuristic) + const result = Graph.astar(graph, { source: 0, target: 0, cost: (edge) => edge, heuristic }) expect(Option.isSome(result)).toBe(true) if (Option.isSome(result)) { expect(result.value.path).toEqual([0]) expect(result.value.distance).toBe(0) - expect(result.value.edgeWeights).toEqual([]) + expect(result.value.costs).toEqual([]) } }) @@ -2159,7 
+2481,7 @@ describe("Graph", () => { const heuristic = (source: { x: number; y: number }, target: { x: number; y: number }) => Math.abs(source.x - target.x) + Math.abs(source.y - target.y) - expect(() => Graph.astar(graph, 0, 1, (edge) => edge, heuristic)).toThrow( + expect(() => Graph.astar(graph, { source: 0, target: 1, cost: (edge) => edge, heuristic })).toThrow( "A* algorithm requires non-negative edge weights" ) }) @@ -2176,12 +2498,12 @@ describe("Graph", () => { Graph.addEdge(mutable, a, c, 5) }) - const result = Graph.bellmanFord(graph, 0, 2, (edge) => edge) + const result = Graph.bellmanFord(graph, { source: 0, target: 2, cost: (edge) => edge }) expect(Option.isSome(result)).toBe(true) if (Option.isSome(result)) { expect(result.value.path).toEqual([0, 1, 2]) expect(result.value.distance).toBe(2) - expect(result.value.edgeWeights).toEqual([-1, 3]) + expect(result.value.costs).toEqual([-1, 3]) } }) @@ -2194,7 +2516,7 @@ describe("Graph", () => { // No path from A to C }) - const result = Graph.bellmanFord(graph, 0, 2, (edge) => edge) + const result = Graph.bellmanFord(graph, { source: 0, target: 2, cost: (edge) => edge }) expect(Option.isNone(result)).toBe(true) }) @@ -2203,12 +2525,12 @@ describe("Graph", () => { Graph.addNode(mutable, "A") }) - const result = Graph.bellmanFord(graph, 0, 0, (edge) => edge) + const result = Graph.bellmanFord(graph, { source: 0, target: 0, cost: (edge) => edge }) expect(Option.isSome(result)).toBe(true) if (Option.isSome(result)) { expect(result.value.path).toEqual([0]) expect(result.value.distance).toBe(0) - expect(result.value.edgeWeights).toEqual([]) + expect(result.value.costs).toEqual([]) } }) @@ -2222,7 +2544,7 @@ describe("Graph", () => { Graph.addEdge(mutable, c, a, 1) }) - const result = Graph.bellmanFord(graph, 0, 2, (edge) => edge) + const result = Graph.bellmanFord(graph, { source: 0, target: 2, cost: (edge) => edge }) expect(Option.isNone(result)).toBe(true) }) }) @@ -2243,7 +2565,7 @@ describe("Graph", () => { // Check distance A to C (should be 5 via B, not 7 direct) expect(result.distances.get(0)?.get(2)).toBe(5) expect(result.paths.get(0)?.get(2)).toEqual([0, 1, 2]) - expect(result.edgeWeights.get(0)?.get(2)).toEqual([3, 2]) + expect(result.costs.get(0)?.get(2)).toEqual([3, 2]) // Check distance A to B expect(result.distances.get(0)?.get(1)).toBe(3) @@ -2278,7 +2600,7 @@ describe("Graph", () => { expect(result.distances.get(0)?.get(0)).toBe(0) expect(result.paths.get(0)?.get(0)).toEqual([0]) - expect(result.edgeWeights.get(0)?.get(0)).toEqual([]) + expect(result.costs.get(0)?.get(0)).toEqual([]) }) it("should detect negative cycles", () => { @@ -2305,7 +2627,7 @@ describe("Graph", () => { Graph.addEdge(mutable, b, c, 2) }) - const dfsIterator = Graph.dfs(graph, { startNodes: [0] }) + const dfsIterator = Graph.dfs(graph, { start: [0] }) const values = Array.from(Graph.values(dfsIterator)) expect(values).toEqual(["A", "B", "C"]) @@ -2320,7 +2642,7 @@ describe("Graph", () => { Graph.addEdge(mutable, b, c, 2) }) - const dfsIterator = Graph.dfs(graph, { startNodes: [0] }) + const dfsIterator = Graph.dfs(graph, { start: [0] }) const entries = Array.from(Graph.entries(dfsIterator)) expect(entries).toEqual([[0, "A"], [1, "B"], [2, "C"]]) @@ -2335,7 +2657,7 @@ describe("Graph", () => { Graph.addEdge(mutable, a, c, 2) }) - const bfsIterator = Graph.bfs(graph, { startNodes: [0] }) + const bfsIterator = Graph.bfs(graph, { start: [0] }) const values = Array.from(Graph.values(bfsIterator)) expect(values).toEqual(["A", "B", "C"]) @@ -2350,7 +2672,7 
@@ describe("Graph", () => { Graph.addEdge(mutable, a, c, 2) }) - const bfsIterator = Graph.bfs(graph, { startNodes: [0] }) + const bfsIterator = Graph.bfs(graph, { start: [0] }) const entries = Array.from(Graph.entries(bfsIterator)) expect(entries).toEqual([[0, "A"], [1, "B"], [2, "C"]]) @@ -2438,7 +2760,7 @@ describe("Graph", () => { Graph.addEdge(mutable, b, c, 2) }) - const dfsPostIterator = Graph.dfsPostOrder(graph, { startNodes: [0] }) + const dfsPostIterator = Graph.dfsPostOrder(graph, { start: [0] }) const values = Array.from(Graph.values(dfsPostIterator)) expect(values).toEqual(["C", "B", "A"]) // Postorder: children before parents @@ -2453,7 +2775,7 @@ describe("Graph", () => { Graph.addEdge(mutable, b, c, 2) }) - const dfsPostIterator = Graph.dfsPostOrder(graph, { startNodes: [0] }) + const dfsPostIterator = Graph.dfsPostOrder(graph, { start: [0] }) const entries = Array.from(Graph.entries(dfsPostIterator)) expect(entries).toEqual([[2, "C"], [1, "B"], [0, "A"]]) // Postorder: children before parents @@ -2470,7 +2792,7 @@ describe("Graph", () => { Graph.addEdge(mutable, b, c, 2) }) - const postOrder = Array.from(Graph.indices(Graph.dfsPostOrder(graph, { startNodes: [0] }))) + const postOrder = Array.from(Graph.indices(Graph.dfsPostOrder(graph, { start: [0] }))) expect(postOrder).toEqual([2, 1, 0]) // Children before parents }) @@ -2488,7 +2810,7 @@ describe("Graph", () => { Graph.addEdge(mutable, right, leaf2, 4) }) - const postOrder = Array.from(Graph.indices(Graph.dfsPostOrder(graph, { startNodes: [0] }))) + const postOrder = Array.from(Graph.indices(Graph.dfsPostOrder(graph, { start: [0] }))) // Should visit leaves first, then parents expect(postOrder).toEqual([3, 1, 4, 2, 0]) }) @@ -2498,7 +2820,7 @@ describe("Graph", () => { Graph.addNode(mutable, "A") }) - const postOrder = Array.from(Graph.dfsPostOrder(graph, { startNodes: [] })) + const postOrder = Array.from(Graph.dfsPostOrder(graph, { start: [] })) expect(postOrder).toEqual([]) }) @@ -2514,7 +2836,7 @@ describe("Graph", () => { // No connection between (A,B) and (C,D) }) - const postOrder = Array.from(Graph.indices(Graph.dfsPostOrder(graph, { startNodes: [0, 2] }))) + const postOrder = Array.from(Graph.indices(Graph.dfsPostOrder(graph, { start: [0, 2] }))) expect(postOrder).toEqual([1, 0, 3, 2]) // Each component in postorder }) @@ -2530,7 +2852,7 @@ describe("Graph", () => { // Starting from C, going backwards const postOrder = Array.from( Graph.indices(Graph.dfsPostOrder(graph, { - startNodes: [2], + start: [2], direction: "incoming" })) ) @@ -2547,7 +2869,7 @@ describe("Graph", () => { Graph.addEdge(mutable, c, a, 3) // Creates cycle }) - const postOrder = Array.from(Graph.indices(Graph.dfsPostOrder(graph, { startNodes: [0] }))) + const postOrder = Array.from(Graph.indices(Graph.dfsPostOrder(graph, { start: [0] }))) // Should handle cycle without infinite loop, visiting each node once expect(postOrder.length).toBe(3) expect(new Set(postOrder)).toEqual(new Set([0, 1, 2])) @@ -2558,8 +2880,8 @@ describe("Graph", () => { Graph.addNode(mutable, "A") }) - expect(() => Graph.dfsPostOrder(graph, { startNodes: [99] })) - .toThrow("Start node 99 does not exist") + expect(() => Graph.dfsPostOrder(graph, { start: [99] })) + .toThrow("Node 99 does not exist") }) it("should be iterable multiple times with fresh state", () => { @@ -2569,7 +2891,7 @@ describe("Graph", () => { Graph.addEdge(mutable, a, b, 1) }) - const iterator = Graph.dfsPostOrder(graph, { startNodes: [0] }) + const iterator = Graph.dfsPostOrder(graph, { start: [0] 
}) const firstRun = Array.from(Graph.indices(iterator)) const secondRun = Array.from(Graph.indices(iterator)) @@ -2601,7 +2923,7 @@ describe("Graph", () => { return originalGetNode.call(this, key) } - const iterator = Graph.dfsPostOrder(graph, { startNodes: [0] }) + const iterator = Graph.dfsPostOrder(graph, { start: [0] }) const results = Array.from(iterator) // Restore original method @@ -2780,7 +3102,7 @@ describe("Graph", () => { }) // Should work with different iterator types - const dfsIterable = Graph.dfs(graph, { startNodes: [0] }) + const dfsIterable = Graph.dfs(graph, { start: [0] }) const nodesIterable = Graph.nodes(graph) const externalsIterable = Graph.externals(graph) @@ -2809,7 +3131,7 @@ describe("Graph", () => { } // Both traversal and element iterators implement NodeWalker - const dfsNodes = Graph.dfs(graph, { startNodes: [0] }) + const dfsNodes = Graph.dfs(graph, { start: [0] }) const allNodes = Graph.nodes(graph) const externalNodes = Graph.externals(graph, { direction: "outgoing" }) @@ -2828,7 +3150,7 @@ describe("Graph", () => { const nodeIterable: Graph.NodeWalker = Graph.nodes(graph) const traversalIterable: Graph.NodeWalker = Graph.dfs(graph, { - startNodes: [0] + start: [0] }) expect(Array.from(Graph.indices(nodeIterable))).toEqual([0, 1]) @@ -2847,7 +3169,7 @@ describe("Graph", () => { }) // Test with traversal iterators - const dfsIterable = Graph.dfs(graph, { startNodes: [0] }) + const dfsIterable = Graph.dfs(graph, { start: [0] }) const dfsValues = Array.from(Graph.values(dfsIterable)) expect(dfsValues).toEqual(["A", "B", "C"]) @@ -2870,7 +3192,7 @@ describe("Graph", () => { }) // Test with traversal iterator - const dfsIterable = Graph.dfs(graph, { startNodes: [0] }) + const dfsIterable = Graph.dfs(graph, { start: [0] }) const dfsEntries = Array.from(Graph.entries(dfsIterable)) expect(dfsEntries).toEqual([[0, "A"], [1, "B"]]) @@ -2892,7 +3214,7 @@ describe("Graph", () => { Graph.addEdge(mutable, a, b, 1) }) - const dfs = Graph.dfs(graph, { startNodes: [0] }) + const dfs = Graph.dfs(graph, { start: [0] }) // Instance methods should work const instanceValues = Array.from(Graph.values(dfs)) @@ -2909,7 +3231,7 @@ describe("Graph", () => { Graph.addEdge(mutable, a, b, 1) }) - const dfs = Graph.dfs(graph, { startNodes: [0] }) + const dfs = Graph.dfs(graph, { start: [0] }) // Test mapEntry with custom mapping const custom = Array.from(dfs.visit((index, data) => ({ id: index, name: data }))) diff --git a/packages/platform-bun/src/BunClusterRunnerHttp.ts b/packages/platform-bun/src/BunClusterHttp.ts similarity index 58% rename from packages/platform-bun/src/BunClusterRunnerHttp.ts rename to packages/platform-bun/src/BunClusterHttp.ts index 2975c609d97..476a4faf98c 100644 --- a/packages/platform-bun/src/BunClusterRunnerHttp.ts +++ b/packages/platform-bun/src/BunClusterHttp.ts @@ -3,12 +3,13 @@ */ import * as HttpRunner from "@effect/cluster/HttpRunner" import * as MessageStorage from "@effect/cluster/MessageStorage" -import type * as Runners from "@effect/cluster/Runners" +import * as RunnerHealth from "@effect/cluster/RunnerHealth" +import * as Runners from "@effect/cluster/Runners" +import * as RunnerStorage from "@effect/cluster/RunnerStorage" import type { Sharding } from "@effect/cluster/Sharding" import * as ShardingConfig from "@effect/cluster/ShardingConfig" -import * as ShardStorage from "@effect/cluster/ShardStorage" import * as SqlMessageStorage from "@effect/cluster/SqlMessageStorage" -import * as SqlShardStorage from "@effect/cluster/SqlShardStorage" 
+import * as SqlRunnerStorage from "@effect/cluster/SqlRunnerStorage" import type * as Etag from "@effect/platform/Etag" import * as FetchHttpClient from "@effect/platform/FetchHttpClient" import type { HttpPlatform } from "@effect/platform/HttpPlatform" @@ -16,7 +17,6 @@ import type { HttpServer } from "@effect/platform/HttpServer" import type { ServeError } from "@effect/platform/HttpServerError" import * as RpcSerialization from "@effect/rpc/RpcSerialization" import type { SqlClient } from "@effect/sql/SqlClient" -import type { SqlError } from "@effect/sql/SqlError" import type { ConfigError } from "effect/ConfigError" import * as Effect from "effect/Effect" import * as Layer from "effect/Layer" @@ -38,11 +38,9 @@ export const layerHttpServer: Layer.Layer< ShardingConfig.ShardingConfig > = Effect.gen(function*() { const config = yield* ShardingConfig.ShardingConfig - const listenAddress = config.runnerListenAddress.pipe( - Option.orElse(() => config.runnerAddress) - ) - if (Option.isNone(listenAddress)) { - return yield* Effect.dieMessage("BunClusterHttpRunners.layerHttpServer: ShardingConfig.runnerAddress is None") + const listenAddress = Option.orElse(config.runnerListenAddress, () => config.runnerAddress) + if (listenAddress._tag === "None") { + return yield* Effect.die("BunClusterHttp.layerHttpServer: ShardingConfig.runnerAddress is None") } return BunHttpServer.layer(listenAddress.value) }).pipe(Layer.unwrapEffect) @@ -53,7 +51,7 @@ export const layerHttpServer: Layer.Layer< */ export const layer = < const ClientOnly extends boolean = false, - const Storage extends "noop" | "sql" = never + const Storage extends "local" | "sql" | "byo" = never >(options: { readonly transport: "http" | "websocket" readonly serialization?: "msgpack" | "ndjson" | undefined @@ -61,14 +59,18 @@ export const layer = < readonly storage?: Storage | undefined readonly shardingConfig?: Partial | undefined }): ClientOnly extends true ? Layer.Layer< - Sharding | Runners.Runners | MessageStorage.MessageStorage, - ConfigError | ("sql" extends Storage ? SqlError : never), - "sql" extends Storage ? SqlClient : never + Sharding | Runners.Runners | ("byo" extends Storage ? never : MessageStorage.MessageStorage), + ConfigError, + "local" extends Storage ? never + : "byo" extends Storage ? (MessageStorage.MessageStorage | RunnerStorage.RunnerStorage) + : SqlClient > : Layer.Layer< Sharding | Runners.Runners | MessageStorage.MessageStorage, - ServeError | ConfigError | ("sql" extends Storage ? SqlError : never), - "sql" extends Storage ? SqlClient : never + ServeError | ConfigError, + "local" extends Storage ? never + : "byo" extends Storage ? (MessageStorage.MessageStorage | RunnerStorage.RunnerStorage) + : SqlClient > => { const layer: Layer.Layer = options.clientOnly @@ -81,16 +83,37 @@ export const layer = < ? Layer.provide(HttpRunner.layerHttp, [layerHttpServer, FetchHttpClient.layer]) : Layer.provide(HttpRunner.layerWebsocket, [layerHttpServer, BunSocket.layerWebSocketConstructor]) + const runnerHealth: Layer.Layer = options?.clientOnly + ? Layer.empty as any + // TODO: when bun supports adding custom CA certificates + // : options?.runnerHealth === "k8s" + // ? RunnerHealth.layerK8s().pipe( + // Layer.provide([NodeFileSystem.layer, layerHttpClientK8s]) + // ) + : RunnerHealth.layerPing.pipe( + Layer.provide(Runners.layerRpc), + Layer.provide( + options.transport === "http" + ? 
HttpRunner.layerClientProtocolHttpDefault.pipe(Layer.provide(FetchHttpClient.layer)) + : HttpRunner.layerClientProtocolWebsocketDefault.pipe(Layer.provide(BunSocket.layerWebSocketConstructor)) + ) + ) + return layer.pipe( + Layer.provide(runnerHealth), Layer.provideMerge( - options?.storage === "sql" ? - SqlMessageStorage.layer - : MessageStorage.layerNoop + options?.storage === "local" + ? MessageStorage.layerNoop + : options?.storage === "byo" + ? Layer.empty + : Layer.orDie(SqlMessageStorage.layer) ), Layer.provide( - options?.storage === "sql" - ? options.clientOnly ? Layer.empty : SqlShardStorage.layer - : ShardStorage.layerNoop + options?.storage === "local" + ? RunnerStorage.layerMemory + : options?.storage === "byo" + ? Layer.empty + : Layer.orDie(SqlRunnerStorage.layer) ), Layer.provide(ShardingConfig.layerFromEnv(options?.shardingConfig)), Layer.provide( diff --git a/packages/platform-bun/src/BunClusterRunnerSocket.ts b/packages/platform-bun/src/BunClusterRunnerSocket.ts deleted file mode 100644 index f863b18e120..00000000000 --- a/packages/platform-bun/src/BunClusterRunnerSocket.ts +++ /dev/null @@ -1,9 +0,0 @@ -/** - * @since 1.0.0 - */ - -/** - * @since 1.0.0 - * @category Re-exports - */ -export * from "@effect/platform-node-shared/NodeClusterRunnerSocket" diff --git a/packages/platform-bun/src/BunClusterShardManagerHttp.ts b/packages/platform-bun/src/BunClusterShardManagerHttp.ts deleted file mode 100644 index be383124142..00000000000 --- a/packages/platform-bun/src/BunClusterShardManagerHttp.ts +++ /dev/null @@ -1,72 +0,0 @@ -/** - * @since 1.0.0 - */ -import * as HttpShardManager from "@effect/cluster/HttpShardManager" -import * as ShardingConfig from "@effect/cluster/ShardingConfig" -import * as ShardManager from "@effect/cluster/ShardManager" -import * as ShardStorage from "@effect/cluster/ShardStorage" -import * as SqlShardStorage from "@effect/cluster/SqlShardStorage" -import type * as Etag from "@effect/platform/Etag" -import * as FetchHttpClient from "@effect/platform/FetchHttpClient" -import type { HttpPlatform } from "@effect/platform/HttpPlatform" -import type { HttpServer } from "@effect/platform/HttpServer" -import type { ServeError } from "@effect/platform/HttpServerError" -import * as RpcSerialization from "@effect/rpc/RpcSerialization" -import type { SqlClient } from "@effect/sql/SqlClient" -import type { SqlError } from "@effect/sql/SqlError" -import type { ConfigError } from "effect/ConfigError" -import * as Effect from "effect/Effect" -import * as Layer from "effect/Layer" -import type { BunContext } from "./BunContext.js" -import * as BunHttpServer from "./BunHttpServer.js" -import * as BunSocket from "./BunSocket.js" - -/** - * @since 1.0.0 - * @category Layers - */ -export const layerHttpServer: Layer.Layer< - | HttpPlatform - | Etag.Generator - | BunContext - | HttpServer, - ServeError, - ShardingConfig.ShardingConfig -> = Effect.gen(function*() { - const config = yield* ShardingConfig.ShardingConfig - return BunHttpServer.layer(config.shardManagerAddress) -}).pipe(Layer.unwrapEffect) - -/** - * @since 1.0.0 - * @category Layers - */ -export const layer = (options: { - readonly transport: "http" | "websocket" - readonly serialization?: "msgpack" | "ndjson" | undefined - readonly shardingConfig?: Partial | undefined - readonly storage?: Storage | undefined - readonly config?: Partial | undefined -}): Layer.Layer< - ShardManager.ShardManager, - ServeError | ConfigError | ("sql" extends Storage ? SqlError : never), - "sql" extends Storage ? 
SqlClient : never -> => { - const layer: Layer.Layer = options.transport === "http" ? - HttpShardManager.layerHttp.pipe( - Layer.provide([HttpShardManager.layerRunnerHealthHttp, layerHttpServer]), - Layer.provide(FetchHttpClient.layer) - ) : - HttpShardManager.layerWebsocket.pipe( - Layer.provide([HttpShardManager.layerRunnerHealthWebsocket, layerHttpServer]), - Layer.provide(BunSocket.layerWebSocketConstructor) - ) - return layer.pipe( - Layer.provide(options?.storage === "sql" ? SqlShardStorage.layer : ShardStorage.layerNoop), - Layer.provide([ - ShardingConfig.layerFromEnv(options.shardingConfig), - ShardManager.layerConfigFromEnv(options?.config), - options?.serialization === "ndjson" ? RpcSerialization.layerNdjson : RpcSerialization.layerMsgPack - ]) - ) -} diff --git a/packages/platform-bun/src/BunClusterShardManagerSocket.ts b/packages/platform-bun/src/BunClusterShardManagerSocket.ts deleted file mode 100644 index 1fed8e6c7f1..00000000000 --- a/packages/platform-bun/src/BunClusterShardManagerSocket.ts +++ /dev/null @@ -1,9 +0,0 @@ -/** - * @since 1.0.0 - */ - -/** - * @since 1.0.0 - * @category Re-exports - */ -export * from "@effect/platform-node-shared/NodeClusterShardManagerSocket" diff --git a/packages/platform-bun/src/BunClusterSocket.ts b/packages/platform-bun/src/BunClusterSocket.ts new file mode 100644 index 00000000000..82599f65fc0 --- /dev/null +++ b/packages/platform-bun/src/BunClusterSocket.ts @@ -0,0 +1,101 @@ +/** + * @since 1.0.0 + */ +import * as MessageStorage from "@effect/cluster/MessageStorage" +import * as RunnerHealth from "@effect/cluster/RunnerHealth" +import * as Runners from "@effect/cluster/Runners" +import * as RunnerStorage from "@effect/cluster/RunnerStorage" +import type { Sharding } from "@effect/cluster/Sharding" +import * as ShardingConfig from "@effect/cluster/ShardingConfig" +import * as SocketRunner from "@effect/cluster/SocketRunner" +import * as SqlMessageStorage from "@effect/cluster/SqlMessageStorage" +import * as SqlRunnerStorage from "@effect/cluster/SqlRunnerStorage" +import { layerClientProtocol, layerSocketServer } from "@effect/platform-node-shared/NodeClusterSocket" +import type * as SocketServer from "@effect/platform/SocketServer" +import * as RpcSerialization from "@effect/rpc/RpcSerialization" +import type { SqlClient } from "@effect/sql/SqlClient" +import type { ConfigError } from "effect/ConfigError" +import * as Layer from "effect/Layer" + +export { + /** + * @since 1.0.0 + * @category Re-exports + */ + layerClientProtocol, + /** + * @since 1.0.0 + * @category Re-exports + */ + layerSocketServer +} + +/** + * @since 1.0.0 + * @category Layers + */ +export const layer = < + const ClientOnly extends boolean = false, + const Storage extends "local" | "sql" | "byo" = never +>( + options?: { + readonly serialization?: "msgpack" | "ndjson" | undefined + readonly clientOnly?: ClientOnly | undefined + readonly storage?: Storage | undefined + readonly shardingConfig?: Partial | undefined + } +): ClientOnly extends true ? Layer.Layer< + Sharding | Runners.Runners | ("byo" extends Storage ? never : MessageStorage.MessageStorage), + ConfigError, + "local" extends Storage ? never + : "byo" extends Storage ? (MessageStorage.MessageStorage | RunnerStorage.RunnerStorage) + : SqlClient + > : + Layer.Layer< + Sharding | Runners.Runners | ("byo" extends Storage ? never : MessageStorage.MessageStorage), + SocketServer.SocketServerError | ConfigError, + "local" extends Storage ? never + : "byo" extends Storage ? 
(MessageStorage.MessageStorage | RunnerStorage.RunnerStorage) + : SqlClient + > => +{ + const layer: Layer.Layer = options?.clientOnly + // client only + ? Layer.provide(SocketRunner.layerClientOnly, layerClientProtocol) + // with server + : Layer.provide(SocketRunner.layer, [layerSocketServer, layerClientProtocol]) + + const runnerHealth: Layer.Layer = options?.clientOnly + ? Layer.empty as any + // TODO: when bun supports adding custom CA certificates + // : options?.runnerHealth === "k8s" + // ? RunnerHealth.layerK8s().pipe( + // Layer.provide([NodeFileSystem.layer, layerHttpClientK8s]) + // ) + : RunnerHealth.layerPing.pipe( + Layer.provide(Runners.layerRpc), + Layer.provide(layerClientProtocol) + ) + + return layer.pipe( + Layer.provide(runnerHealth), + Layer.provideMerge( + options?.storage === "local" + ? MessageStorage.layerNoop + : options?.storage === "byo" + ? Layer.empty + : Layer.orDie(SqlMessageStorage.layer) + ), + Layer.provide( + options?.storage === "local" + ? RunnerStorage.layerMemory + : options?.storage === "byo" + ? Layer.empty + : Layer.orDie(SqlRunnerStorage.layer) + ), + Layer.provide(ShardingConfig.layerFromEnv(options?.shardingConfig)), + Layer.provide( + options?.serialization === "ndjson" ? RpcSerialization.layerNdjson : RpcSerialization.layerMsgPack + ) + ) as any +} diff --git a/packages/platform-bun/src/index.ts b/packages/platform-bun/src/index.ts index a07d446d774..c14781eb03c 100644 --- a/packages/platform-bun/src/index.ts +++ b/packages/platform-bun/src/index.ts @@ -1,22 +1,12 @@ /** * @since 1.0.0 */ -export * as BunClusterRunnerHttp from "./BunClusterRunnerHttp.js" +export * as BunClusterHttp from "./BunClusterHttp.js" /** * @since 1.0.0 */ -export * as BunClusterRunnerSocket from "./BunClusterRunnerSocket.js" - -/** - * @since 1.0.0 - */ -export * as BunClusterShardManagerHttp from "./BunClusterShardManagerHttp.js" - -/** - * @since 1.0.0 - */ -export * as BunClusterShardManagerSocket from "./BunClusterShardManagerSocket.js" +export * as BunClusterSocket from "./BunClusterSocket.js" /** * @since 1.0.0 diff --git a/packages/platform-node-shared/src/NodeClusterRunnerSocket.ts b/packages/platform-node-shared/src/NodeClusterRunnerSocket.ts deleted file mode 100644 index 6ced08ad1ec..00000000000 --- a/packages/platform-node-shared/src/NodeClusterRunnerSocket.ts +++ /dev/null @@ -1,86 +0,0 @@ -/** - * @since 1.0.0 - */ -import * as MessageStorage from "@effect/cluster/MessageStorage" -import type * as Runners from "@effect/cluster/Runners" -import type { Sharding } from "@effect/cluster/Sharding" -import * as ShardingConfig from "@effect/cluster/ShardingConfig" -import * as ShardStorage from "@effect/cluster/ShardStorage" -import * as SocketRunner from "@effect/cluster/SocketRunner" -import * as SqlMessageStorage from "@effect/cluster/SqlMessageStorage" -import * as SqlShardStorage from "@effect/cluster/SqlShardStorage" -import type * as SocketServer from "@effect/platform/SocketServer" -import * as RpcSerialization from "@effect/rpc/RpcSerialization" -import type { SqlClient } from "@effect/sql/SqlClient" -import type { SqlError } from "@effect/sql/SqlError" -import type { ConfigError } from "effect/ConfigError" -import * as Effect from "effect/Effect" -import * as Layer from "effect/Layer" -import * as Option from "effect/Option" -import { layerClientProtocol } from "./NodeClusterSocketCommon.js" -import * as NodeSocketServer from "./NodeSocketServer.js" - -/** - * @since 1.0.0 - * @category Layers - */ -export const layerSocketServer: Layer.Layer< - 
SocketServer.SocketServer, - SocketServer.SocketServerError, - ShardingConfig.ShardingConfig -> = Effect.gen(function*() { - const config = yield* ShardingConfig.ShardingConfig - const listenAddress = config.runnerListenAddress.pipe( - Option.orElse(() => config.runnerAddress) - ) - if (Option.isNone(listenAddress)) { - return yield* Effect.dieMessage("layerSocketServer: ShardingConfig.runnerListenAddress is None") - } - return NodeSocketServer.layer(listenAddress.value) -}).pipe(Layer.unwrapEffect) - -/** - * @since 1.0.0 - * @category Layers - */ -export const layer = ( - options?: { - readonly serialization?: "msgpack" | "ndjson" | undefined - readonly clientOnly?: ClientOnly | undefined - readonly storage?: Storage | undefined - readonly shardingConfig?: Partial | undefined - } -): ClientOnly extends true ? Layer.Layer< - Sharding | Runners.Runners | MessageStorage.MessageStorage, - ConfigError, - "sql" extends Storage ? SqlClient : never - > : - Layer.Layer< - Sharding | Runners.Runners | MessageStorage.MessageStorage, - SocketServer.SocketServerError | ConfigError | ("sql" extends Storage ? SqlError : never), - "sql" extends Storage ? SqlClient : never - > => -{ - const layer: Layer.Layer = options?.clientOnly - // client only - ? Layer.provide(SocketRunner.layerClientOnly, layerClientProtocol) - // with server - : Layer.provide(SocketRunner.layer, [layerSocketServer, layerClientProtocol]) - - return layer.pipe( - Layer.provideMerge( - options?.storage === "sql" - ? SqlMessageStorage.layer - : MessageStorage.layerNoop - ), - Layer.provide( - options?.storage === "sql" - ? options.clientOnly ? Layer.empty : SqlShardStorage.layer - : ShardStorage.layerNoop - ), - Layer.provide(ShardingConfig.layerFromEnv(options?.shardingConfig)), - Layer.provide( - options?.serialization === "ndjson" ? 
RpcSerialization.layerNdjson : RpcSerialization.layerMsgPack - ) - ) as any -} diff --git a/packages/platform-node-shared/src/NodeClusterShardManagerSocket.ts b/packages/platform-node-shared/src/NodeClusterShardManagerSocket.ts deleted file mode 100644 index 92269b2388b..00000000000 --- a/packages/platform-node-shared/src/NodeClusterShardManagerSocket.ts +++ /dev/null @@ -1,59 +0,0 @@ -/** - * @since 1.0.0 - */ -import * as RunnerHealth from "@effect/cluster/RunnerHealth" -import * as ShardingConfig from "@effect/cluster/ShardingConfig" -import * as ShardManager from "@effect/cluster/ShardManager" -import * as ShardStorage from "@effect/cluster/ShardStorage" -import * as SocketShardManager from "@effect/cluster/SocketShardManager" -import * as SqlShardStorage from "@effect/cluster/SqlShardStorage" -import type * as SocketServer from "@effect/platform/SocketServer" -import * as RpcSerialization from "@effect/rpc/RpcSerialization" -import type { SqlClient } from "@effect/sql/SqlClient" -import type { SqlError } from "@effect/sql/SqlError" -import type { ConfigError } from "effect/ConfigError" -import * as Effect from "effect/Effect" -import * as Layer from "effect/Layer" -import { layerClientProtocol } from "./NodeClusterSocketCommon.js" -import * as NodeSocketServer from "./NodeSocketServer.js" - -/** - * @since 1.0.0 - * @category Layers - */ -export const layerSocketServer: Layer.Layer< - SocketServer.SocketServer, - SocketServer.SocketServerError, - ShardingConfig.ShardingConfig -> = Effect.gen(function*() { - const config = yield* ShardingConfig.ShardingConfig - return NodeSocketServer.layer(config.shardManagerAddress) -}).pipe(Layer.unwrapEffect) - -/** - * @since 1.0.0 - * @category Layers - */ -export const layer = (options?: { - readonly serialization?: "msgpack" | "ndjson" | undefined - readonly shardingConfig?: Partial | undefined - readonly storage?: Storage | undefined - readonly config?: Partial | undefined -}): Layer.Layer< - ShardManager.ShardManager, - SocketServer.SocketServerError | ConfigError | ("sql" extends Storage ? SqlError : never), - "sql" extends Storage ? SqlClient : never -> => - SocketShardManager.layer.pipe( - Layer.provide([ - RunnerHealth.layerRpc, - layerSocketServer, - ShardManager.layerConfigFromEnv(options?.config) - ]), - Layer.provide(layerClientProtocol), - Layer.provide(options?.storage === "sql" ? SqlShardStorage.layer : ShardStorage.layerNoop), - Layer.provide([ - options?.serialization === "ndjson" ? 
RpcSerialization.layerNdjson : RpcSerialization.layerMsgPack, - ShardingConfig.layerFromEnv(options?.shardingConfig) - ]) - ) as any diff --git a/packages/platform-node-shared/src/NodeClusterSocketCommon.ts b/packages/platform-node-shared/src/NodeClusterSocket.ts similarity index 52% rename from packages/platform-node-shared/src/NodeClusterSocketCommon.ts rename to packages/platform-node-shared/src/NodeClusterSocket.ts index 4f7d9b148e1..93dad9d2995 100644 --- a/packages/platform-node-shared/src/NodeClusterSocketCommon.ts +++ b/packages/platform-node-shared/src/NodeClusterSocket.ts @@ -2,12 +2,16 @@ * @since 1.0.0 */ import * as Runners from "@effect/cluster/Runners" +import * as ShardingConfig from "@effect/cluster/ShardingConfig" import { Socket } from "@effect/platform/Socket" +import type * as SocketServer from "@effect/platform/SocketServer" import * as RpcClient from "@effect/rpc/RpcClient" import * as RpcSerialization from "@effect/rpc/RpcSerialization" import * as Effect from "effect/Effect" import * as Layer from "effect/Layer" +import * as Option from "effect/Option" import * as NodeSocket from "./NodeSocket.js" +import * as NodeSocketServer from "./NodeSocketServer.js" /** * @since 1.0.0 @@ -17,16 +21,15 @@ export const layerClientProtocol: Layer.Layer< Runners.RpcClientProtocol, never, RpcSerialization.RpcSerialization -> = Layer.effect( - Runners.RpcClientProtocol, +> = Layer.effect(Runners.RpcClientProtocol)( Effect.gen(function*() { const serialization = yield* RpcSerialization.RpcSerialization return Effect.fnUntraced(function*(address) { const socket = yield* NodeSocket.makeNet({ - host: address.host, - port: address.port, + openTimeout: 1000, timeout: 5500, - openTimeout: 1000 + host: address.host, + port: address.port }) return yield* RpcClient.makeProtocolSocket().pipe( Effect.provideService(Socket, socket), @@ -35,3 +38,20 @@ export const layerClientProtocol: Layer.Layer< }, Effect.orDie) }) ) + +/** + * @since 1.0.0 + * @category Layers + */ +export const layerSocketServer: Layer.Layer< + SocketServer.SocketServer, + SocketServer.SocketServerError, + ShardingConfig.ShardingConfig +> = Effect.gen(function*() { + const config = yield* ShardingConfig.ShardingConfig + const listenAddress = Option.orElse(config.runnerListenAddress, () => config.runnerAddress) + if (listenAddress._tag === "None") { + return yield* Effect.die("layerSocketServer: ShardingConfig.runnerListenAddress is None") + } + return NodeSocketServer.layer(listenAddress.value) +}).pipe(Layer.unwrapEffect) diff --git a/packages/platform-node/examples/cluster-shard-manager.ts b/packages/platform-node/examples/cluster-shard-manager.ts deleted file mode 100644 index b3d37e3d3bc..00000000000 --- a/packages/platform-node/examples/cluster-shard-manager.ts +++ /dev/null @@ -1,7 +0,0 @@ -import { NodeClusterShardManagerSocket, NodeRuntime } from "@effect/platform-node" -import { Layer } from "effect" - -NodeClusterShardManagerSocket.layer().pipe( - Layer.launch, - NodeRuntime.runMain -) diff --git a/packages/platform-node/examples/cluster-runner.ts b/packages/platform-node/examples/cluster.ts similarity index 90% rename from packages/platform-node/examples/cluster-runner.ts rename to packages/platform-node/examples/cluster.ts index 3842935959d..c10de8814e9 100644 --- a/packages/platform-node/examples/cluster-runner.ts +++ b/packages/platform-node/examples/cluster.ts @@ -1,5 +1,5 @@ import { Entity, RunnerAddress, Singleton } from "@effect/cluster" -import { NodeClusterRunnerSocket, NodeRuntime } from 
"@effect/platform-node" +import { NodeClusterSocket, NodeRuntime } from "@effect/platform-node" import { Rpc } from "@effect/rpc" import { Effect, Layer, Logger, LogLevel, Option, Schema } from "effect" @@ -50,9 +50,9 @@ const SendMessage = Singleton.make( }) ) -for (let i = 0; i < 10; i++) { - const ShardingLive = NodeClusterRunnerSocket.layer({ - storage: "noop", +for (let i = 0; i < 1; i++) { + const ShardingLive = NodeClusterSocket.layer({ + storage: "local", shardingConfig: { runnerAddress: Option.some(RunnerAddress.make("localhost", 50000 + i)) } diff --git a/packages/platform-node/src/NodeClusterRunnerHttp.ts b/packages/platform-node/src/NodeClusterHttp.ts similarity index 53% rename from packages/platform-node/src/NodeClusterRunnerHttp.ts rename to packages/platform-node/src/NodeClusterHttp.ts index 4efc4dbe9f5..46edd6c2cd7 100644 --- a/packages/platform-node/src/NodeClusterRunnerHttp.ts +++ b/packages/platform-node/src/NodeClusterHttp.ts @@ -3,25 +3,27 @@ */ import * as HttpRunner from "@effect/cluster/HttpRunner" import * as MessageStorage from "@effect/cluster/MessageStorage" -import type * as Runners from "@effect/cluster/Runners" +import * as RunnerHealth from "@effect/cluster/RunnerHealth" +import * as Runners from "@effect/cluster/Runners" +import * as RunnerStorage from "@effect/cluster/RunnerStorage" import type { Sharding } from "@effect/cluster/Sharding" import * as ShardingConfig from "@effect/cluster/ShardingConfig" -import * as ShardStorage from "@effect/cluster/ShardStorage" import * as SqlMessageStorage from "@effect/cluster/SqlMessageStorage" -import * as SqlShardStorage from "@effect/cluster/SqlShardStorage" +import * as SqlRunnerStorage from "@effect/cluster/SqlRunnerStorage" import type * as Etag from "@effect/platform/Etag" import type { HttpPlatform } from "@effect/platform/HttpPlatform" import type { HttpServer } from "@effect/platform/HttpServer" import type { ServeError } from "@effect/platform/HttpServerError" import * as RpcSerialization from "@effect/rpc/RpcSerialization" import type { SqlClient } from "@effect/sql/SqlClient" -import type { SqlError } from "@effect/sql/SqlError" import type { ConfigError } from "effect/ConfigError" import * as Effect from "effect/Effect" import * as Layer from "effect/Layer" import * as Option from "effect/Option" import { createServer } from "node:http" +import { layerHttpClientK8s } from "./NodeClusterSocket.js" import type { NodeContext } from "./NodeContext.js" +import * as NodeFileSystem from "./NodeFileSystem.js" import * as NodeHttpClient from "./NodeHttpClient.js" import * as NodeHttpServer from "./NodeHttpServer.js" import * as NodeSocket from "./NodeSocket.js" @@ -32,22 +34,32 @@ import * as NodeSocket from "./NodeSocket.js" */ export const layer = < const ClientOnly extends boolean = false, - const Storage extends "noop" | "sql" = never + const Storage extends "local" | "sql" | "byo" = never, + const Health extends "ping" | "k8s" = never >(options: { readonly transport: "http" | "websocket" readonly serialization?: "msgpack" | "ndjson" | undefined readonly clientOnly?: ClientOnly | undefined readonly storage?: Storage | undefined + readonly runnerHealth?: Health | undefined + readonly runnerHealthK8s?: { + readonly namespace?: string | undefined + readonly labelSelector?: string | undefined + } | undefined readonly shardingConfig?: Partial | undefined }): ClientOnly extends true ? Layer.Layer< - Sharding | Runners.Runners | MessageStorage.MessageStorage, - ConfigError | ("sql" extends Storage ? 
SqlError : never), - "sql" extends Storage ? SqlClient : never + Sharding | Runners.Runners | ("byo" extends Storage ? never : MessageStorage.MessageStorage), + ConfigError, + "local" extends Storage ? never + : "byo" extends Storage ? (MessageStorage.MessageStorage | RunnerStorage.RunnerStorage) + : SqlClient > : Layer.Layer< - Sharding | Runners.Runners | MessageStorage.MessageStorage, - ServeError | ConfigError | ("sql" extends Storage ? SqlError : never), - "sql" extends Storage ? SqlClient : never + Sharding | Runners.Runners | ("byo" extends Storage ? never : MessageStorage.MessageStorage), + ServeError | ConfigError, + "local" extends Storage ? never + : "byo" extends Storage ? (MessageStorage.MessageStorage | RunnerStorage.RunnerStorage) + : SqlClient > => { const layer: Layer.Layer = options.clientOnly @@ -60,16 +72,36 @@ export const layer = < ? Layer.provide(HttpRunner.layerHttp, [layerHttpServer, NodeHttpClient.layerUndici]) : Layer.provide(HttpRunner.layerWebsocket, [layerHttpServer, NodeSocket.layerWebSocketConstructor]) + const runnerHealth: Layer.Layer = options?.clientOnly + ? Layer.empty as any + : options?.runnerHealth === "k8s" + ? RunnerHealth.layerK8s(options.runnerHealthK8s).pipe( + Layer.provide([NodeFileSystem.layer, layerHttpClientK8s]) + ) + : RunnerHealth.layerPing.pipe( + Layer.provide(Runners.layerRpc), + Layer.provide( + options.transport === "http" + ? HttpRunner.layerClientProtocolHttpDefault.pipe(Layer.provide(NodeHttpClient.layerUndici)) + : HttpRunner.layerClientProtocolWebsocketDefault.pipe(Layer.provide(NodeSocket.layerWebSocketConstructor)) + ) + ) + return layer.pipe( + Layer.provide(runnerHealth), Layer.provideMerge( - options?.storage === "sql" ? - SqlMessageStorage.layer - : MessageStorage.layerNoop + options?.storage === "local" + ? MessageStorage.layerNoop + : options?.storage === "byo" + ? Layer.empty + : Layer.orDie(SqlMessageStorage.layer) ), Layer.provide( - options?.storage === "sql" - ? options.clientOnly ? Layer.empty : SqlShardStorage.layer - : ShardStorage.layerNoop + options?.storage === "local" + ? RunnerStorage.layerMemory + : options?.storage === "byo" + ? 
Layer.empty + : Layer.orDie(SqlRunnerStorage.layer) ), Layer.provide(ShardingConfig.layerFromEnv(options?.shardingConfig)), Layer.provide( @@ -91,11 +123,9 @@ export const layerHttpServer: Layer.Layer< ShardingConfig.ShardingConfig > = Effect.gen(function*() { const config = yield* ShardingConfig.ShardingConfig - const listenAddress = config.runnerListenAddress.pipe( - Option.orElse(() => config.runnerAddress) - ) - if (Option.isNone(listenAddress)) { - return yield* Effect.dieMessage("NodeClusterHttpRunner.layerHttpServer: ShardingConfig.podAddress is None") + const listenAddress = Option.orElse(config.runnerListenAddress, () => config.runnerAddress) + if (listenAddress._tag === "None") { + return yield* Effect.die("NodeClusterHttp.layerHttpServer: ShardingConfig.runnerAddress is None") } return NodeHttpServer.layer(createServer, listenAddress.value) }).pipe(Layer.unwrapEffect) diff --git a/packages/platform-node/src/NodeClusterRunnerSocket.ts b/packages/platform-node/src/NodeClusterRunnerSocket.ts deleted file mode 100644 index f863b18e120..00000000000 --- a/packages/platform-node/src/NodeClusterRunnerSocket.ts +++ /dev/null @@ -1,9 +0,0 @@ -/** - * @since 1.0.0 - */ - -/** - * @since 1.0.0 - * @category Re-exports - */ -export * from "@effect/platform-node-shared/NodeClusterRunnerSocket" diff --git a/packages/platform-node/src/NodeClusterShardManagerHttp.ts b/packages/platform-node/src/NodeClusterShardManagerHttp.ts deleted file mode 100644 index 39ead03507f..00000000000 --- a/packages/platform-node/src/NodeClusterShardManagerHttp.ts +++ /dev/null @@ -1,73 +0,0 @@ -/** - * @since 1.0.0 - */ -import * as HttpShardManager from "@effect/cluster/HttpShardManager" -import * as ShardingConfig from "@effect/cluster/ShardingConfig" -import * as ShardManager from "@effect/cluster/ShardManager" -import * as ShardStorage from "@effect/cluster/ShardStorage" -import * as SqlShardStorage from "@effect/cluster/SqlShardStorage" -import type * as Etag from "@effect/platform/Etag" -import type { HttpPlatform } from "@effect/platform/HttpPlatform" -import type { HttpServer } from "@effect/platform/HttpServer" -import type { ServeError } from "@effect/platform/HttpServerError" -import * as RpcSerialization from "@effect/rpc/RpcSerialization" -import type { SqlClient } from "@effect/sql/SqlClient" -import type { SqlError } from "@effect/sql/SqlError" -import type { ConfigError } from "effect/ConfigError" -import * as Effect from "effect/Effect" -import * as Layer from "effect/Layer" -import { createServer } from "node:http" -import type { NodeContext } from "./NodeContext.js" -import * as NodeHttpClient from "./NodeHttpClient.js" -import * as NodeHttpServer from "./NodeHttpServer.js" -import * as NodeSocket from "./NodeSocket.js" - -/** - * @since 1.0.0 - * @category Layers - */ -export const layerHttpServer: Layer.Layer< - | HttpPlatform - | Etag.Generator - | NodeContext - | HttpServer, - ServeError, - ShardingConfig.ShardingConfig -> = Effect.gen(function*() { - const config = yield* ShardingConfig.ShardingConfig - return NodeHttpServer.layer(createServer, config.shardManagerAddress) -}).pipe(Layer.unwrapEffect) - -/** - * @since 1.0.0 - * @category Layers - */ -export const layer = (options: { - readonly transport: "http" | "websocket" - readonly serialization?: "msgpack" | "ndjson" | undefined - readonly shardingConfig?: Partial | undefined - readonly storage?: Storage | undefined - readonly config?: Partial | undefined -}): Layer.Layer< - ShardManager.ShardManager, - ServeError | ConfigError | ("sql" 
extends Storage ? SqlError : never), - "sql" extends Storage ? SqlClient : never -> => { - const layer: Layer.Layer = options.transport === "http" ? - HttpShardManager.layerHttp.pipe( - Layer.provide([HttpShardManager.layerRunnerHealthHttp, layerHttpServer]), - Layer.provide(NodeHttpClient.layerUndici) - ) : - HttpShardManager.layerWebsocket.pipe( - Layer.provide([HttpShardManager.layerRunnerHealthWebsocket, layerHttpServer]), - Layer.provide(NodeSocket.layerWebSocketConstructor) - ) - return layer.pipe( - Layer.provide(options?.storage === "sql" ? SqlShardStorage.layer : ShardStorage.layerNoop), - Layer.provide([ - ShardingConfig.layerFromEnv(options.shardingConfig), - ShardManager.layerConfigFromEnv(options?.config), - options?.serialization === "ndjson" ? RpcSerialization.layerNdjson : RpcSerialization.layerMsgPack - ]) - ) -} diff --git a/packages/platform-node/src/NodeClusterShardManagerSocket.ts b/packages/platform-node/src/NodeClusterShardManagerSocket.ts deleted file mode 100644 index 1fed8e6c7f1..00000000000 --- a/packages/platform-node/src/NodeClusterShardManagerSocket.ts +++ /dev/null @@ -1,9 +0,0 @@ -/** - * @since 1.0.0 - */ - -/** - * @since 1.0.0 - * @category Re-exports - */ -export * from "@effect/platform-node-shared/NodeClusterShardManagerSocket" diff --git a/packages/platform-node/src/NodeClusterSocket.ts b/packages/platform-node/src/NodeClusterSocket.ts new file mode 100644 index 00000000000..d3e18dcb5e6 --- /dev/null +++ b/packages/platform-node/src/NodeClusterSocket.ts @@ -0,0 +1,150 @@ +/** + * @since 1.0.0 + */ +import * as MessageStorage from "@effect/cluster/MessageStorage" +import * as RunnerHealth from "@effect/cluster/RunnerHealth" +import * as Runners from "@effect/cluster/Runners" +import * as RunnerStorage from "@effect/cluster/RunnerStorage" +import type { Sharding } from "@effect/cluster/Sharding" +import * as ShardingConfig from "@effect/cluster/ShardingConfig" +import * as SocketRunner from "@effect/cluster/SocketRunner" +import * as SqlMessageStorage from "@effect/cluster/SqlMessageStorage" +import * as SqlRunnerStorage from "@effect/cluster/SqlRunnerStorage" +import { layerClientProtocol, layerSocketServer } from "@effect/platform-node-shared/NodeClusterSocket" +import * as FileSystem from "@effect/platform/FileSystem" +import type * as HttpClient from "@effect/platform/HttpClient" +import type * as SocketServer from "@effect/platform/SocketServer" +import * as RpcSerialization from "@effect/rpc/RpcSerialization" +import type { SqlClient } from "@effect/sql/SqlClient" +import type { ConfigError } from "effect/ConfigError" +import * as Effect from "effect/Effect" +import * as Layer from "effect/Layer" +import * as NodeFileSystem from "./NodeFileSystem.js" +import * as NodeHttpClient from "./NodeHttpClient.js" +import * as Undici from "./Undici.js" + +export { + /** + * @since 1.0.0 + * @category Re-exports + */ + layerClientProtocol, + /** + * @since 1.0.0 + * @category Re-exports + */ + layerSocketServer +} + +/** + * @since 1.0.0 + * @category Layers + */ +export const layer = < + const ClientOnly extends boolean = false, + const Storage extends "local" | "sql" | "byo" = never, + const Health extends "ping" | "k8s" = never +>( + options?: { + readonly serialization?: "msgpack" | "ndjson" | undefined + readonly clientOnly?: ClientOnly | undefined + readonly storage?: Storage | undefined + readonly runnerHealth?: Health | undefined + readonly runnerHealthK8s?: { + readonly namespace?: string | undefined + readonly labelSelector?: string | undefined + } 
| undefined + readonly shardingConfig?: Partial | undefined + } +): ClientOnly extends true ? Layer.Layer< + Sharding | Runners.Runners | ("byo" extends Storage ? never : MessageStorage.MessageStorage), + ConfigError, + "local" extends Storage ? never + : "byo" extends Storage ? (MessageStorage.MessageStorage | RunnerStorage.RunnerStorage) + : SqlClient + > : + Layer.Layer< + Sharding | Runners.Runners | ("byo" extends Storage ? never : MessageStorage.MessageStorage), + SocketServer.SocketServerError | ConfigError, + "local" extends Storage ? never + : "byo" extends Storage ? (MessageStorage.MessageStorage | RunnerStorage.RunnerStorage) + : SqlClient + > => +{ + const layer: Layer.Layer = options?.clientOnly + // client only + ? Layer.provide(SocketRunner.layerClientOnly, layerClientProtocol) + // with server + : Layer.provide(SocketRunner.layer, [layerSocketServer, layerClientProtocol]) + + const runnerHealth: Layer.Layer = options?.clientOnly + ? Layer.empty as any + : options?.runnerHealth === "k8s" + ? RunnerHealth.layerK8s(options.runnerHealthK8s).pipe( + Layer.provide([NodeFileSystem.layer, layerHttpClientK8s]) + ) + : RunnerHealth.layerPing.pipe( + Layer.provide(Runners.layerRpc), + Layer.provide(layerClientProtocol) + ) + + return layer.pipe( + Layer.provide(runnerHealth), + Layer.provideMerge( + options?.storage === "local" + ? MessageStorage.layerNoop + : options?.storage === "byo" + ? Layer.empty + : Layer.orDie(SqlMessageStorage.layer) + ), + Layer.provide( + options?.storage === "local" + ? RunnerStorage.layerMemory + : options?.storage === "byo" + ? Layer.empty + : Layer.orDie(SqlRunnerStorage.layer) + ), + Layer.provide(ShardingConfig.layerFromEnv(options?.shardingConfig)), + Layer.provide( + options?.serialization === "ndjson" ? RpcSerialization.layerNdjson : RpcSerialization.layerMsgPack + ) + ) as any +} + +/** + * @since 1.0.0 + * @category Layers + */ +export const layerDispatcherK8s: Layer.Layer = Layer.scoped(NodeHttpClient.Dispatcher)( + Effect.gen(function*() { + const fs = yield* FileSystem.FileSystem + const caCertOption = yield* fs.readFileString("/var/run/secrets/kubernetes.io/serviceaccount/ca.crt").pipe( + Effect.option + ) + if (caCertOption._tag === "Some") { + return yield* Effect.acquireRelease( + Effect.sync(() => + new Undici.Agent({ + connect: { + ca: caCertOption.value + } + }) + ), + (agent) => Effect.promise(() => agent.destroy()) + ) + } + + return yield* NodeHttpClient.makeDispatcher + }) +).pipe( + Layer.provide(NodeFileSystem.layer) +) +/** + * @since 1.0.0 + * @category Layers + */ +export const layerHttpClientK8s: Layer.Layer = Layer.fresh( + NodeHttpClient.layerUndiciWithoutDispatcher +).pipe( + Layer.provide(layerDispatcherK8s) +) diff --git a/packages/platform-node/src/index.ts b/packages/platform-node/src/index.ts index 2f0d8adbac0..36ef345a6ee 100644 --- a/packages/platform-node/src/index.ts +++ b/packages/platform-node/src/index.ts @@ -1,22 +1,12 @@ /** * @since 1.0.0 */ -export * as NodeClusterRunnerHttp from "./NodeClusterRunnerHttp.js" +export * as NodeClusterHttp from "./NodeClusterHttp.js" /** * @since 1.0.0 */ -export * as NodeClusterRunnerSocket from "./NodeClusterRunnerSocket.js" - -/** - * @since 1.0.0 - */ -export * as NodeClusterShardManagerHttp from "./NodeClusterShardManagerHttp.js" - -/** - * @since 1.0.0 - */ -export * as NodeClusterShardManagerSocket from "./NodeClusterShardManagerSocket.js" +export * as NodeClusterSocket from "./NodeClusterSocket.js" /** * @since 1.0.0 diff --git a/packages/platform/src/HttpLayerRouter.ts 
b/packages/platform/src/HttpLayerRouter.ts index 64d2294c6b4..fba772bc013 100644 --- a/packages/platform/src/HttpLayerRouter.ts +++ b/packages/platform/src/HttpLayerRouter.ts @@ -1084,7 +1084,7 @@ export const serve = | Request.On ) => Effect.Effect } ): Layer.Layer< - never, + A, Request.Without, HttpServer.HttpServer | Exclude | Exclude, HttpRouter> > => { @@ -1101,7 +1101,7 @@ export const serve = | Request.On return middleware ? HttpServer.serve(handler, middleware) : HttpServer.serve(handler) }).pipe( Layer.unwrapScoped, - Layer.provide(appLayer), + Layer.provideMerge(appLayer), Layer.provide(RouterLayer), options?.disableListenLog ? identity : HttpServer.withLogAddress ) as any diff --git a/packages/rpc/src/RpcClient.ts b/packages/rpc/src/RpcClient.ts index b01871f7031..64472f55b5f 100644 --- a/packages/rpc/src/RpcClient.ts +++ b/packages/rpc/src/RpcClient.ts @@ -268,9 +268,9 @@ export const makeNoSerialization: { + Effect.suspend(() => { isShutdown = true - return clearEntries(Exit.interrupt(fiberId)) + return clearEntries(Exit.interrupt(fiberIdTransientInterrupt)) }) ) @@ -365,6 +365,7 @@ export const makeNoSerialization: + let completed = false return Effect.onInterrupt( Effect.async((resume) => { const entry: ClientEntry = { @@ -372,6 +373,7 @@ export const makeNoSerialization: { @@ -399,6 +401,7 @@ export const makeNoSerialization: { + if (completed) return Effect.void entries.delete(id) const ids = Array.from(interruptors).flatMap((id) => Array.from(FiberId.toSet(id))) return Effect.zipRight( @@ -601,6 +604,8 @@ export const makeNoSerialization: ) as Effect.Effect } case "Defect": { + entries.clear() return write({ _tag: "Defect", clientId: 0, defect: decodeDefect(message.defect) }) } case "ClientProtocolError": { const exit = Exit.fail(message.error) return Effect.forEach( entries.keys(), - (requestId) => write({ _tag: "Exit", clientId: 0, requestId, exit: exit as any }) + (requestId) => { + entries.delete(requestId) + return write({ _tag: "Exit", clientId: 0, requestId, exit: exit as any }) + } ) } default: { @@ -1045,7 +1054,7 @@ const makePinger = Effect.fnUntraced(function*(writePing: Effect.Effect recievedPong = false return writePing }).pipe( - Effect.delay("5 seconds"), + Effect.delay("10 seconds"), Effect.ignore, Effect.forever, Effect.interruptible, diff --git a/packages/rpc/src/RpcServer.ts b/packages/rpc/src/RpcServer.ts index 12113495217..13aff1820c3 100644 --- a/packages/rpc/src/RpcServer.ts +++ b/packages/rpc/src/RpcServer.ts @@ -112,7 +112,7 @@ export const makeNoSerialization: ( ) const concurrencySemaphore = concurrency === "unbounded" ? 
undefined - : yield* Effect.makeSemaphore(concurrency) + : Effect.unsafeMakeSemaphore(concurrency).withPermits(1) type Client = { readonly id: number @@ -126,7 +126,7 @@ export const makeNoSerialization: ( const shutdownLatch = Effect.unsafeMakeLatch(false) yield* Scope.addFinalizer( scope, - Effect.fiberIdWith((fiberId) => { + Effect.suspend(() => { isShutdown = true for (const client of clients.values()) { client.ended = true @@ -135,7 +135,7 @@ export const makeNoSerialization: ( continue } for (const fiber of client.fibers.values()) { - fiber.unsafeInterruptAsFork(fiberId) + fiber.unsafeInterruptAsFork(fiberIdTransientInterrupt) } } if (clients.size === 0) { @@ -146,11 +146,11 @@ export const makeNoSerialization: ( ) const disconnect = (clientId: number) => - Effect.fiberIdWith((fiberId) => { + Effect.suspend(() => { const client = clients.get(clientId) if (!client) return Effect.void for (const fiber of client.fibers.values()) { - fiber.unsafeInterruptAsFork(fiberId) + fiber.unsafeInterruptAsFork(fiberIdTransientInterrupt) } clients.delete(clientId) return Effect.void @@ -221,7 +221,10 @@ export const makeNoSerialization: ( request: Request ): Effect.Effect => { if (client.fibers.has(request.id)) { - return Effect.interrupt + return Effect.flatMap( + Fiber.await(client.fibers.get(request.id)!), + () => handleRequest(requestFiber, client, request) + ) } const rpc = group.requests.get(request.tag) as any as Rpc.AnyWithProps const entry = context.unsafeMap.get(rpc?.key) as Rpc.Handler @@ -291,7 +294,10 @@ export const makeNoSerialization: ( const parentSpan = requestFiber.currentContext.unsafeMap.get(Tracer.ParentSpan.key) as Tracer.AnySpan | undefined effect = Effect.withSpan(effect, `${spanPrefix}.${request.tag}`, { captureStackTrace: false, - attributes: options.spanAttributes, + attributes: { + requestId: String(request.id), + ...options.spanAttributes + }, parent: enableSpanPropagation && request.spanId ? 
{ _tag: "ExternalSpan", @@ -311,7 +317,7 @@ export const makeNoSerialization: ( }) } if (!isFork && concurrencySemaphore) { - effect = concurrencySemaphore.withPermits(1)(effect) + effect = concurrencySemaphore(effect) } const runtime = Runtime.make({ context: Context.merge(entry.context, requestFiber.currentContext), @@ -319,7 +325,6 @@ export const makeNoSerialization: ( runtimeFlags: RuntimeFlags.disable(Runtime.defaultRuntime.runtimeFlags, RuntimeFlags.Interruption) }) const fiber = Runtime.runFork(runtime, effect) - FiberSet.unsafeAdd(fiberSet, fiber) client.fibers.set(request.id, fiber) fiber.addObserver((exit) => { if (!responded && exit._tag === "Failure") { @@ -704,7 +709,8 @@ export const make: ( }).pipe( Effect.interruptible, Effect.tapErrorCause((cause) => Effect.logFatal("BUG: RpcServer protocol crashed", cause)), - Effect.onExit((exit) => Scope.close(scope, exit)) + Effect.onExit((exit) => Scope.close(scope, exit)), + Effect.withUnhandledErrorLogLevel(Option.none()) ) }) @@ -797,9 +803,10 @@ export class Protocol extends Context.Tag("@effect/rpc/RpcServer/Protocol") { writeRequest = writeRequest_ @@ -1127,7 +1137,9 @@ export const makeProtocolWorkerRunner: Effect.Effect< return Deferred.succeed(initialMessage, message.value) } return writeRequest(clientId, message) - }) + }).pipe( + Effect.withUnhandledErrorLogLevel(Option.none()) + ) yield* disconnects.take.pipe( Effect.tap((clientId) => { @@ -1336,7 +1348,8 @@ export const makeProtocolStdio = Effect.fnUntraced(function*(options: { * @since 1.0.0 * @category Interruption */ -export const fiberIdClientInterrupt = FiberId.make(-499, 0) +export const fiberIdClientInterrupt = FiberId.make(-499, 0) as FiberId.Runtime + +/** + * Fiber id used for transient interruptions. + * + * @since 1.0.0 + * @category Interruption + */ +export const fiberIdTransientInterrupt = FiberId.make(-503, 0) as FiberId.Runtime // internal @@ -1450,7 +1471,8 @@ const makeSocketProtocol = Effect.gen(function*() { }).pipe( Effect.interruptible, Effect.catchIf((error) => error.reason === "Close", () => Effect.void), - Effect.orDie + Effect.orDie, + Effect.withUnhandledErrorLogLevel(Option.none()) ) } diff --git a/packages/sql-pg/package.json b/packages/sql-pg/package.json index f38d6d48837..33f9cba4719 100644 --- a/packages/sql-pg/package.json +++ b/packages/sql-pg/package.json @@ -50,6 +50,8 @@ "@effect/platform": "workspace:^", "@effect/sql": "workspace:^", "@testcontainers/postgresql": "^10.25.0", + "@types/pg": "^8.15.6", + "@types/pg-cursor": "^2.7.2", "effect": "workspace:^" }, "peerDependencies": { @@ -59,6 +61,7 @@ "effect": "workspace:^" }, "dependencies": { - "postgres": "^3.4.4" + "pg": "^8.16.3", + "pg-cursor": "^2.15.3" } } diff --git a/packages/sql-pg/src/PgClient.ts b/packages/sql-pg/src/PgClient.ts index 9f835a7aef2..6ec7a25dbda 100644 --- a/packages/sql-pg/src/PgClient.ts +++ b/packages/sql-pg/src/PgClient.ts @@ -7,19 +7,23 @@ import type { Connection } from "@effect/sql/SqlConnection" import { SqlError } from "@effect/sql/SqlError" import type { Custom, Fragment, Primitive } from "@effect/sql/Statement" import * as Statement from "@effect/sql/Statement" +import * as Arr from "effect/Array" import * as Chunk from "effect/Chunk" import * as Config from "effect/Config" -import type { ConfigError } from "effect/ConfigError" +import type * as ConfigError from "effect/ConfigError" import * as Context from "effect/Context" import * as Duration from "effect/Duration" import * as Effect from "effect/Effect" +import * as Fiber from "effect/Fiber" 
import * as Layer from "effect/Layer" +import * as Option from "effect/Option" +import * as RcRef from "effect/RcRef" import * as Redacted from "effect/Redacted" -import type * as Scope from "effect/Scope" +import * as Scope from "effect/Scope" import * as Stream from "effect/Stream" -import type * as NodeStream from "node:stream" import type { ConnectionOptions } from "node:tls" -import postgres from "postgres" +import * as Pg from "pg" +import Cursor from "pg-cursor" const ATTR_DB_SYSTEM_NAME = "db.system.name" const ATTR_DB_NAMESPACE = "db.namespace" @@ -30,13 +34,13 @@ const ATTR_SERVER_PORT = "server.port" * @category type ids * @since 1.0.0 */ -export const TypeId: unique symbol = Symbol.for("@effect/sql-pg/PgClient") +export const TypeId: TypeId = "~@effect/sql-pg/PgClient" /** * @category type ids * @since 1.0.0 */ -export type TypeId = typeof TypeId +export type TypeId = "~@effect/sql-pg/PgClient" /** * @category models @@ -46,7 +50,6 @@ export interface PgClient extends Client.SqlClient { readonly [TypeId]: TypeId readonly config: PgClientConfig readonly json: (_: unknown) => Fragment - readonly array: (_: ReadonlyArray<Primitive>) => Fragment readonly listen: (channel: string) => Stream.Stream<string, SqlError> readonly notify: (channel: string, payload: string) => Effect.Effect<void, SqlError> } @@ -72,33 +75,11 @@ export interface PgClientConfig { readonly username?: string | undefined readonly password?: Redacted.Redacted | undefined - /** - * A function returning a custom socket to use. This parameter is not documented - * in the postgres.js's type signature. See their - * [readme](https://github.com/porsager/postgres?tab=readme-ov-file#connection-details) instead. - * - * @example - * ```ts - * import { AuthTypes, Connector } from "@google-cloud/cloud-sql-connector"; - * import { PgClient } from "@effect/sql-pg"; - * import { Config, Effect, Layer } from "effect" - * - * const layer = Effect.gen(function*() { - * const connector = new Connector(); - * const clientOpts = yield* Effect.promise(() => connector.getOptions({ - * instanceConnectionName: "project:region:instance", - * authType: AuthTypes.IAM, - * })); - * return PgClient.layer({ socket: clientOpts.stream, username: "iam-user" }); - * }).pipe(Layer.unwrapEffect) - * ``` - */ - readonly socket?: (() => NodeStream.Duplex) | undefined - readonly idleTimeout?: Duration.DurationInput | undefined readonly connectTimeout?: Duration.DurationInput | undefined readonly maxConnections?: number | undefined + readonly minConnections?: number | undefined readonly connectionTTL?: Duration.DurationInput | undefined readonly applicationName?: string | undefined @@ -107,31 +88,7 @@ export interface PgClientConfig { readonly transformResultNames?: ((str: string) => string) | undefined readonly transformQueryNames?: ((str: string) => string) | undefined readonly transformJson?: boolean | undefined - readonly fetchTypes?: boolean | undefined - readonly prepare?: boolean | undefined - /** - * A callback when postgres has a notice, see - * [readme](https://github.com/porsager/postgres?tab=readme-ov-file#connection-details). - * By default, postgres.js logs these with console.log.
- * To silence notices, see the following example: - * @example - * ```ts - * import { PgClient } from "@effect/sql-pg"; - * import { Config, Layer } from "effect" - * - * const layer = PgClient.layer({ onnotice: Config.succeed(() => {}) }) - * ``` - */ - readonly onnotice?: (notice: postgres.Notice) => void - readonly types?: Record | undefined - - readonly debug?: postgres.Options<{}>["debug"] | undefined -} - -type PartialWithUndefined = { [K in keyof T]?: T[K] | undefined } - -interface PostgresOptions extends postgres.Options<{}> { - readonly socket?: (() => NodeStream.Duplex) | undefined + readonly types?: Pg.CustomTypesConfig | undefined } /** @@ -153,53 +110,39 @@ export const make = ( ).array : undefined - const opts: PartialWithUndefined = { - max: options.maxConnections ?? 10, - max_lifetime: options.connectionTTL - ? Math.round( - Duration.toMillis(Duration.decode(options.connectionTTL)) / 1000 - ) - : undefined, - idle_timeout: options.idleTimeout - ? Math.round( - Duration.toMillis(Duration.decode(options.idleTimeout)) / 1000 - ) - : undefined, - connect_timeout: options.connectTimeout - ? Math.round( - Duration.toMillis(Duration.decode(options.connectTimeout)) / 1000 - ) - : undefined, - + const pool = new Pg.Pool({ + connectionString: options.url ? Redacted.value(options.url) : undefined, + user: options.username, host: options.host, - port: options.port, - ssl: options.ssl, - path: options.path, database: options.database, - username: options.username, password: options.password ? Redacted.value(options.password) : undefined, - fetch_types: options.fetchTypes ?? true, - prepare: options.prepare ?? true, - onnotice: options.onnotice, - types: options.types, - debug: options.debug, - connection: { - application_name: options.applicationName ?? "@effect/sql-pg" - }, - socket: options.socket - } + ssl: options.ssl, + port: options.port, + connectionTimeoutMillis: options.connectTimeout + ? Duration.toMillis(options.connectTimeout) + : undefined, + idleTimeoutMillis: options.idleTimeout + ? Duration.toMillis(options.idleTimeout) + : undefined, + max: options.maxConnections, + min: options.minConnections, + maxLifetimeSeconds: options.connectionTTL + ? Duration.toSeconds(options.connectionTTL) + : undefined, + application_name: options.applicationName ?? "@effect/sql-pg", + types: options.types + }) - const client = options.url - ? 
postgres(Redacted.value(options.url), opts as any) - : postgres(opts as any) + pool.on("error", (_err) => { + }) yield* Effect.acquireRelease( Effect.tryPromise({ - try: () => client`select 1`, + try: () => pool.query("SELECT 1"), catch: (cause) => new SqlError({ cause, message: "PgClient: Failed to connect" }) }), () => - Effect.promise(() => client.end()).pipe( + Effect.promise(() => pool.end()).pipe( Effect.interruptible, Effect.timeoutOption(1000) ) @@ -215,15 +158,20 @@ export const make = ( ) class ConnectionImpl implements Connection { - constructor(private readonly pg: postgres.Sql<{}>) {} + readonly pg: Pg.Pool | Pg.PoolClient + constructor(pg: Pg.Pool | Pg.PoolClient) { + this.pg = pg + } - private run(query: postgres.PendingQuery<any> | postgres.PendingValuesQuery<any>) { + private run(query: string, params: ReadonlyArray<Primitive>) { return Effect.async<ReadonlyArray<any>, SqlError>((resume) => { - query.then( - (_) => resume(Effect.succeed(_)), - (cause) => resume(new SqlError({ cause, message: "Failed to execute statement" })) - ) - return Effect.sync(() => query.cancel()) + this.pg.query(query, params as any, (err, result) => { + if (err) { + resume(Effect.fail(new SqlError({ cause: err, message: "Failed to execute statement" }))) + } else { + resume(Effect.succeed(result.rows)) + } + }) }) } @@ -233,17 +181,40 @@ export const make = ( transformRows: (
(row: ReadonlyArray<any>) => ReadonlyArray<any>) | undefined ) { return transformRows - ? Effect.map(this.run(this.pg.unsafe(sql, params as any)), transformRows) - : this.run(this.pg.unsafe(sql, params as any)) + ? Effect.map(this.run(sql, params), transformRows) + : this.run(sql, params) } executeRaw(sql: string, params: ReadonlyArray<Primitive>) { - return this.run(this.pg.unsafe(sql, params as any)) + return Effect.async<unknown, SqlError>((resume) => { + this.pg.query(sql, params as any, (err, result) => { + if (err) { + resume(Effect.fail(new SqlError({ cause: err, message: "Failed to execute statement" }))) + } else { + resume(Effect.succeed(result)) + } + }) + }) } executeWithoutTransform(sql: string, params: ReadonlyArray<Primitive>) { - return this.run(this.pg.unsafe(sql, params as any)) + return this.run(sql, params) } executeValues(sql: string, params: ReadonlyArray<Primitive>) { - return this.run(this.pg.unsafe(sql, params as any).values()) + return Effect.async<ReadonlyArray<any>, SqlError>((resume) => { + this.pg.query( + { + text: sql, + rowMode: "array", + values: params as Array<any> + }, + (err, result) => { + if (err) { + resume(Effect.fail(new SqlError({ cause: err, message: "Failed to execute statement" }))) + } else { + resume(Effect.succeed(result.rows)) + } + } + ) + }) } executeUnprepared( sql: string, @@ -257,38 +228,59 @@ export const make = ( params: ReadonlyArray<Primitive>, transformRows: ((row: ReadonlyArray<any>) => ReadonlyArray<any>) | undefined ) { - return Stream.mapChunks( - Stream.fromAsyncIterable( - this.pg.unsafe(sql, params as any).cursor(16) as AsyncIterable< - Array<any> - >, - (cause) => new SqlError({ cause, message: "Failed to execute statement" }) - ), - Chunk.flatMap((rows) => Chunk.unsafeFromArray(transformRows ? transformRows(rows) : rows)) + // eslint-disable-next-line @typescript-eslint/no-this-alias + const self = this + return Effect.gen(function*() { + const cursor = yield* Effect.acquireRelease( + Effect.sync(() => self.pg.query(new Cursor(sql, params as any))), + (cursor) => Effect.sync(() => cursor.close()) + ) + const pull = Effect.async<Chunk.Chunk<any>, Option.Option<SqlError>>((resume) => { + cursor.read(128, (err, rows) => { + if (err) { + resume(Effect.fail(Option.some(new SqlError({ cause: err, message: "Failed to execute statement" })))) + } else if (Arr.isNonEmptyArray(rows)) { + resume(Effect.succeed(Chunk.unsafeFromArray(transformRows ? 
transformRows(rows) as any : rows))) + } else { + resume(Effect.fail(Option.none())) + } + }) + }) + return Stream.repeatEffectChunkOption(pull) + }).pipe( + Stream.unwrapScoped ) } } + const reserveRaw = Effect.async((resume) => { + const fiber = Option.getOrThrow(Fiber.getCurrentFiber()) + const scope = Context.unsafeGet(fiber.currentContext, Scope.Scope) + pool.connect((err, client, release) => { + if (err) { + resume(Effect.fail(new SqlError({ cause: err, message: "Failed to acquire connection for transaction" }))) + } else { + resume(Effect.as(Scope.addFinalizer(scope, Effect.sync(release)), client!)) + } + }) + }) + const reserve = Effect.map(reserveRaw, (client) => new ConnectionImpl(client)) + + const listenClient = yield* RcRef.make({ + acquire: reserveRaw + }) + return Object.assign( yield* Client.make({ - acquirer: Effect.succeed(new ConnectionImpl(client)), - transactionAcquirer: Effect.map( - Effect.acquireRelease( - Effect.tryPromise({ - try: () => client.reserve(), - catch: (cause) => new SqlError({ cause, message: "Failed to reserve connection" }) - }), - (pg) => Effect.sync(() => pg.release()) - ), - (_) => new ConnectionImpl(_) - ), + acquirer: Effect.succeed(new ConnectionImpl(pool)), + transactionAcquirer: reserve, compiler, spanAttributes: [ ...(options.spanAttributes ? Object.entries(options.spanAttributes) : []), [ATTR_DB_SYSTEM_NAME, "postgresql"], - [ATTR_DB_NAMESPACE, opts.database ?? options.username ?? "postgres"], - [ATTR_SERVER_ADDRESS, opts.host ?? "localhost"], - [ATTR_SERVER_PORT, opts.port ?? 5432] + [ATTR_DB_NAMESPACE, options.database ?? options.username ?? "postgres"], + [ATTR_SERVER_ADDRESS, options.host ?? "localhost"], + [ATTR_SERVER_PORT, options.port ?? 5432] ], transformRows }), @@ -296,28 +288,42 @@ export const make = ( [TypeId]: TypeId as TypeId, config: { ...options, - host: client.options.host[0] ?? undefined, - port: client.options.port[0] ?? undefined, - username: client.options.user, - password: client.options.pass ? Redacted.make(client.options.pass) : undefined, - database: client.options.database + host: pool.options.host, + port: pool.options.port, + username: pool.options.user, + password: typeof pool.options.password === "string" ? 
Redacted.make(pool.options.password) : undefined, + database: pool.options.database }, json: (_: unknown) => PgJson(_), - array: (_: ReadonlyArray<Primitive>) => PgArray(_), listen: (channel: string) => - Stream.asyncPush<string, SqlError>((emit) => - Effect.acquireRelease( - Effect.tryPromise({ - try: () => client.listen(channel, (payload) => emit.single(payload)), - catch: (cause) => new SqlError({ cause, message: "Failed to listen" }) - }), - ({ unlisten }) => Effect.promise(() => unlisten()) + Stream.asyncPush<string, SqlError>(Effect.fnUntraced(function*(emit) { + const client = yield* RcRef.get(listenClient) + function onNotification(msg: Pg.Notification) { + if (msg.channel === channel && msg.payload) { + emit.single(msg.payload) + } + } + yield* Effect.addFinalizer(() => + Effect.promise(() => { + client.off("notification", onNotification) + return client.query(`UNLISTEN ${Pg.escapeIdentifier(channel)}`) + }) ) - ), + yield* Effect.tryPromise({ + try: () => client.query(`LISTEN ${Pg.escapeIdentifier(channel)}`), + catch: (cause) => new SqlError({ cause, message: "Failed to listen" }) + }) + client.on("notification", onNotification) + })), notify: (channel: string, payload: string) => - Effect.tryPromise({ - try: () => client.notify(channel, payload), - catch: (cause) => new SqlError({ cause, message: "Failed to notify" }) + Effect.async<void, SqlError>((resume) => { + pool.query(`NOTIFY ${Pg.escapeIdentifier(channel)}, $1`, [payload], (err) => { + if (err) { + resume(Effect.fail(new SqlError({ cause: err, message: "Failed to notify" }))) + } else { + resume(Effect.void) + } + }) }) } ) @@ -329,7 +335,7 @@ export const make = ( */ export const layerConfig = ( config: Config.Config.Wrap<PgClientConfig> -): Layer.Layer<PgClient | Client.SqlClient, ConfigError | SqlError> => +): Layer.Layer<PgClient | Client.SqlClient, ConfigError.ConfigError | SqlError> => Layer.scopedContext( Config.unwrap(config).pipe( Effect.flatMap(make), @@ -347,7 +353,7 @@ export const layerConfig = ( */ export const layer = ( config: PgClientConfig -): Layer.Layer<PgClient | Client.SqlClient, ConfigError | SqlError> => +): Layer.Layer<PgClient | Client.SqlClient, ConfigError.ConfigError | SqlError> => Layer.scopedContext( Effect.map(make(config), (client) => Context.make(PgClient, client).pipe( @@ -363,8 +369,6 @@ export const makeCompiler = ( transform?: (_: string) => string, transformJson = true ): Statement.Compiler => { - const pg = postgres({ max: 0 }) - const transformValue = transformJson && transform ? Statement.defaultTransforms(transform).value : undefined @@ -393,33 +397,12 @@ export const makeCompiler = ( return [ placeholder(undefined), [ - pg.json( - withoutTransform || transformValue === undefined - ? type.i0 - : transformValue(type.i0) - ) as any + withoutTransform || transformValue === undefined + ? 
type.i0 + : transformValue(type.i0) ] ] } - case "PgArray": { - const param = pg.array(type.i0 as any) as any - const first = type.i0[0] - switch (typeof first) { - case "boolean": { - param.type = 1000 - break - } - case "number": { - param.type = 1022 - break - } - default: { - param.type = 1009 - break - } - } - return [placeholder(undefined), [param]] - } } } }) @@ -431,7 +414,7 @@ const escape = Statement.defaultEscape("\"") * @category custom types * @since 1.0.0 */ -export type PgCustom = PgJson | PgArray +export type PgCustom = PgJson /** * @category custom types @@ -443,14 +426,3 @@ interface PgJson extends Custom<"PgJson", unknown> {} /** * @since 1.0.0 */ const PgJson = Statement.custom<PgJson>("PgJson") - -/** - * @category custom types - * @since 1.0.0 - */ -interface PgArray extends Custom<"PgArray", ReadonlyArray<Primitive>> {} /** - * @category custom types - * @since 1.0.0 - */ -const PgArray = Statement.custom<PgArray>("PgArray") diff --git a/packages/sql-pg/test/Client.test.ts b/packages/sql-pg/test/Client.test.ts index 32540725d33..687eb3c1ab2 100644 --- a/packages/sql-pg/test/Client.test.ts +++ b/packages/sql-pg/test/Client.test.ts @@ -101,33 +101,6 @@ it.layer(PgContainer.ClientLive, { timeout: "30 seconds" })("PgClient", (it) => expect(result[1]).toEqual(["Tim", "John", now]) })) - it.effect("json", () => - Effect.gen(function*() { - const sql = yield* PgClient.PgClient - const [query, params] = sql`SELECT ${sql.json({ a: 1 })}`.compile() - expect(query).toEqual(`SELECT $1`) - expect((params[0] as any).type).toEqual(3802) - })) - - it.effect("json transform", () => - Effect.gen(function*() { - const sql = yield* PgClient.PgClient - const [query, params] = compilerTransform.compile( - sql`SELECT ${sql.json({ aKey: 1 })}`, - false - ) - expect(query).toEqual(`SELECT $1`) - assert.deepEqual((params[0] as any).value, { a_key: 1 }) - })) - - it.effect("array", () => - Effect.gen(function*() { - const sql = yield* PgClient.PgClient - const [query, params] = sql`SELECT ${sql.array([1, 2, 3])}`.compile() - expect(query).toEqual(`SELECT $1`) - expect((params[0] as any).value).toEqual([1, 2, 3]) - })) - it("transform nested", () => { assert.deepEqual( transformsNested.array([ @@ -204,25 +177,6 @@ it.layer(PgContainer.ClientLive, { timeout: "30 seconds" })("PgClient", (it) => "INSERT INTO people (\"name\",\"age\",\"json\") VALUES ($1,$2,$3)" ) assert.lengthOf(params, 3) - expect((params[2] as any).type).toEqual(3802) - })) - - it.effect("insert array", () => - Effect.gen(function*() { - const sql = yield* PgClient.PgClient - const [query, params] = sql`INSERT INTO people ${ - sql.insert({ - name: "Tim", - age: 10, - array: sql.array([1, 2, 3]) - }) - }`.compile() - assert.strictEqual( - query, - "INSERT INTO people (\"name\",\"age\",\"array\") VALUES ($1,$2,$3)" - ) - assert.lengthOf(params, 3) - expect((params[2] as any).type).toEqual(1022) })) it.effect("update fragments", () => @@ -240,8 +194,6 @@ it.layer(PgContainer.ClientLive, { timeout: "30 seconds" })("PgClient", (it) => `UPDATE people SET json = data.json FROM (values ($1),($2)) AS data("json") WHERE created_at > $3` ) assert.lengthOf(params, 3) - expect((params[0] as any).type).toEqual(3802) - expect((params[1] as any).type).toEqual(3802) })) it.effect("onDialect", () => diff --git a/packages/workflow/README.md b/packages/workflow/README.md index ee6337f92dc..59fab27e295 100644 --- a/packages/workflow/README.md +++ b/packages/workflow/README.md @@ -6,7 +6,7 @@ Build and run durable workflows in TypeScript with Effect.
```typescript import { ClusterWorkflowEngine } from "@effect/cluster" -import { NodeClusterRunnerSocket, NodeRuntime } from "@effect/platform-node" +import { NodeClusterSocket, NodeRuntime } from "@effect/platform-node" import { PgClient } from "@effect/sql-pg" import { Activity, @@ -122,7 +122,7 @@ const EmailWorkflowLayer = EmailWorkflow.toLayer( // `ClusterWorkflowEngine.layer` Layer, and provide it with your cluster Runner // layer. const WorkflowEngineLayer = ClusterWorkflowEngine.layer.pipe( - Layer.provideMerge(NodeClusterRunnerSocket.layer({ storage: "sql" })), + Layer.provideMerge(NodeClusterSocket.layer()), Layer.provideMerge( PgClient.layer({ database: "effect_cluster", diff --git a/packages/workflow/src/Activity.ts b/packages/workflow/src/Activity.ts index 0b553dad2f3..445bc500adb 100644 --- a/packages/workflow/src/Activity.ts +++ b/packages/workflow/src/Activity.ts @@ -199,6 +199,7 @@ const makeExecute = Effect.fnUntraced(function*< const engine = yield* EngineTag const instance = yield* InstanceTag const attempt = yield* CurrentAttempt + yield* Effect.annotateCurrentSpan({ executionId: instance.executionId }) const result = yield* Workflow.wrapActivityResult( engine.activityExecute({ activity, @@ -213,4 +214,7 @@ const makeExecute = Effect.fnUntraced(function*< Schema.decode(activity.exitSchema)(result.exit) ) return yield* exit -}) +}, (effect, activity) => + Effect.withSpan(effect, activity.name, { + captureStackTrace: false + })) diff --git a/packages/workflow/src/Workflow.ts b/packages/workflow/src/Workflow.ts index 05d26c17fa1..e6b04d176ae 100644 --- a/packages/workflow/src/Workflow.ts +++ b/packages/workflow/src/Workflow.ts @@ -26,6 +26,41 @@ import type { WorkflowEngine, WorkflowInstance } from "./WorkflowEngine.js" */ export const TypeId: unique symbol = Symbol.for("@effect/workflow/Workflow") +/** + * @since 1.0.0 + */ +export declare namespace Workflow { + /** + * Extracts the type of the Payload of a `Workflow`. + * + * @since 1.0.0 + * @category Type-level Utils + */ + export type Payload> = W extends Workflow + ? Payload["Type"] + : never + + /** + * Extracts the type of the Success of a `Workflow`. + * + * @since 1.0.0 + * @category Type-level Utils + */ + export type Success> = W extends Workflow + ? Success["Type"] + : never + + /** + * Extracts the type of the Error of a `Workflow`. + * + * @since 1.0.0 + * @category Type-level Utils + */ + export type Error> = W extends Workflow + ? Error["Type"] + : never +} + /** * @since 1.0.0 * @category Symbols @@ -300,7 +335,11 @@ export const make = < parent: Option.getOrUndefined(parentInstance) }) if (Option.isSome(parentInstance)) { - result = yield* wrapActivityResult(run, (result) => result._tag === "Suspended") + result = yield* run + if (result._tag === "Suspended") { + yield* engine.resume(self, executionId) + result = yield* wrapActivityResult(run, (result) => result._tag === "Suspended") + } if (result._tag === "Suspended") { return yield* suspend(parentInstance.value) } @@ -530,31 +569,31 @@ export const intoResult = ( const instance = Context.get(context, InstanceTag) const captureDefects = Context.get(instance.workflow.annotations, CaptureDefects) const suspendOnFailure = Context.get(instance.workflow.annotations, SuspendOnFailure) - return Effect.uninterruptibleMask((restore) => - restore(effect).pipe( - // So we can use external interruption to suspend a workflow - Effect.fork, - Effect.flatMap((fiber) => Effect.onInterrupt(Fiber.join(fiber), () => Fiber.interrupt(fiber))), - suspendOnFailure ? 
- Effect.catchAllCause((cause) => { - instance.suspended = true - if (!Cause.isInterruptedOnly(cause)) { - instance.cause = Cause.die(Cause.squash(cause)) - } - return Effect.interrupt - }) : - identity, - Effect.scoped, - Effect.matchCauseEffect({ - onSuccess: (value) => Effect.succeed(new Complete({ exit: Exit.succeed(value) })), - onFailure: (cause): Effect.Effect> => - instance.suspended - ? Effect.succeed(new Suspended({ cause: instance.cause })) - : (!instance.interrupted && Cause.isInterruptedOnly(cause)) || (!captureDefects && Cause.isDie(cause)) - ? Effect.failCause(cause as Cause.Cause) - : Effect.succeed(new Complete({ exit: Exit.failCause(cause) })) - }) - ) + return effect.pipe( + // So we can use external interruption to suspend a workflow + Effect.fork, + Effect.flatMap((fiber) => Effect.onInterrupt(Fiber.join(fiber), () => Fiber.interrupt(fiber))), + Effect.interruptible, + suspendOnFailure ? + Effect.catchAllCause((cause) => { + instance.suspended = true + if (!Cause.isInterruptedOnly(cause)) { + instance.cause = Cause.die(Cause.squash(cause)) + } + return Effect.interrupt + }) : + identity, + Effect.scoped, + Effect.matchCauseEffect({ + onSuccess: (value) => Effect.succeed(new Complete({ exit: Exit.succeed(value) })), + onFailure: (cause): Effect.Effect> => + instance.suspended + ? Effect.succeed(new Suspended({ cause: instance.cause })) + : (!instance.interrupted && Cause.isInterruptedOnly(cause)) || (!captureDefects && Cause.isDie(cause)) + ? Effect.failCause(cause as Cause.Cause) + : Effect.succeed(new Complete({ exit: Exit.failCause(cause) })) + }), + Effect.uninterruptible ) }) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 330986cad2a..4ac65f6fd4d 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -763,7 +763,7 @@ importers: version: 10.28.0 drizzle-orm: specifier: ^0.43.1 - version: 0.43.1(@cloudflare/workers-types@4.20250715.0)(@libsql/client@0.12.0)(@op-engineering/op-sqlite@7.1.0(react-native@0.80.1(@babel/core@7.28.0)(@types/react@19.1.8)(react@19.1.0))(react@19.1.0))(@opentelemetry/api@1.9.0)(@types/better-sqlite3@7.6.13)(better-sqlite3@11.10.0)(bun-types@1.2.18(@types/react@19.1.8))(kysely@0.28.2)(mysql2@3.14.2)(postgres@3.4.7) + version: 0.43.1(@cloudflare/workers-types@4.20250715.0)(@libsql/client@0.12.0)(@op-engineering/op-sqlite@7.1.0(react-native@0.80.1(@babel/core@7.28.0)(@types/react@19.1.8)(react@19.1.0))(react@19.1.0))(@opentelemetry/api@1.9.0)(@types/better-sqlite3@7.6.13)(@types/pg@8.15.6)(better-sqlite3@11.10.0)(bun-types@1.2.18(@types/react@19.1.8))(kysely@0.28.2)(mysql2@3.14.2)(pg@8.16.3)(postgres@3.4.7) effect: specifier: workspace:^ version: link:../effect @@ -880,9 +880,12 @@ importers: packages/sql-pg: dependencies: - postgres: - specifier: ^3.4.4 - version: 3.4.7 + pg: + specifier: ^8.16.3 + version: 8.16.3 + pg-cursor: + specifier: ^2.15.3 + version: 2.15.3(pg@8.16.3) devDependencies: '@effect/experimental': specifier: workspace:^ @@ -896,6 +899,12 @@ importers: '@testcontainers/postgresql': specifier: ^10.25.0 version: 10.28.0 + '@types/pg': + specifier: ^8.15.6 + version: 8.15.6 + '@types/pg-cursor': + specifier: ^2.7.2 + version: 2.7.2 effect: specifier: workspace:^ version: link:../effect @@ -2817,6 +2826,12 @@ packages: '@types/normalize-package-data@2.4.4': resolution: {integrity: sha512-37i+OaWTh9qeK4LSHPsyRC7NahnGotNuZvjLSgcPzblpHB3rrCJxAOgI5gCdKm7coonsaX1Of0ILiTcnZjbfxA==} + '@types/pg-cursor@2.7.2': + resolution: {integrity: sha512-m3xT8bVFCvx98LuzbvXyuCdT/Hjdd/v8ml4jL4K1QF70Y8clOfCFdgoaEB1FWdcSwcpoFYZTJQaMD9/GQ27efQ==} + + 
'@types/pg@8.15.6': + resolution: {integrity: sha512-NoaMtzhxOrubeL/7UZuNTrejB4MPAJ0RpxZqXQf2qXuVlTPuG6Y8p4u9dKRaue4yjmC7ZhzVO2/Yyyn25znrPQ==} + '@types/react@19.1.8': resolution: {integrity: sha512-AwAfQ2Wa5bCx9WP8nZL2uMZWod7J7/JSplxbTmBQ5ms6QpqNYm672H0Vu9ZVKVngQ+ii4R/byguVEUZQyeg44g==} @@ -4105,6 +4120,7 @@ packages: eslint-plugin-markdown@4.0.1: resolution: {integrity: sha512-5/MnGvYU0i8MbHH5cg8S+Vl3DL+bqRNYshk1xUO86DilNBaxtTkhH+5FD0/yO03AmlI6+lfNFdk2yOw72EPzpA==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + deprecated: Please use @eslint/markdown instead peerDependencies: eslint: '>=8' @@ -5697,6 +5713,45 @@ packages: resolution: {integrity: sha512-//nshmD55c46FuFw26xV/xFAaB5HF9Xdap7HJBBnrKdAd6/GxDBaNA1870O79+9ueg61cZLSVc+OaFlfmObYVQ==} engines: {node: '>= 14.16'} + pg-cloudflare@1.2.7: + resolution: {integrity: sha512-YgCtzMH0ptvZJslLM1ffsY4EuGaU0cx4XSdXLRFae8bPP4dS5xL1tNB3k2o/N64cHJpwU7dxKli/nZ2lUa5fLg==} + + pg-connection-string@2.9.1: + resolution: {integrity: sha512-nkc6NpDcvPVpZXxrreI/FOtX3XemeLl8E0qFr6F2Lrm/I8WOnaWNhIPK2Z7OHpw7gh5XJThi6j6ppgNoaT1w4w==} + + pg-cursor@2.15.3: + resolution: {integrity: sha512-eHw63TsiGtFEfAd7tOTZ+TLy+i/2ePKS20H84qCQ+aQ60pve05Okon9tKMC+YN3j6XyeFoHnaim7Lt9WVafQsA==} + peerDependencies: + pg: ^8 + + pg-int8@1.0.1: + resolution: {integrity: sha512-WCtabS6t3c8SkpDBUlb1kjOs7l66xsGdKpIPZsg4wR+B3+u9UAum2odSsF9tnvxg80h4ZxLWMy4pRjOsFIqQpw==} + engines: {node: '>=4.0.0'} + + pg-pool@3.10.1: + resolution: {integrity: sha512-Tu8jMlcX+9d8+QVzKIvM/uJtp07PKr82IUOYEphaWcoBhIYkoHpLXN3qO59nAI11ripznDsEzEv8nUxBVWajGg==} + peerDependencies: + pg: '>=8.0' + + pg-protocol@1.10.3: + resolution: {integrity: sha512-6DIBgBQaTKDJyxnXaLiLR8wBpQQcGWuAESkRBX/t6OwA8YsqP+iVSiond2EDy6Y/dsGk8rh/jtax3js5NeV7JQ==} + + pg-types@2.2.0: + resolution: {integrity: sha512-qTAAlrEsl8s4OiEQY69wDvcMIdQN6wdz5ojQiOy6YRMuynxenON0O5oCpJI6lshc6scgAY8qvJ2On/p+CXY0GA==} + engines: {node: '>=4'} + + pg@8.16.3: + resolution: {integrity: sha512-enxc1h0jA/aq5oSDMvqyW3q89ra6XIIDZgCX9vkMrnz5DFTw/Ny3Li2lFQ+pt3L6MCgm/5o2o8HW9hiJji+xvw==} + engines: {node: '>= 16.0.0'} + peerDependencies: + pg-native: '>=3.0.1' + peerDependenciesMeta: + pg-native: + optional: true + + pgpass@1.0.5: + resolution: {integrity: sha512-FdW9r/jQZhSeohs1Z3sI1yxFQNFvMcnmfuj4WBMUTxOrAyLMaTcE1aAMBiTlbMNaXvBCQuVi0R7hd8udDSP7ug==} + picocolors@1.1.1: resolution: {integrity: sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==} @@ -5751,6 +5806,22 @@ packages: resolution: {integrity: sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==} engines: {node: ^10 || ^12 || >=14} + postgres-array@2.0.0: + resolution: {integrity: sha512-VpZrUqU5A69eQyW2c5CA1jtLecCsN2U/bD6VilrFDWq5+5UIEVO7nazS3TEcHf1zuPYO/sqGvUvW62g86RXZuA==} + engines: {node: '>=4'} + + postgres-bytea@1.0.0: + resolution: {integrity: sha512-xy3pmLuQqRBZBXDULy7KbaitYqLcmxigw14Q5sj8QBVLqEwXfeybIKVWiqAXTlcvdvb0+xkOtDbfQMOf4lST1w==} + engines: {node: '>=0.10.0'} + + postgres-date@1.0.7: + resolution: {integrity: sha512-suDmjLVQg78nMK2UZ454hAG+OAW+HQPZ6n++TNDUX+L0+uUlLywnoxJKDou51Zm+zTCjrCl0Nq6J9C5hP9vK/Q==} + engines: {node: '>=0.10.0'} + + postgres-interval@1.2.0: + resolution: {integrity: sha512-9ZhXKM/rw350N1ovuWHbGxnGh/SNJ4cnxHiM0rxE4VN41wsg8P8zWn9hv/buK00RP4WvlOyr/RBDiptyxVbkZQ==} + engines: {node: '>=0.10.0'} + postgres@3.4.7: resolution: {integrity: sha512-Jtc2612XINuBjIl/QTWsV5UvE8UHuNblcO3vVADSrKsrc6RqGX6lOW1cEo3CM2v0XG4Nat8nI+YM7/f26VxXLw==} engines: {node: '>=12'} @@ -6202,6 +6273,10 
@@ packages: split-ca@1.0.1: resolution: {integrity: sha512-Q5thBSxp5t8WPTTJQS59LrGqOZqOsrhDGDVm8azCqIBjSBd7nd9o2PM+mDulQQkh8h//4U6hFZnc/mul8t5pWQ==} + split2@4.2.0: + resolution: {integrity: sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg==} + engines: {node: '>= 10.x'} + sprintf-js@1.0.3: resolution: {integrity: sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==} @@ -8730,6 +8805,17 @@ snapshots: '@types/normalize-package-data@2.4.4': {} + '@types/pg-cursor@2.7.2': + dependencies: + '@types/node': 22.16.4 + '@types/pg': 8.15.6 + + '@types/pg@8.15.6': + dependencies: + '@types/node': 22.16.4 + pg-protocol: 1.10.3 + pg-types: 2.2.0 + '@types/react@19.1.8': dependencies: csstype: 3.1.3 @@ -9887,17 +9973,19 @@ snapshots: dotenv@8.6.0: {} - drizzle-orm@0.43.1(@cloudflare/workers-types@4.20250715.0)(@libsql/client@0.12.0)(@op-engineering/op-sqlite@7.1.0(react-native@0.80.1(@babel/core@7.28.0)(@types/react@19.1.8)(react@19.1.0))(react@19.1.0))(@opentelemetry/api@1.9.0)(@types/better-sqlite3@7.6.13)(better-sqlite3@11.10.0)(bun-types@1.2.18(@types/react@19.1.8))(kysely@0.28.2)(mysql2@3.14.2)(postgres@3.4.7): + drizzle-orm@0.43.1(@cloudflare/workers-types@4.20250715.0)(@libsql/client@0.12.0)(@op-engineering/op-sqlite@7.1.0(react-native@0.80.1(@babel/core@7.28.0)(@types/react@19.1.8)(react@19.1.0))(react@19.1.0))(@opentelemetry/api@1.9.0)(@types/better-sqlite3@7.6.13)(@types/pg@8.15.6)(better-sqlite3@11.10.0)(bun-types@1.2.18(@types/react@19.1.8))(kysely@0.28.2)(mysql2@3.14.2)(pg@8.16.3)(postgres@3.4.7): optionalDependencies: '@cloudflare/workers-types': 4.20250715.0 '@libsql/client': 0.12.0 '@op-engineering/op-sqlite': 7.1.0(react-native@0.80.1(@babel/core@7.28.0)(@types/react@19.1.8)(react@19.1.0))(react@19.1.0) '@opentelemetry/api': 1.9.0 '@types/better-sqlite3': 7.6.13 + '@types/pg': 8.15.6 better-sqlite3: 11.10.0 bun-types: 1.2.18(@types/react@19.1.8) kysely: 0.28.2 mysql2: 3.14.2 + pg: 8.16.3 postgres: 3.4.7 dunder-proto@1.0.1: @@ -12005,6 +12093,45 @@ snapshots: pathval@2.0.1: {} + pg-cloudflare@1.2.7: + optional: true + + pg-connection-string@2.9.1: {} + + pg-cursor@2.15.3(pg@8.16.3): + dependencies: + pg: 8.16.3 + + pg-int8@1.0.1: {} + + pg-pool@3.10.1(pg@8.16.3): + dependencies: + pg: 8.16.3 + + pg-protocol@1.10.3: {} + + pg-types@2.2.0: + dependencies: + pg-int8: 1.0.1 + postgres-array: 2.0.0 + postgres-bytea: 1.0.0 + postgres-date: 1.0.7 + postgres-interval: 1.2.0 + + pg@8.16.3: + dependencies: + pg-connection-string: 2.9.1 + pg-pool: 3.10.1(pg@8.16.3) + pg-protocol: 1.10.3 + pg-types: 2.2.0 + pgpass: 1.0.5 + optionalDependencies: + pg-cloudflare: 1.2.7 + + pgpass@1.0.5: + dependencies: + split2: 4.2.0 + picocolors@1.1.1: {} picomatch@2.3.1: {} @@ -12046,7 +12173,18 @@ snapshots: picocolors: 1.1.1 source-map-js: 1.2.1 - postgres@3.4.7: {} + postgres-array@2.0.0: {} + + postgres-bytea@1.0.0: {} + + postgres-date@1.0.7: {} + + postgres-interval@1.2.0: + dependencies: + xtend: 4.0.2 + + postgres@3.4.7: + optional: true prebuild-install@7.1.3: dependencies: @@ -12637,6 +12775,8 @@ snapshots: split-ca@1.0.1: {} + split2@4.2.0: {} + sprintf-js@1.0.3: {} sprintf-js@1.1.3: {}
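For reference, a minimal sketch of wiring the consolidated `NodeClusterSocket.layer` from the diff above (not part of the changeset itself; the database name is a placeholder):

```typescript
import { NodeClusterSocket, NodeRuntime } from "@effect/platform-node"
import { PgClient } from "@effect/sql-pg"
import { Layer } from "effect"

// storage: "sql" wires SqlMessageStorage + SqlRunnerStorage, so a SqlClient
// layer must be supplied; storage: "local" keeps runner state in memory and
// drops the SqlClient requirement. runnerHealth defaults to RPC pings;
// "k8s" would use the Kubernetes API instead (and needs pod read permissions).
const ShardingLive = NodeClusterSocket.layer({
  storage: "sql",
  runnerHealth: "ping"
}).pipe(
  Layer.provide(PgClient.layer({ database: "effect_cluster" }))
)

NodeRuntime.runMain(Layer.launch(ShardingLive))
```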
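The `pg`-backed `listen`/`notify` surface above can be exercised like this; a sketch only, with an illustrative channel name and payload:

```typescript
import { PgClient } from "@effect/sql-pg"
import { Effect, Stream } from "effect"

const program = Effect.gen(function*() {
  const sql = yield* PgClient.PgClient

  // `listen` LISTENs on a dedicated pooled connection (shared via RcRef)
  // and emits each NOTIFY payload as a stream element.
  yield* sql.listen("jobs").pipe(
    Stream.runForEach((payload) => Effect.log(`job event: ${payload}`)),
    Effect.fork
  )

  // `notify` issues NOTIFY through the pool.
  yield* sql.notify("jobs", "started")
})
```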
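The new `Workflow` type-level utils can be applied to a workflow definition as below; `EmailWorkflow` is a hypothetical definition in the style of the workflow README:

```typescript
import { Workflow } from "@effect/workflow"
import { Schema } from "effect"

const EmailWorkflow = Workflow.make({
  name: "EmailWorkflow",
  payload: {
    id: Schema.String,
    to: Schema.String
  },
  idempotencyKey: ({ id }) => id
})

// Extract the payload/success/error types from the definition:
type Payload = Workflow.Workflow.Payload<typeof EmailWorkflow>
// roughly { readonly id: string; readonly to: string }
type Success = Workflow.Workflow.Success<typeof EmailWorkflow>
type Error = Workflow.Workflow.Error<typeof EmailWorkflow>
```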