2 changes: 2 additions & 0 deletions Cargo.toml

@@ -9,6 +9,7 @@ askama = "0.12.1"
 atty = "0.2.14"
 axum = "0.7.7"
 chrono = "0.4.40"
+dashmap = "6.1"
 fern = {version = "0.7.1", features = ["colored"]}
 gethostname = "0.5.0"
 log = "0.4.22"
@@ -21,6 +22,7 @@ slog-stdlog = "4.1.1"
 stderrlog = "0.6.0"
 structopt = "0.3.26"
 tokio = {version = "1.40.0", features = ["full", "test-util", "tracing", "macros", "rt-multi-thread"] }
+tokio-stream = "0.1"
 tonic = "0.12.2"
 
 [build-dependencies]
14 changes: 10 additions & 4 deletions src/bin/lighthouse.rs

@@ -4,8 +4,11 @@
 // This source code is licensed under the BSD-style license found in the
 // LICENSE file in the root directory of this source tree.
 
+use std::net::SocketAddr;
 use structopt::StructOpt;
-use torchft::lighthouse::{Lighthouse, LighthouseOpt};
+use torchft::lighthouse::LighthouseOpt;
+use torchft::router::Router;
+use torchft::torchftpb::lighthouse_service_server::LighthouseServiceServer;
 
 #[tokio::main(flavor = "multi_thread", worker_threads = 4)]
 async fn main() {
@@ -17,7 +20,10 @@ async fn main() {
         .unwrap();
 
     let opt = LighthouseOpt::from_args();
-    let lighthouse = Lighthouse::new(opt).await.unwrap();
-
-    lighthouse.run().await.unwrap();
+    let router = Router::new(opt.clone());
+    tonic::transport::Server::builder()
+        .add_service(LighthouseServiceServer::new(router))
+        .serve(opt.bind.parse::<SocketAddr>().unwrap())
+        .await
+        .unwrap();
 }
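With the binary serving a Router, any gRPC client can pick a room purely through metadata. A minimal sketch of a direct tonic call, assuming a lighthouse is already listening; the address, port, and replica id here are placeholders:

```rust
use tonic::metadata::MetadataValue;
use torchft::torchftpb::lighthouse_service_client::LighthouseServiceClient;
use torchft::torchftpb::LighthouseHeartbeatRequest;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Placeholder address: point this at a running lighthouse.
    let mut client = LighthouseServiceClient::connect("http://[::1]:29510").await?;

    // Heartbeats carrying `room-id: jobA` are routed to the "jobA"
    // Lighthouse instance; omitting the header falls back to "default".
    let mut req = tonic::Request::new(LighthouseHeartbeatRequest {
        replica_id: "a0".to_string(),
    });
    req.metadata_mut()
        .insert("room-id", MetadataValue::try_from("jobA")?);
    client.heartbeat(req).await?;
    Ok(())
}
```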
79 changes: 62 additions & 17 deletions src/lib.rs

@@ -8,8 +8,11 @@ pub mod lighthouse;
 pub mod manager;
 mod net;
 mod retry;
+mod router;
 mod timeout;
 
+pub use crate::router::Router;
+
 use anyhow::Result;
 use atty::Stream;
 use core::time::Duration;
@@ -21,6 +24,7 @@ use std::thread::available_parallelism;
 use structopt::StructOpt;
 use tokio::runtime::Runtime;
 use tokio::task::JoinHandle;
+use tokio_stream::wrappers::TcpListenerStream;
 use tonic::transport::Channel;
 use tonic::Status;
 
@@ -33,7 +37,9 @@ pub mod torchftpb {
 }
 
 use crate::torchftpb::lighthouse_service_client::LighthouseServiceClient;
+use crate::torchftpb::lighthouse_service_server::LighthouseServiceServer;
 use crate::torchftpb::manager_service_client::ManagerServiceClient;
+use crate::torchftpb::LighthouseHeartbeatRequest;
 use crate::torchftpb::{
     CheckpointMetadataRequest, LighthouseHeartbeatRequest, LighthouseQuorumRequest,
     ManagerQuorumRequest, ShouldCommitRequest,
@@ -339,9 +345,12 @@ fn lighthouse_main(py: Python<'_>) -> PyResult<()> {
 }
 
 async fn lighthouse_main_async(opt: lighthouse::LighthouseOpt) -> Result<()> {
-    let lighthouse = lighthouse::Lighthouse::new(opt).await?;
+    let router = Router::new(opt.clone());
 
-    lighthouse.run().await?;
+    tonic::transport::Server::builder()
+        .add_service(LighthouseServiceServer::new(router))
+        .serve(opt.bind.parse::<std::net::SocketAddr>()?)
+        .await?;
 
     Ok(())
 }
@@ -479,13 +488,19 @@ fn convert_quorum(py: Python, q: &torchftpb::Quorum) -> PyResult<Quorum> {
 struct LighthouseClient {
     client: LighthouseServiceClient<Channel>,
     runtime: Runtime,
+    room_id: Option<String>,
 }
 
 #[pymethods]
 impl LighthouseClient {
-    #[pyo3(signature = (addr, connect_timeout))]
+    #[pyo3(signature = (addr, connect_timeout, room_id = None))]
     #[new]
-    fn new(py: Python<'_>, addr: String, connect_timeout: Duration) -> PyResult<Self> {
+    fn new(
+        py: Python<'_>,
+        addr: String,
+        connect_timeout: Duration,
+        room_id: Option<String>,
+    ) -> PyResult<Self> {
         py.allow_threads(move || {
             let runtime = tokio::runtime::Builder::new_multi_thread()
                 .worker_threads(num_threads())
@@ -498,6 +513,7 @@ impl LighthouseClient {
             Ok(Self {
                 client: client,
                 runtime: runtime,
+                room_id: room_id,
             })
         })
     }
@@ -553,6 +569,8 @@ impl LighthouseClient {
                 }),
             });
 
+            let mut request = self.add_room_header(request);
+
             // This timeout is processed on the server side so we also enable
             // keep alives to detect server health.
             request.set_timeout(timeout);
@@ -581,13 +599,29 @@
     ) -> Result<(), StatusError> {
         py.allow_threads(move || {
             let mut req = tonic::Request::new(LighthouseHeartbeatRequest { replica_id });
+            let mut req = self.add_room_header(req);
             req.set_timeout(timeout);
             self.runtime.block_on(self.client.clone().heartbeat(req))?;
             Ok(())
         })
     }
 }
 
+impl LighthouseClient {
+    /// Attach the `"room-id"` header if `self.room_id` is `Some(_)`.
+    fn add_room_header<T>(&self, mut req: tonic::Request<T>) -> tonic::Request<T> {
+        if let Some(ref id) = self.room_id {
+            use tonic::metadata::MetadataValue;
+            req.metadata_mut().insert(
+                crate::router::ROOM_ID_HEADER,
+                MetadataValue::try_from(id.as_str()).expect("room-id ascii"),
+            );
+        }
+        req
+    }
+}
+
 /// LighthouseServer is a GRPC server for the lighthouse service.
 ///
 /// It is used to coordinate the ManagerServer for each replica group.
@@ -603,7 +637,7 @@ impl LighthouseClient {
 /// heartbeat_timeout_ms (int): The timeout for heartbeats.
 #[pyclass]
 struct LighthouseServer {
-    lighthouse: Arc<lighthouse::Lighthouse>,
+    bind: String,
     handle: JoinHandle<Result<()>>,
     _runtime: Runtime,
 }
@@ -631,19 +665,30 @@ impl LighthouseServer {
                 .enable_all()
                 .build()?;
 
-            let lighthouse = rt
-                .block_on(lighthouse::Lighthouse::new(lighthouse::LighthouseOpt {
-                    bind: bind,
-                    min_replicas: min_replicas,
-                    join_timeout_ms: join_timeout_ms,
-                    quorum_tick_ms: quorum_tick_ms,
-                    heartbeat_timeout_ms: heartbeat_timeout_ms,
-                }))
-                .map_err(|e| PyRuntimeError::new_err(e.to_string()))?;
+            let opt = lighthouse::LighthouseOpt {
+                bind: bind.clone(),
+                min_replicas,
+                join_timeout_ms,
+                quorum_tick_ms,
+                heartbeat_timeout_ms,
+            };
+
+            let listener = rt.block_on(tokio::net::TcpListener::bind(&bind))?;
+            let bound_sock = listener.local_addr()?;
+            let bound = format!("http://{}", bound_sock);
+            let incoming = TcpListenerStream::new(listener);
+
+            let handle = rt.spawn(async move {
+                tonic::transport::Server::builder()
+                    .add_service(LighthouseServiceServer::new(Router::new(opt.clone())))
+                    .serve_with_incoming(incoming)
+                    .await
+                    .map_err(|e: tonic::transport::Error| anyhow::anyhow!(e))
+            });
 
             Ok(Self {
-                handle: rt.spawn(lighthouse.clone().run()),
-                lighthouse: lighthouse,
+                bind: bound,
+                handle,
                 _runtime: rt,
             })
         })
@@ -654,7 +699,7 @@ impl LighthouseServer {
     /// Returns:
     ///     str: The address of the lighthouse server.
     fn address(&self) -> PyResult<String> {
-        Ok(self.lighthouse.address().to_string())
+        Ok(self.bind.clone())
Member: This unfortunately isn't sufficient -- bind could be something like "0.0.0.0:0", which will bind to a random port. The address needs to be the routable HTTP address, i.e. http://foo.bar:1324.

Contributor (author): Hmm, perhaps we could use similar calls as the Lighthouse class uses to resolve the host IP and address? Will include a version of this in the next commit, though I'm also happy to change it.
     }
 
     /// shutdown shuts down the lighthouse server.
2 changes: 1 addition & 1 deletion src/lighthouse.rs

@@ -83,7 +83,7 @@ impl ChangeLogger {
     }
 }
 
-#[derive(StructOpt, Debug)]
+#[derive(StructOpt, Debug, Clone)]
 #[structopt()]
 pub struct LighthouseOpt {
     // bind is the address to bind the server to.
88 changes: 88 additions & 0 deletions src/router.rs

@@ -0,0 +1,88 @@
use std::sync::Arc;

use dashmap::{mapref::entry::Entry, DashMap};
use tonic::{Request, Response, Status};

use crate::{
    lighthouse::{Lighthouse, LighthouseOpt},
    torchftpb::{
        lighthouse_service_server::LighthouseService, LighthouseHeartbeatRequest,
        LighthouseHeartbeatResponse, LighthouseQuorumRequest, LighthouseQuorumResponse,
    },
};

/// Metadata header shared by the client and the router.
pub const ROOM_ID_HEADER: &str = "room-id";

/// Top-level service registered with tonic's `Server::builder()`.
#[derive(Clone)]
Member: Why does Router need to be Cloneable?

Contributor (author): I mainly made Router Cloneable so that calls to tonic's add_service would compile when constructing the LighthouseServer in src/bin/lighthouse.rs and src/lib.rs.

pub struct Router {
    rooms: Arc<DashMap<String, Arc<Lighthouse>>>,
    tmpl_opt: LighthouseOpt, // cloned for each new room
}

/// Multiplexes a single tonic gRPC server across many logical "rooms."
/// Inspects the `room-id` metadata header on each request, then
/// lazily creates or reuses an `Arc<Lighthouse>` for that namespace.
impl Router {
    /// Create a new router given the CLI/config options that are
    /// normally passed straight to `Lighthouse::new`.
    pub fn new(tmpl_opt: LighthouseOpt) -> Self {
        Self {
            rooms: Arc::new(DashMap::new()),
            tmpl_opt,
        }
    }

    /// Room lookup: create the room if it doesn't exist, reuse it if it does.
    async fn room(&self, id: &str) -> Arc<Lighthouse> {
        // 1. Quick optimistic read (no locking contention).
        if let Some(handle) = self.rooms.get(id) {
            return handle.clone();
        }

        // 2. Build the Lighthouse instance *off the map* so
        //    we don't hold any guard across `.await`.
        let new_room = Lighthouse::new(self.tmpl_opt.clone())
            .await
            .expect("failed to create Lighthouse");

        // 3. Second pass: insert if still vacant, otherwise reuse
        //    whatever another task inserted first.
        match self.rooms.entry(id.to_owned()) {
            Entry::Occupied(entry) => entry.get().clone(),
            Entry::Vacant(entry) => {
                entry.insert(new_room.clone());
                new_room
            }
        }
    }

    /// Extracts `"room-id"` from metadata, defaulting to `"default"`.
    fn extract_room_id(meta: &tonic::metadata::MetadataMap) -> &str {
        meta.get(ROOM_ID_HEADER)
            .and_then(|v| v.to_str().ok())
            .unwrap_or("default")
    }
}

#[tonic::async_trait]
impl LighthouseService for Router {
    async fn quorum(
        &self,
        req: Request<LighthouseQuorumRequest>,
    ) -> Result<Response<LighthouseQuorumResponse>, Status> {
        let id = Self::extract_room_id(req.metadata()).to_owned();
        let room = self.room(&id).await;
        <Arc<Lighthouse> as LighthouseService>::quorum(&room, req).await
    }

    async fn heartbeat(
        &self,
        req: Request<LighthouseHeartbeatRequest>,
    ) -> Result<Response<LighthouseHeartbeatResponse>, Status> {
        let id = Self::extract_room_id(req.metadata()).to_owned();
        let room = self.room(&id).await;
        <Arc<Lighthouse> as LighthouseService>::heartbeat(&room, req).await
    }
}
Member: I think this is fine as is, since this is fairly minimal boilerplate per request, but I think we can do even better.

By doing this at the Service layer instead of the LighthouseService layer, we can have it automatically work for all endpoints on the LighthouseService.

Can you look into this and see how feasible it is? If it's not any cleaner we can land this as is.

Some pointers: https://github.com/teimuraz/tonic-middleware might be useful.

Contributor (author): I made an initial attempt to do the routing at the Service layer rather than the LighthouseService layer, but had trouble adapting between the tonic message types (tonic::Request/Response) and the tower message types (http::Request/Response) - tonic::Request/Response wraps the body in tonic::body::BoxBody and carries gRPC-specific extensions, while the tower stack we're intercepting expects a bare http::Request/Response<B> where the body implements HttpBody. I haven't yet found a concise way to do this.

If I were to keep at this, I'd see whether something that relies more on tonic-middleware could work - perhaps there's a way to stay entirely in the tonic domain that keeps the implementation and debugging cleaner?

Member: Mixing the two is a bit tricky -- we probably need to stay at the tower layer. Why do you need to access the tonic::Request/Response objects? It's all HTTP at the end of the day, so it seems like we should be able to operate at the tower/http layer and view the metadata as a header.

Middleware might work, though it may be too high level.

Contributor (author): Ah, I see - it became easier once router.rs operated entirely at the tower layer rather than mixing Service and tonic. The most recent commit has router.rs at the tower level, which lets us start the lighthouse server with a call to Server::builder().add_service(router).serve(addr).
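For reference, a minimal sketch of what the tower-layer approach can look like, assuming tonic's usual `http`/`tower` stack; `RoomRouter` is hypothetical, and the actual per-room dispatch is elided:

```rust
use std::task::{Context, Poll};

use tower::Service;

/// Hypothetical tower middleware: at this layer gRPC metadata is just
/// HTTP headers, so no tonic::Request/Response types are involved.
#[derive(Clone)]
pub struct RoomRouter<S> {
    inner: S,
}

impl<S, B> Service<http::Request<B>> for RoomRouter<S>
where
    S: Service<http::Request<B>>,
{
    type Response = S::Response;
    type Error = S::Error;
    type Future = S::Future;

    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.inner.poll_ready(cx)
    }

    fn call(&mut self, req: http::Request<B>) -> Self::Future {
        // The `room-id` metadata set by LighthouseClient arrives here as
        // a plain HTTP header.
        let room = req
            .headers()
            .get("room-id")
            .and_then(|v| v.to_str().ok())
            .unwrap_or("default")
            .to_owned();
        // A real implementation would select the per-room service here;
        // this sketch just forwards to the single inner service.
        let _ = room;
        self.inner.call(req)
    }
}
```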

46 changes: 46 additions & 0 deletions torchft/multi_quorum_test.py

@@ -0,0 +1,46 @@
"""
Validate that one Lighthouse server can host isolated quorums
for multiple logical rooms (job IDs) via the `room-id` metadata header.
"""

from __future__ import annotations

Member: Could move this test to lighthouse_test.py to keep it with the rest of the lighthouse tests.

import datetime as _dt

Member: We usually just do `from datetime import timedelta`.

Contributor (author): Sounds good, including in next commit (moving the test into lighthouse_test.py and will use the existing imports).

import pytest

import torchft._torchft as ext

Member: Can we use the torchft.coordination API for this test instead?

Contributor (author): Sounds good, including in next commit.

_TIMEOUT = _dt.timedelta(seconds=3)  # connect + RPC timeout


def _client(addr: str, room: str) -> ext.LighthouseClient:
    """Utility: create a client with a logical room-id."""
    return ext.LighthouseClient(addr, _TIMEOUT, room)


@pytest.mark.asyncio
async def test_multi_room_quorums() -> None:
    # 1) one server, any free port
    server = ext.LighthouseServer("[::]:0", 1)
    addr = server.address()

    # 2) two clients in two separate rooms
    a = _client(addr, "jobA")
    b = _client(addr, "jobB")

    # 3) explicit heartbeats (exercises the RPC path)
    a.heartbeat("a0")
    b.heartbeat("b0")

    # 4) ask for a quorum from each room
    qa = a.quorum("a0", _TIMEOUT)
    qb = b.quorum("b0", _TIMEOUT)

    # 5) verify the rooms are independent
    assert qa.quorum_id == qb.quorum_id == 1
    assert len(qa.participants) == 1 and qa.participants[0].replica_id == "a0"
    assert len(qb.participants) == 1 and qb.participants[0].replica_id == "b0"

    # 6) shutdown
    server.shutdown()