From 3cd665358a931aacb8ef19752201cfd44007197b Mon Sep 17 00:00:00 2001 From: Faizan Ali Date: Wed, 23 Apr 2025 01:23:36 +0530 Subject: [PATCH 1/9] Add ALLOCATE_ENDPOINT_ID control message structures Add MCTP control message structures for the ALLOCATE_ENDPOINT_ID command to support bridge endpoint EID pool allocation. - Add mctp_ctrl_cmd_alloc_eid request/response structure Signed-off-by: Faizan Ali --- src/mctp-control-spec.h | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/src/mctp-control-spec.h b/src/mctp-control-spec.h index 586746c..877ff0e 100644 --- a/src/mctp-control-spec.h +++ b/src/mctp-control-spec.h @@ -181,6 +181,28 @@ struct mctp_ctrl_resp_resolve_endpoint_id { // ... uint8_t physical_address[N] } __attribute__((__packed__)); +typedef enum { + mctp_ctrl_cmd_alloc_eid_alloc_eid = 0, + mctp_ctrl_cmd_alloc_eid_force_alloc = 1, + mctp_ctrl_cmd_alloc_eid_get_alloc_info = 2, + mctp_ctrl_cmd_alloc_eid_reserved = 3 +} mctp_ctrl_cmd_alloc_eid_op; + +struct mctp_ctrl_cmd_alloc_eid { + struct mctp_ctrl_msg_hdr ctrl_hdr; + uint8_t alloc_eid_op; + uint8_t pool_size; + uint8_t start_eid; +} __attribute__((__packed__)); + +struct mctp_ctrl_resp_alloc_eid { + struct mctp_ctrl_msg_hdr ctrl_hdr; + uint8_t completion_code; + uint8_t status; + uint8_t eid_pool_size; + uint8_t eid_set; +} __attribute__((__packed__)); + #define MCTP_CTRL_HDR_MSG_TYPE 0 #define MCTP_CTRL_HDR_FLAG_REQUEST (1 << 7) #define MCTP_CTRL_HDR_FLAG_DGRAM (1 << 6) From 9f2978da4e75413a29aea60cead302575af654dd Mon Sep 17 00:00:00 2001 From: Faizan Ali Date: Thu, 17 Jul 2025 17:34:14 +0530 Subject: [PATCH 2/9] Introduce new max mctp bridge pool size configuration * For mctp bridge to have contiguous bridge EID with it's downstream endpoints, we introduce max_pool_size, bus-owner configuration, to be assumed pool size before we get it's actual size via SET_ENDPOINT_ID command response. Signed-off-by: Faizan Ali --- conf/mctpd.conf | 4 ++++ src/mctpd.c | 31 ++++++++++++++++++++++++++++++- 2 files changed, 34 insertions(+), 1 deletion(-) diff --git a/conf/mctpd.conf b/conf/mctpd.conf index 89ba406..b580885 100644 --- a/conf/mctpd.conf +++ b/conf/mctpd.conf @@ -5,6 +5,10 @@ mode = "bus-owner" [mctp] message_timeout_ms = 30 +# bus-owner configuration +[bus-owner] +max_pool_size = 15 + # Specify a UUID: not generally required - mctpd will query the system UUID # where available. # uuid = "21f0f554-7f7c-4211-9ca1-6d0f000ea9e7" diff --git a/src/mctpd.c b/src/mctpd.c index 89991a4..651ba57 100644 --- a/src/mctpd.c +++ b/src/mctpd.c @@ -221,6 +221,9 @@ struct ctx { // Verbose logging bool verbose; + + // maximum pool size for assumed MCTP Bridge + uint8_t max_pool_size; }; static int emit_endpoint_added(const struct peer *peer); @@ -3938,9 +3941,27 @@ static int parse_config_mctp(struct ctx *ctx, toml_table_t *mctp_tab) return 0; } +static int parse_config_bus_owner(struct ctx *ctx, toml_table_t *bus_owner) +{ + toml_datum_t val; + + val = toml_int_in(bus_owner, "max_pool_size"); + if (val.ok) { + int64_t i = val.u.i; + if (i <= 0 || i > (eid_alloc_max - eid_alloc_min)) { + warnx("invalid max_pool_size value (must be 1-%d)", + eid_alloc_max - eid_alloc_min); + return -1; + } + ctx->max_pool_size = i; + } + + return 0; +} + static int parse_config(struct ctx *ctx) { - toml_table_t *conf_root, *mctp_tab; + toml_table_t *conf_root, *mctp_tab, *bus_owner; bool conf_file_specified; char errbuf[256] = { 0 }; const char *filename; @@ -3985,6 +4006,13 @@ static int parse_config(struct ctx *ctx) goto out_free; } + bus_owner = toml_table_in(conf_root, "bus-owner"); + if (bus_owner) { + rc = parse_config_bus_owner(ctx, bus_owner); + if (rc) + goto out_free; + } + rc = 0; out_free: @@ -3998,6 +4026,7 @@ static void setup_config_defaults(struct ctx *ctx) { ctx->mctp_timeout = 250000; // 250ms ctx->default_role = ENDPOINT_ROLE_BUS_OWNER; + ctx->max_pool_size = 15; } static void free_config(struct ctx *ctx) From 9e1fc7ce43a445f012145ec0e8ab6d496ec40e60 Mon Sep 17 00:00:00 2001 From: Faizan Ali Date: Thu, 24 Apr 2025 01:55:51 +0530 Subject: [PATCH 3/9] Add mctp bridge endpoint support Add support for MCTP bridge endpoints that can allocate pools of EIDs for downstream endpoints. We assume each AssignEndpoint d-bus call will be for a bridge. with this we allocate/reserve a max_pool_size eid range contiguous to bridge's own eid. Later this pool size is updated based on SET_ENDPOINT_ID command response. - Add pool_size and pool_start fields to struct peer - Update AssignEndpoint d-bus call to support MCTP Bridge dynamic EID assignement. - Fetch contiguous eids for bridge and its downstream endpoints. - is_eid_in_bridge_pool(): for static eid assignement via AssignEndpointStatic d-bus call, need to check eid if being part of any other bridge's pool range. Signed-off-by: Faizan Ali --- src/mctpd.c | 124 +++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 114 insertions(+), 10 deletions(-) diff --git a/src/mctpd.c b/src/mctpd.c index 651ba57..6137e57 100644 --- a/src/mctpd.c +++ b/src/mctpd.c @@ -190,6 +190,10 @@ struct peer { uint8_t endpoint_type; uint8_t medium_spec; } recovery; + + // Pool size + uint8_t pool_size; + uint8_t pool_start; }; struct ctx { @@ -1364,7 +1368,7 @@ static int endpoint_query_phys(struct ctx *ctx, const dest_phys *dest, } /* returns -ECONNREFUSED if the endpoint returns failure. */ -static int endpoint_send_set_endpoint_id(const struct peer *peer, +static int endpoint_send_set_endpoint_id(struct peer *peer, mctp_eid_t *new_eidp) { struct sockaddr_mctp_ext addr; @@ -1430,9 +1434,11 @@ static int endpoint_send_set_endpoint_id(const struct peer *peer, alloc = resp->status & 0x3; if (alloc != 0) { - // TODO for bridges - warnx("%s requested allocation pool, unimplemented", - dest_phys_tostr(dest)); + peer->pool_size = resp->eid_pool_size; + if (peer->ctx->verbose) { + warnx("%s requested allocation of pool size = %d", + dest_phys_tostr(dest), peer->pool_size); + } } rc = 0; @@ -1660,15 +1666,37 @@ static int peer_set_mtu(struct ctx *ctx, struct peer *peer, uint32_t mtu) return rc; } +// checks if EIDs from bridge + 1 has contiguous max_pool_size available eids +// returns next candidate eid for pool start +static int get_next_pool_start(mctp_eid_t bridge_eid, struct net *n, + int max_pool_size) +{ + if (bridge_eid + 1 + max_pool_size > eid_alloc_max) { + return -EADDRNOTAVAIL; + } + for (mctp_eid_t e = bridge_eid + 1; e <= bridge_eid + max_pool_size; + e++) { + // found a bridge in between, need to skip its pool range + if (n->peers[e] != NULL) { + e += n->peers[e]->pool_size; + return e; + } + } + /* possible to have contiguous eids within + bridge_eid to bridge_eid + max_pool_size */ + return bridge_eid + 1; +} + static int endpoint_assign_eid(struct ctx *ctx, sd_bus_error *berr, const dest_phys *dest, struct peer **ret_peer, - mctp_eid_t static_eid) + mctp_eid_t static_eid, bool assign_bridge) { mctp_eid_t e, new_eid; struct net *n = NULL; struct peer *peer = NULL; uint32_t net; int rc; + bool is_pool_possible = false; net = mctp_nl_net_byindex(ctx->nl, dest->ifindex); if (!net) { @@ -1691,11 +1719,50 @@ static int endpoint_assign_eid(struct ctx *ctx, sd_bus_error *berr, } else { /* Find an unused EID */ for (e = eid_alloc_min; e <= eid_alloc_max; e++) { - if (n->peers[e]) + if (n->peers[e]) { + // used peer may be a bridge, skip its eid range + e += n->peers[e]->pool_size; continue; + } + + // check for max sized pool from e + 1 + if (assign_bridge) { + int next_pool_start = get_next_pool_start( + e, n, ctx->max_pool_size); + if (next_pool_start < 0) { + warnx("Ran out of EIDs from net %d while" + "allocating bridge downstream endpoint at %s ", + net, dest_phys_tostr(dest)); + is_pool_possible = false; + /*ran out of pool eid : set only bridge eid then + find first available bridge eid which is not part of any pool*/ + for (e = eid_alloc_min; + e <= eid_alloc_max; e++) { + if (n->peers[e]) { + // used peer may be a bridge, skip its eid range + e += n->peers[e] + ->pool_size; + continue; + } + break; + } + } else if (next_pool_start != e + 1) { + // e doesn't have any contiguous max pool size eids available + e += next_pool_start; + continue; + } else { + // found contigous eids of max_pool_size from bridge_eid + is_pool_possible = true; + } + } + rc = add_peer(ctx, dest, e, net, &peer); if (rc < 0) return rc; + if (assign_bridge && is_pool_possible) { + peer->pool_size = ctx->max_pool_size; + peer->pool_start = e + 1; + } break; } if (e > eid_alloc_max) { @@ -1718,6 +1785,10 @@ static int endpoint_assign_eid(struct ctx *ctx, sd_bus_error *berr, } if (new_eid != peer->eid) { + // avoid allocation for any different EID in response + warnx("Mismatch of requested from received EID, resetting the pool"); + peer->pool_size = 0; + peer->pool_start = 0; rc = change_peer_eid(peer, new_eid); if (rc == -EEXIST) { sd_bus_error_setf( @@ -2117,7 +2188,7 @@ static int method_setup_endpoint(sd_bus_message *call, void *data, } /* Set Endpoint ID */ - rc = endpoint_assign_eid(ctx, berr, dest, &peer, 0); + rc = endpoint_assign_eid(ctx, berr, dest, &peer, 0, false); if (rc < 0) goto err; @@ -2170,7 +2241,7 @@ static int method_assign_endpoint(sd_bus_message *call, void *data, peer->net, peer_path, 0); } - rc = endpoint_assign_eid(ctx, berr, dest, &peer, 0); + rc = endpoint_assign_eid(ctx, berr, dest, &peer, 0, true); if (rc < 0) goto err; @@ -2178,6 +2249,10 @@ static int method_assign_endpoint(sd_bus_message *call, void *data, if (!peer_path) goto err; + if (peer->pool_size > 0) { + // Call for Allocate EndpointID + } + return sd_bus_reply_method_return(call, "yisb", peer->eid, peer->net, peer_path, 1); err: @@ -2185,6 +2260,22 @@ static int method_assign_endpoint(sd_bus_message *call, void *data, return rc; } +// Checks if given EID belongs to any bridge's pool range +static bool is_eid_in_bridge_pool(struct net *n, mctp_eid_t eid) +{ + for (int i = eid_alloc_min; i <= eid; i++) { + struct peer *peer = n->peers[i]; + if (peer && peer->pool_size > 0) { + if (eid >= peer->pool_start && + eid < peer->pool_start + peer->pool_size) { + return true; + } + i += peer->pool_size; + } + } + return false; +} + static int method_assign_endpoint_static(sd_bus_message *call, void *data, sd_bus_error *berr) { @@ -2239,10 +2330,22 @@ static int method_assign_endpoint_static(sd_bus_message *call, void *data, return sd_bus_error_setf(berr, SD_BUS_ERROR_INVALID_ARGS, "Address in use"); + } else { + // is requested EID part of any bridge pool range + struct net *n = lookup_net(ctx, netid); + if (!n) { + bug_warn("%s: Bad old net %d", __func__, netid); + return -EPROTO; + } + if (is_eid_in_bridge_pool(n, eid)) { + return sd_bus_error_setf( + berr, SD_BUS_ERROR_INVALID_ARGS, + "EID belongs to another MCTP bridge pool"); + } } } - rc = endpoint_assign_eid(ctx, berr, dest, &peer, eid); + rc = endpoint_assign_eid(ctx, berr, dest, &peer, eid, false); if (rc < 0) { goto err; } @@ -2652,7 +2755,8 @@ static int peer_endpoint_recover(sd_event_source *s, uint64_t usec, * after which we immediately return as there's no old peer state left to * maintain. */ - return endpoint_assign_eid(ctx, NULL, &phys, &peer, 0); + return endpoint_assign_eid(ctx, NULL, &phys, &peer, 0, + false); } /* Confirmation of the same device, apply its already allocated EID */ From 7ae64b2d6b99903f48366e2e1b6223237f618db6 Mon Sep 17 00:00:00 2001 From: Faizan Ali Date: Thu, 24 Apr 2025 17:57:25 +0530 Subject: [PATCH 4/9] Implement ALLOCATE_ENDPOINT_ID support for bridges Add implementation for the MCTP ALLOCATE_ENDPOINT_ID control command to enable bridges to allocate EID pools for downstream endpoints. - Add endpoint_send_allocate_endpoint_id() for sending allocation requests - Update gateway route for downstream EIDs - Integrate allocation of dynamic eid for downstream endpoints of bridge with AssignEndpoint d-bus method Signed-off-by: Faizan Ali --- src/mctpd.c | 149 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 149 insertions(+) diff --git a/src/mctpd.c b/src/mctpd.c index 6137e57..3f8549a 100644 --- a/src/mctpd.c +++ b/src/mctpd.c @@ -253,6 +253,7 @@ static int del_local_eid(struct ctx *ctx, uint32_t net, int eid); static int add_net(struct ctx *ctx, uint32_t net); static void del_net(struct net *net); static int add_interface(struct ctx *ctx, int ifindex); +static int endpoint_allocate_eid(struct peer *peer); static const sd_bus_vtable bus_endpoint_obmc_vtable[]; static const sd_bus_vtable bus_endpoint_cc_vtable[]; @@ -2251,6 +2252,18 @@ static int method_assign_endpoint(sd_bus_message *call, void *data, if (peer->pool_size > 0) { // Call for Allocate EndpointID + rc = endpoint_allocate_eid(peer); + if (rc < 0) { + warnx("Failed to allocate downstream EIDs"); + } else { + if (peer->ctx->verbose) { + fprintf(stderr, + "Downstream EIDs assigned from %d to %d : pool size %d\n", + peer->pool_start, + peer->pool_start + peer->pool_size - 1, + peer->pool_size); + } + } } return sd_bus_reply_method_return(call, "yisb", peer->eid, peer->net, @@ -2473,6 +2486,20 @@ static int peer_route_update(struct peer *peer, uint16_t type) return mctp_nl_route_add(peer->ctx->nl, peer->eid, 0, peer->phys.ifindex, NULL, peer->mtu); } else if (type == RTM_DELROUTE) { + if (peer->pool_size > 0) { + int rc = 0; + struct mctp_fq_addr gw_addr = { 0 }; + gw_addr.net = peer->net; + gw_addr.eid = peer->eid; + rc = mctp_nl_route_del(peer->ctx->nl, peer->pool_start, + peer->pool_size - 1, + peer->phys.ifindex, &gw_addr); + if (rc < 0) + warnx("failed to delete route for peer pool eids %d-%d %s", + peer->pool_start, + peer->pool_start + peer->pool_size - 1, + strerror(-rc)); + } return mctp_nl_route_del(peer->ctx->nl, peer->eid, 0, peer->phys.ifindex, NULL); } @@ -4138,6 +4165,128 @@ static void free_config(struct ctx *ctx) free(ctx->config_filename); } +static int endpoint_send_allocate_endpoint_id(struct peer *peer, + mctp_eid_t eid_start, + uint8_t eid_pool_size, + mctp_ctrl_cmd_alloc_eid_op oper, + uint8_t *allocated_pool_size, + mctp_eid_t *allocated_pool_start) +{ + struct sockaddr_mctp_ext addr; + struct mctp_ctrl_cmd_alloc_eid req = { 0 }; + struct mctp_ctrl_resp_alloc_eid *resp = NULL; + uint8_t *buf = NULL; + size_t buf_size; + uint8_t iid, stat; + int rc; + + iid = mctp_next_iid(peer->ctx); + req.ctrl_hdr.rq_dgram_inst = RQDI_REQ | iid; + req.ctrl_hdr.command_code = MCTP_CTRL_CMD_ALLOCATE_ENDPOINT_IDS; + req.alloc_eid_op = (uint8_t)(oper & 0x03); + req.pool_size = eid_pool_size; + req.start_eid = eid_start; + rc = endpoint_query_peer(peer, MCTP_CTRL_HDR_MSG_TYPE, &req, + sizeof(req), &buf, &buf_size, &addr); + if (rc < 0) + goto out; + + rc = mctp_ctrl_validate_response(buf, buf_size, sizeof(*resp), + peer_tostr_short(peer), iid, + MCTP_CTRL_CMD_ALLOCATE_ENDPOINT_IDS); + + if (rc) + goto out; + + resp = (void *)buf; + if (!resp) { + warnx("%s Invalid response Buffer\n", __func__); + return -ENOMEM; + } + + stat = resp->status & 0x03; + if (stat == 0x00) { + if (peer->ctx->verbose) { + fprintf(stderr, "%s Allocation Accepted \n", __func__); + } + if (resp->eid_pool_size != eid_pool_size || + resp->eid_set != eid_start) { + warnx("Unexpected pool start %d pool size %d", + resp->eid_set, resp->eid_pool_size); + rc = -1; + goto out; + } + } else { + if (stat == 0x1) + warnx("%s Allocation was rejected: already allocated by other bus" + " pool start %d, pool size %d", + __func__, resp->eid_pool_size, resp->eid_set); + rc = -1; + goto out; + } + + *allocated_pool_size = resp->eid_pool_size; + *allocated_pool_start = resp->eid_set; + if (peer->ctx->verbose) { + fprintf(stderr, + "%s Allocated size of %d, starting from EID %d\n", + __func__, resp->eid_pool_size, resp->eid_set); + } + +out: + free(buf); + return rc; +} + +static int endpoint_allocate_eid(struct peer *peer) +{ + uint8_t allocated_pool_size = 0; + mctp_eid_t allocated_pool_start = 0; + int rc = 0; + + /* Find pool sized contiguous unused eids to allocate on the bridge. */ + if (peer->pool_start >= eid_alloc_max || peer->pool_start <= 0) { + warnx("%s Invalid Pool start %d", __func__, peer->pool_start); + return -1; + } + rc = endpoint_send_allocate_endpoint_id( + peer, peer->pool_start, peer->pool_size, + mctp_ctrl_cmd_alloc_eid_alloc_eid, &allocated_pool_size, + &allocated_pool_start); + if (rc) { + //reset peer pool + peer->pool_size = 0; + peer->pool_start = 0; + } else { + peer->pool_size = allocated_pool_size; + peer->pool_start = allocated_pool_start; + + // add Gateway route for all Bridge's downstream eids + if (peer->pool_size > 0) { + struct mctp_fq_addr gw_addr = { 0 }; + gw_addr.net = peer->net; + gw_addr.eid = peer->eid; + rc = mctp_nl_route_add(peer->ctx->nl, peer->pool_start, + peer->pool_size - 1, + peer->phys.ifindex, &gw_addr, + peer->mtu); + if (rc < 0) { + warnx("Failed to add Gateway route for EID %d: %s", + gw_addr.eid, strerror(-rc)); + // If the route already exists, continue polling + if (rc != -EEXIST) { + return rc; + } else { + rc = 0; + } + } + // TODO: Polling logic for downstream EID + } + } + + return rc; +} + int main(int argc, char **argv) { struct ctx ctxi = { 0 }, *ctx = &ctxi; From 249a7ce85279c8d858c1eb712bb6cc0666b84838 Mon Sep 17 00:00:00 2001 From: Faizan Ali Date: Wed, 25 Jun 2025 17:56:52 +0530 Subject: [PATCH 5/9] Update mctp document with mctp bridge support * updated mctpd.md with new mctp bridge support for dynamic eid assignment from AssignEndpoint d-bus call Signed-off-by: Faizan Ali --- docs/mctpd.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/docs/mctpd.md b/docs/mctpd.md index b3900bd..c7d1eab 100644 --- a/docs/mctpd.md +++ b/docs/mctpd.md @@ -126,7 +126,11 @@ busctl call au.com.codeconstruct.MCTP1 \ Similar to SetupEndpoint, but will always assign an EID rather than querying for existing ones. Will return `new = false` when an endpoint is already known to -`mctpd`. +`mctpd`. If the endpoint is an MCTP bridge (indicated by requesting a pool size +in its Set Endpoint ID response), this method attempts to allocate a contiguous +range of EIDs for the bridge's downstream endpoints. If sufficient contiguous EIDs +are not available within the dynamic allocation pool for the network, only the +bridge's own EID will be assigned, and downstream EID allocation will fail. #### `.AssignEndpointStatic`: `ayy` → `yisb` From 1baab5d23961e4e4849fe579ca06433676c6b940 Mon Sep 17 00:00:00 2001 From: Faizan Ali Date: Tue, 1 Jul 2025 01:45:11 +0530 Subject: [PATCH 6/9] Add tests for dynamic bridge eid and downstream eid assignment Add new test for validating AssignEndpoint D-Bus method to verify bridge endpoint EID allocation being contiguous to its downstream eids. - Add test_assign_dynamic_bridge_eid() for bridge simulation testing - Simulate bridge with some downstream endpoints in test framework - Test EID allocation of bridge and downstream being contiguous Signed-off-by: Faizan Ali --- tests/mctpenv/__init__.py | 14 +++++++++++++- tests/test_mctpd.py | 39 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 52 insertions(+), 1 deletion(-) diff --git a/tests/mctpenv/__init__.py b/tests/mctpenv/__init__.py index 8f82bf9..1599733 100644 --- a/tests/mctpenv/__init__.py +++ b/tests/mctpenv/__init__.py @@ -348,7 +348,7 @@ async def handle_mctp_control(self, sock, addr, data): # Set Endpoint ID (op, eid) = data[2:] self.eid = eid - data = bytes(hdr + [0x00, 0x00, self.eid, 0x00]) + data = bytes(hdr + [0x00, 0x01 if len(self.bridged_eps) > 0 else 0x00, self.eid, len(self.bridged_eps)]) await sock.send(raddr, data) elif opcode == 2: @@ -367,6 +367,18 @@ async def handle_mctp_control(self, sock, addr, data): data = bytes(hdr + [0x00, len(types)] + types) await sock.send(raddr, data) + elif opcode == 8: + # Allocate Endpoint IDs + (_, _, _, pool_size, pool_start) = data + num_eps = min(pool_size, len(self.bridged_eps)) + data = bytes(hdr + [0x00, 0x00, num_eps, pool_start]) + + # Assign sequential EIDs starting from pool_start + for ep in range(num_eps): + self.bridged_eps[ep].eid = pool_start + ep + + await sock.send(raddr, data) + else: await sock.send(raddr, bytes(hdr + [0x05])) # unsupported command diff --git a/tests/test_mctpd.py b/tests/test_mctpd.py index 70ceec8..842be96 100644 --- a/tests/test_mctpd.py +++ b/tests/test_mctpd.py @@ -831,3 +831,42 @@ async def test_interface_rename_with_peers(dbus, mctpd): # ensure the endpoint persists after rename ep_obj = await dbus.get_proxy_object(MCTPD_C, ep_path) assert ep_obj is not None +""" Test bridge endpoint dynamic EID assignment and downstream +endpoint EID allocation + +Tests that: +- Bridge endpoint can be assigned a dynamic EID +- Downstream endpoints get contiguous EIDs after bridge's own eid +""" +async def test_assign_dynamic_bridge_eid(dbus, mctpd): + iface = mctpd.system.interfaces[0] + mctp = await mctpd_mctp_iface_obj(dbus, iface) + ep = mctpd.network.endpoints[0] + + pool_size = 2 + + # Set up bridged endpoints as undiscovered EID 0 + for i in range(pool_size): + br_ep = Endpoint(iface, bytes(), types=[0, 2]) + ep.add_bridged_ep(br_ep) + mctpd.network.add_endpoint(br_ep) + + mctp = await mctpd_mctp_iface_obj(dbus, iface) + + # dynamic EID assigment for dev1 + (eid, _, path, new) = await mctp.call_assign_endpoint( + ep.lladdr, + ) + + assert new + # Assert for assigned bridge endpoint ID + assert path == f'/au/com/codeconstruct/mctp1/networks/1/endpoints/{eid}' + assert new + + net = await mctpd_mctp_network_obj(dbus, iface.net) + for i in range(pool_size): + br_ep = ep.bridged_eps[i] + #check if the downstream endpoint eid is contiguous to the bridge endpoint eid + assert (eid + i + 1) == br_ep.eid + (path, new) = await net.call_learn_endpoint(br_ep.eid) + assert path == f'/au/com/codeconstruct/mctp1/networks/1/endpoints/{br_ep.eid}' From cd91cc8b7fd9f4d0440b7a32e508b464037ba0dd Mon Sep 17 00:00:00 2001 From: Faizan Ali Date: Fri, 18 Jul 2025 04:21:11 +0530 Subject: [PATCH 7/9] Add new Endpoint object interface au.com.codeconstruct.MCTP.Bridge1 * au.com.codeconstruct.MCTP.Bridge1 interface will capture details of bridge type endpoint such as pool start, pool size, pool end. Signed-off-by: Faizan Ali --- src/mctpd.c | 58 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) diff --git a/src/mctpd.c b/src/mctpd.c index 3f8549a..eca5c85 100644 --- a/src/mctpd.c +++ b/src/mctpd.c @@ -45,6 +45,7 @@ #define MCTP_DBUS_PATH_LINKS "/au/com/codeconstruct/mctp1/interfaces" #define CC_MCTP_DBUS_IFACE_BUSOWNER "au.com.codeconstruct.MCTP.BusOwner1" #define CC_MCTP_DBUS_IFACE_ENDPOINT "au.com.codeconstruct.MCTP.Endpoint1" +#define CC_MCTP_DBUS_IFACE_BRIDGE "au.com.codeconstruct.MCTP.Bridge1" #define CC_MCTP_DBUS_IFACE_TESTING "au.com.codeconstruct.MCTPTesting" #define MCTP_DBUS_NAME "au.com.codeconstruct.MCTP1" #define MCTP_DBUS_IFACE_ENDPOINT "xyz.openbmc_project.MCTP.Endpoint" @@ -156,6 +157,7 @@ struct peer { bool published; sd_bus_slot *slot_obmc_endpoint; sd_bus_slot *slot_cc_endpoint; + sd_bus_slot *slot_bridge; sd_bus_slot *slot_uuid; char *path; @@ -257,6 +259,7 @@ static int endpoint_allocate_eid(struct peer *peer); static const sd_bus_vtable bus_endpoint_obmc_vtable[]; static const sd_bus_vtable bus_endpoint_cc_vtable[]; +static const sd_bus_vtable bus_endpoint_bridge[]; static const sd_bus_vtable bus_endpoint_uuid_vtable[]; __attribute__((format(printf, 1, 2))) static void bug_warn(const char *fmt, ...) @@ -1599,6 +1602,7 @@ static void free_peers(struct ctx *ctx) free(peer->path); sd_bus_slot_unref(peer->slot_obmc_endpoint); sd_bus_slot_unref(peer->slot_cc_endpoint); + sd_bus_slot_unref(peer->slot_bridge); sd_bus_slot_unref(peer->slot_uuid); free(peer); } @@ -2613,6 +2617,11 @@ static int publish_peer(struct peer *peer, bool add_route) peer->path, CC_MCTP_DBUS_IFACE_ENDPOINT, bus_endpoint_cc_vtable, peer); + if (peer->pool_size > 0) { + sd_bus_add_object_vtable(peer->ctx->bus, &peer->slot_bridge, + peer->path, CC_MCTP_DBUS_IFACE_BRIDGE, + bus_endpoint_bridge, peer); + } if (peer->uuid) { sd_bus_add_object_vtable(peer->ctx->bus, &peer->slot_uuid, peer->path, OPENBMC_IFACE_COMMON_UUID, @@ -2663,6 +2672,8 @@ static int unpublish_peer(struct peer *peer) peer->slot_obmc_endpoint = NULL; sd_bus_slot_unref(peer->slot_cc_endpoint); peer->slot_cc_endpoint = NULL; + sd_bus_slot_unref(peer->slot_bridge); + peer->slot_bridge = NULL; sd_bus_slot_unref(peer->slot_uuid); peer->slot_uuid = NULL; peer->published = false; @@ -3047,6 +3058,33 @@ static int bus_endpoint_get_prop(sd_bus *bus, const char *path, return rc; } +static int bus_bridge_get_prop(sd_bus *bus, const char *path, + const char *interface, const char *property, + sd_bus_message *reply, void *userdata, + sd_bus_error *berr) +{ + struct peer *peer = userdata; + int rc; + + if (strcmp(property, "PoolStart") == 0) { + rc = sd_bus_message_append(reply, "y", peer->pool_start); + } else if (strcmp(property, "PoolSize") == 0) { + rc = sd_bus_message_append(reply, "y", peer->pool_size); + } else if (strcmp(property, "PoolEnd") == 0) { + uint8_t pool_end = + peer->pool_size ? + peer->pool_start + peer->pool_size - 1 : + 0; + rc = sd_bus_message_append(reply, "y", pool_end); + } else { + warnx("Unknown bridge property '%s' for %s iface %s", property, + path, interface); + rc = -ENOENT; + } + + return rc; +} + static int bus_network_get_prop(sd_bus *bus, const char *path, const char *interface, const char *property, sd_bus_message *reply, void *userdata, @@ -3246,6 +3284,26 @@ static const sd_bus_vtable bus_endpoint_cc_vtable[] = { SD_BUS_VTABLE_END }; +static const sd_bus_vtable bus_endpoint_bridge[] = { + SD_BUS_VTABLE_START(0), + SD_BUS_PROPERTY("PoolStart", + "y", + bus_bridge_get_prop, + 0, + SD_BUS_VTABLE_PROPERTY_CONST), + SD_BUS_PROPERTY("PoolSize", + "y", + bus_bridge_get_prop, + 0, + SD_BUS_VTABLE_PROPERTY_CONST), + SD_BUS_PROPERTY("PoolEnd", + "y", + bus_bridge_get_prop, + 0, + SD_BUS_VTABLE_PROPERTY_CONST), + SD_BUS_VTABLE_END +}; + static const sd_bus_vtable bus_link_vtable[] = { SD_BUS_VTABLE_START(0), SD_BUS_WRITABLE_PROPERTY("Role", From 50617751a7fdb1f9d18ed39a59f25967c5d5abed Mon Sep 17 00:00:00 2001 From: Faizan Ali Date: Fri, 27 Jun 2025 14:22:51 +0530 Subject: [PATCH 8/9] mctp: implement asynchronous polling for bridge downstream EIDs Add asynchronous polling mechanism for MCTP bridge downstream endpoints to discover them. The polling continues throughout the bridge's existence under the MCTP network and creates peer structures only when endpoints respond to poll successfully. - Update bus-owner config with polling configuration ep_removal_threshold - Add bridge_eid_poll_context and bridge_endpoint_discovery structures - Implement setup_async_downstream_ep_poll() for polling initialization - Add send_bridge_poll_request() for periodic GET_ENDPOINT_ID messages - Add handle_bridge_poll_response() for response processing - Add handle_poll_timeout() with ep_removal_threshold failure handling - Add cleanup_bridge_polling() and cleanup_bridge_downstream_peers() The polling uses an asynchronous sender-response model where mctpd continuously polls downstream EIDs and creates peer structures upon successful responses. Failed endpoints are removed after reaching the failure threshold (ep_removal_threshold). Polling stops and all resources are cleaned up when bridges are removed. Signed-off-by: Faizan Ali --- conf/mctpd.conf | 1 + src/mctp-control-spec.h | 6 + src/mctpd.c | 531 +++++++++++++++++++++++++++++++++++++++- 3 files changed, 535 insertions(+), 3 deletions(-) diff --git a/conf/mctpd.conf b/conf/mctpd.conf index b580885..c02863d 100644 --- a/conf/mctpd.conf +++ b/conf/mctpd.conf @@ -8,6 +8,7 @@ message_timeout_ms = 30 # bus-owner configuration [bus-owner] max_pool_size = 15 +ep_removal_thresold = 3 # Bridge poll: failure threshold before removing an endpoint # Specify a UUID: not generally required - mctpd will query the system UUID # where available. diff --git a/src/mctp-control-spec.h b/src/mctp-control-spec.h index 877ff0e..73aa682 100644 --- a/src/mctp-control-spec.h +++ b/src/mctp-control-spec.h @@ -330,3 +330,9 @@ struct mctp_ctrl_resp_alloc_eid { #define GET_ROUTING_ENTRY_TYPE(field) \ (((field) >> MCTP_ROUTING_ENTRY_TYPE_SHIFT) & \ MCTP_ROUTING_ENTRY_TYPE_MASK) + +/* MCTP Time Specification + * See DSP0283 v1.0.0 Table 4 + */ +#define MCTP_TRECLAIM_US 5000000 // 5s TRECLAIM +#define MCTP_RESP_TIMEOUT 400000 //400ms MT2 diff --git a/src/mctpd.c b/src/mctpd.c index eca5c85..c277efb 100644 --- a/src/mctpd.c +++ b/src/mctpd.c @@ -196,6 +196,9 @@ struct peer { // Pool size uint8_t pool_size; uint8_t pool_start; + + // Bridge polling context (only for bridge peers) + struct bridge_endpoint_discovery *bridge_discovery; }; struct ctx { @@ -230,6 +233,33 @@ struct ctx { // maximum pool size for assumed MCTP Bridge uint8_t max_pool_size; + + // maximum failure in response to sent poll command before endpoint is removed + int ep_removal_thresold; + int poll_interval_us; + int poll_timeout_us; +}; + +struct bridge_eid_poll_context { + int sd; // Socket for sending/recieving to this EID + sd_event_source *send_timer; // Timer for periodic sends + sd_event_source *recv_io; // IO handler for receives + sd_event_source *timeout_timer; // Timer for response timeout + int poll_interval_us; // Interval b/w poll commands in usec + int poll_timeout_us; // Timeout for sent poll command in usec + mctp_eid_t eid; + int net; + bool stop_poll; + int failure_count; + struct peer *peer; + struct bridge_endpoint_discovery + *parent; // Back pointer to parent context +}; + +struct bridge_endpoint_discovery { + struct bridge_eid_poll_context + *poll_ctxs; // Array of endpoint poll context + struct peer *bridge_peer; }; static int emit_endpoint_added(const struct peer *peer); @@ -256,6 +286,8 @@ static int add_net(struct ctx *ctx, uint32_t net); static void del_net(struct net *net); static int add_interface(struct ctx *ctx, int ifindex); static int endpoint_allocate_eid(struct peer *peer); +static void cleanup_bridge_polling_ctx(struct peer *bridge_peer); +static void remove_bridge_downstream_peers(struct peer *bridge_peer); static const sd_bus_vtable bus_endpoint_obmc_vtable[]; static const sd_bus_vtable bus_endpoint_cc_vtable[]; @@ -1538,6 +1570,12 @@ static int remove_peer(struct peer *peer) return -EPROTO; } + // Clean up downstream peers if this is a bridge peer + if (peer->pool_size > 0) { + cleanup_bridge_polling_ctx(peer); + remove_bridge_downstream_peers(peer); + } + unpublish_peer(peer); // Clear it @@ -1555,8 +1593,12 @@ static int remove_peer(struct peer *peer) } n->peers[peer->eid] = NULL; - free(peer->message_types); - free(peer->uuid); + if (peer->message_types) { + free(peer->message_types); + } + if (peer->uuid) { + free(peer->uuid); + } for (idx = 0; idx < ctx->num_peers; idx++) { if (ctx->peers[idx] == peer) @@ -1597,6 +1639,7 @@ static void free_peers(struct ctx *ctx) { for (size_t i = 0; i < ctx->num_peers; i++) { struct peer *peer = ctx->peers[i]; + cleanup_bridge_polling_ctx(peer); free(peer->message_types); free(peer->uuid); free(peer->path); @@ -4145,6 +4188,16 @@ static int parse_config_bus_owner(struct ctx *ctx, toml_table_t *bus_owner) ctx->max_pool_size = i; } + val = toml_int_in(bus_owner, "ep_removal_thresold"); + if (val.ok) { + int64_t i = val.u.i; + if (i <= 0) { + warnx("invalid ep_removal_thresold value (must be >= 1)"); + return -1; + } + ctx->ep_removal_thresold = i; + } + return 0; } @@ -4216,6 +4269,9 @@ static void setup_config_defaults(struct ctx *ctx) ctx->mctp_timeout = 250000; // 250ms ctx->default_role = ENDPOINT_ROLE_BUS_OWNER; ctx->max_pool_size = 15; + ctx->ep_removal_thresold = 3; + ctx->poll_interval_us = MCTP_TRECLAIM_US / 2; + ctx->poll_timeout_us = MCTP_RESP_TIMEOUT; } static void free_config(struct ctx *ctx) @@ -4296,6 +4352,474 @@ static int endpoint_send_allocate_endpoint_id(struct peer *peer, return rc; } +static void cleanup_bridge_polling_ctx(struct peer *bridge_peer) +{ + struct bridge_endpoint_discovery *discovery = + bridge_peer->bridge_discovery; + if (!bridge_peer || bridge_peer->pool_size == 0 || + !bridge_peer->bridge_discovery) { + return; + } + + fprintf(stderr, "Cleaning up poll ctx for bridge peer EID %d\n", + bridge_peer->eid); + + for (mctp_eid_t eid = bridge_peer->pool_start; + eid <= bridge_peer->pool_start + bridge_peer->pool_size - 1; + eid++) { + struct bridge_eid_poll_context *poll_ctx = + &discovery->poll_ctxs[eid - bridge_peer->pool_start]; + poll_ctx->stop_poll = true; + } + + for (mctp_eid_t eid = bridge_peer->pool_start; + eid <= bridge_peer->pool_start + bridge_peer->pool_size - 1; + eid++) { + struct bridge_eid_poll_context *poll_ctx = + &discovery->poll_ctxs[eid - bridge_peer->pool_start]; + + if (poll_ctx->recv_io) { + sd_event_source_unref(poll_ctx->recv_io); + poll_ctx->recv_io = NULL; + } + if (poll_ctx->send_timer) { + sd_event_source_unref(poll_ctx->send_timer); + poll_ctx->send_timer = NULL; + } + if (poll_ctx->timeout_timer) { + sd_event_source_unref(poll_ctx->timeout_timer); + poll_ctx->timeout_timer = NULL; + } + if (poll_ctx->sd >= 0) { + close(poll_ctx->sd); + poll_ctx->sd = -1; + } + } + + free(discovery->poll_ctxs); + free(discovery); + bridge_peer->bridge_discovery = NULL; +} + +static void remove_bridge_downstream_peers(struct peer *bridge_peer) +{ + struct ctx *ctx = bridge_peer->ctx; + struct net *n = lookup_net(ctx, bridge_peer->net); + int rc = 0; + + if (!bridge_peer || bridge_peer->pool_size == 0) { + return; + } + if (!n) { + warnx("No network found for bridge peer EID %d", + bridge_peer->eid); + return; + } + + if (bridge_peer->ctx->verbose) { + fprintf(stderr, + "Remove downstream peers for bridge EID %d (pool %d-%d, size %d)\n", + bridge_peer->eid, bridge_peer->pool_start, + bridge_peer->pool_start + bridge_peer->pool_size - 1, + bridge_peer->pool_size); + } + + // Find and remove all peers in the bridge's EID pool + for (mctp_eid_t eid = bridge_peer->pool_start; + eid <= bridge_peer->pool_start + bridge_peer->pool_size - 1; + eid++) { + struct peer *downstream_peer = n->peers[eid]; + if (downstream_peer && downstream_peer->state == REMOTE && + downstream_peer != bridge_peer) { + if (bridge_peer->ctx->verbose) { + fprintf(stderr, + "Removing downstream peer EID %d (discovered via bridge EID %d)\n", + eid, bridge_peer->eid); + } + rc = remove_peer(downstream_peer); + if (rc < 0) { + warnx("failed to remove peer %d %s", eid, + strerror(-rc)); + } + } + } +} + +static int handle_poll_timeout(sd_event_source *s, uint64_t usec, + void *userdata) +{ + struct bridge_eid_poll_context *ep_poll_ctx = userdata; + + if (!ep_poll_ctx->parent || !ep_poll_ctx->parent->bridge_peer || + ep_poll_ctx->stop_poll) { + fprintf(stderr, "Bridge polling stopped for EID %d\n", + ep_poll_ctx->eid); + return 0; + } + + ep_poll_ctx->failure_count++; + if (ep_poll_ctx->parent->bridge_peer->ctx->verbose) { + warnx("Response timeout for EID %d count (%d)", + ep_poll_ctx->eid, ep_poll_ctx->failure_count); + } + + if (ep_poll_ctx->failure_count > + ep_poll_ctx->parent->bridge_peer->ctx->ep_removal_thresold) { + warnx("Reached failure threshold for EID %d", ep_poll_ctx->eid); + ep_poll_ctx->failure_count = 0; + if (ep_poll_ctx->peer != NULL) { + remove_peer(ep_poll_ctx->peer); + ep_poll_ctx->peer = NULL; + } + } + + return 0; +} + +static void stop_polling_on_error(struct bridge_eid_poll_context *ep_poll_ctx) +{ + ep_poll_ctx->stop_poll = true; + + // Disable all event sources + if (ep_poll_ctx->recv_io) { + sd_event_source_set_enabled(ep_poll_ctx->recv_io, SD_EVENT_OFF); + } + if (ep_poll_ctx->send_timer) { + sd_event_source_set_enabled(ep_poll_ctx->send_timer, + SD_EVENT_OFF); + } + if (ep_poll_ctx->timeout_timer) { + sd_event_source_set_enabled(ep_poll_ctx->timeout_timer, + SD_EVENT_OFF); + } + + // Close the socket + if (ep_poll_ctx->sd >= 0) { + close(ep_poll_ctx->sd); + ep_poll_ctx->sd = -1; + } +} + +static int handle_bridge_poll_response(sd_event_source *s, int fd, + uint32_t revents, void *userdata) +{ + struct bridge_eid_poll_context *ep_poll_ctx = userdata; + uint8_t *buf = NULL; + size_t buf_size; + struct sockaddr_mctp_ext addr; + struct mctp_ctrl_resp_get_eid *resp; + int rc; + + if (revents & EPOLLERR) { + warnx("%s Socket error for EID %d", __func__, ep_poll_ctx->eid); + stop_polling_on_error(ep_poll_ctx); + return 0; + } + if (revents & EPOLLHUP) { + warnx("%s Socket hangup for EID %d", __func__, + ep_poll_ctx->eid); + stop_polling_on_error(ep_poll_ctx); + return 0; + } + if (!(revents & EPOLLIN)) { + return 0; + } + if (!ep_poll_ctx->parent || !ep_poll_ctx->parent->bridge_peer || + ep_poll_ctx->stop_poll) { + fprintf(stderr, "Bridge polling stopped for EID %d", + ep_poll_ctx->eid); + return 0; + } + + rc = read_message(ep_poll_ctx->parent->bridge_peer->ctx, fd, &buf, + &buf_size, &addr); + if (rc < 0) { + warnx("%s Read failed for EID %d: %s", __func__, + ep_poll_ctx->eid, strerror(-rc)); + return 0; + } + + if (buf_size < sizeof(*resp)) { + warnx("%s Invalid response size for EID %d", __func__, + ep_poll_ctx->eid); + free(buf); + return 0; + } + + resp = (void *)buf; + + if (resp->ctrl_hdr.command_code == MCTP_CTRL_CMD_GET_ENDPOINT_ID) { + // Cancel timeout timer since we got a response + if (ep_poll_ctx->timeout_timer) { + sd_event_source_set_enabled(ep_poll_ctx->timeout_timer, + SD_EVENT_OFF); + } + + ep_poll_ctx->failure_count = 0; + if (ep_poll_ctx->peer == NULL) { + struct peer *peer = NULL; + rc = add_peer(ep_poll_ctx->parent->bridge_peer->ctx, + &(ep_poll_ctx->parent->bridge_peer->phys), + ep_poll_ctx->eid, ep_poll_ctx->net, + &peer); + if (rc < 0) { + warnx("Failed to add peer for EID %d: %s", + ep_poll_ctx->eid, strerror(-rc)); + free(buf); + return rc; + } + + ep_poll_ctx->peer = peer; + peer->mtu = mctp_nl_min_mtu_byindex(peer->ctx->nl, + peer->phys.ifindex); + rc = query_peer_properties(peer); + if (rc < 0) { + warnx("Failed to query peer properties for EID %d: %s", + ep_poll_ctx->eid, strerror(-rc)); + remove_peer(peer); + ep_poll_ctx->peer = NULL; + free(buf); + return rc; + } + rc = publish_peer(peer, false); + if (rc < 0) { + warnx("Failed to publish peer for EID %d: %s", + ep_poll_ctx->eid, strerror(-rc)); + remove_peer(peer); + ep_poll_ctx->peer = NULL; + free(buf); + return rc; + } + } + } else { + warnx("Polling: Received unexpected command code %d for EID %d : Ignoring", + resp->ctrl_hdr.command_code, ep_poll_ctx->eid); + ep_poll_ctx->failure_count++; + } + + free(buf); + return 0; +} + +static int send_bridge_poll_request(sd_event_source *s, uint64_t usec, + void *userdata) +{ + struct bridge_eid_poll_context *ep_poll_ctx = userdata; + struct mctp_ctrl_cmd_get_eid req = { 0 }; + struct sockaddr_mctp addr = { .smctp_family = AF_MCTP, + .smctp_network = ep_poll_ctx->net, + .smctp_addr.s_addr = ep_poll_ctx->eid, + .smctp_type = MCTP_CTRL_HDR_MSG_TYPE, + .smctp_tag = MCTP_TAG_OWNER }; + int rc; + uint64_t next_usec; + + if (!ep_poll_ctx->parent || !ep_poll_ctx->parent->bridge_peer || + ep_poll_ctx->stop_poll || ep_poll_ctx->sd < 0) { + warnx("Polling stopped for EID %d", ep_poll_ctx->eid); + if (ep_poll_ctx->send_timer) { + sd_event_source_set_enabled(ep_poll_ctx->send_timer, + SD_EVENT_OFF); + } + if (ep_poll_ctx->timeout_timer) { + sd_event_source_set_enabled(ep_poll_ctx->timeout_timer, + SD_EVENT_OFF); + } + return 0; + } + + req.ctrl_hdr.command_code = MCTP_CTRL_CMD_GET_ENDPOINT_ID; + req.ctrl_hdr.rq_dgram_inst = + RQDI_REQ | mctp_next_iid(ep_poll_ctx->parent->bridge_peer->ctx); + + rc = mctp_ops.mctp.sendto(ep_poll_ctx->sd, &req, sizeof(req), 0, + (struct sockaddr *)&addr, sizeof(addr)); + if (rc < 0) { + warnx("Failed to send poll command to EID %d: %s", + ep_poll_ctx->eid, strerror(errno)); + } + + if (ep_poll_ctx->timeout_timer) { + sd_event_now(ep_poll_ctx->parent->bridge_peer->ctx->event, + CLOCK_MONOTONIC, &next_usec); + sd_event_source_set_time(ep_poll_ctx->timeout_timer, + next_usec + + ep_poll_ctx->poll_timeout_us); + sd_event_source_set_enabled(ep_poll_ctx->timeout_timer, + SD_EVENT_ONESHOT); + } + + if (!ep_poll_ctx->stop_poll) { + // Schedule next poll command + sd_event_now(ep_poll_ctx->parent->bridge_peer->ctx->event, + CLOCK_MONOTONIC, &next_usec); + sd_event_source_set_time( + s, next_usec + ep_poll_ctx->poll_interval_us); + sd_event_source_set_enabled(s, SD_EVENT_ONESHOT); + } + + return 0; +} + +static int setup_eid_poll_context(struct bridge_eid_poll_context *poll_ctx, + mctp_eid_t eid) +{ + int rc; + int val = 1; + struct sockaddr_mctp addr = { .smctp_family = AF_MCTP, + .smctp_network = poll_ctx->net, + .smctp_type = MCTP_CTRL_HDR_MSG_TYPE, + .smctp_tag = MCTP_TAG_OWNER }; + + // Create and setup receive socket + poll_ctx->sd = mctp_ops.mctp.socket(); + if (poll_ctx->sd < 0) + return -errno; + + addr.smctp_addr.s_addr = eid; + rc = mctp_ops.mctp.bind(poll_ctx->sd, (struct sockaddr *)&addr, + sizeof(addr)); + if (rc < 0) { + close(poll_ctx->sd); + return -errno; + } + + rc = mctp_ops.mctp.setsockopt(poll_ctx->sd, SOL_MCTP, MCTP_OPT_ADDR_EXT, + &val, sizeof(val)); + if (rc < 0) + goto err_cleanup; + + // Create polling timeout timer + rc = sd_event_add_time(poll_ctx->parent->bridge_peer->ctx->event, + &poll_ctx->timeout_timer, CLOCK_MONOTONIC, 0, 0, + handle_poll_timeout, poll_ctx); + if (rc < 0) + goto err_cleanup; + + // Initially disable the polling timeout timer + sd_event_source_set_enabled(poll_ctx->timeout_timer, SD_EVENT_OFF); + + poll_ctx->eid = eid; + poll_ctx->failure_count = 0; + poll_ctx->peer = NULL; + poll_ctx->poll_interval_us = + poll_ctx->parent->bridge_peer->ctx->poll_interval_us; + poll_ctx->poll_timeout_us = + poll_ctx->parent->bridge_peer->ctx->poll_timeout_us; + poll_ctx->stop_poll = false; + return 0; + +err_cleanup: + warnx("Failure in setting poll context %s", strerror(-rc)); + if (poll_ctx->timeout_timer) { + sd_event_source_unref(poll_ctx->timeout_timer); + } + if (poll_ctx->sd >= 0) { + close(poll_ctx->sd); + } + return -errno; +} + +static int setup_async_downstream_ep_poll(struct peer *bridge_peer) +{ + struct bridge_endpoint_discovery *ep_discovery = NULL; + struct bridge_eid_poll_context *poll_ctx = NULL; + int rc = 0; + + // Check if bridge peer is still valid + if (!bridge_peer || bridge_peer->pool_size == 0) { + warnx("Invalid bridge peer used for polling setup"); + return -EINVAL; + } + + ep_discovery = calloc(1, sizeof(*ep_discovery)); + if (!ep_discovery) { + warnx("Out of memory"); + return -ENOMEM; + } + + ep_discovery->bridge_peer = bridge_peer; + + // Setup poll_ctx for each EID + ep_discovery->poll_ctxs = calloc( + bridge_peer->pool_size, sizeof(struct bridge_eid_poll_context)); + if (!ep_discovery->poll_ctxs) { + rc = -ENOMEM; + warnx("%s Out of memory", __func__); + goto err_free_query; + } + + for (mctp_eid_t eid = bridge_peer->pool_start; + eid <= bridge_peer->pool_start + bridge_peer->pool_size - 1; + eid++) { + poll_ctx = + &ep_discovery->poll_ctxs[eid - bridge_peer->pool_start]; + poll_ctx->parent = ep_discovery; + poll_ctx->net = bridge_peer->net; + + rc = setup_eid_poll_context(poll_ctx, eid); + if (rc < 0) { + warnx("setup_eid_poll_context failed for EID %d: %s", + eid, strerror(-rc)); + goto err_cleanup; + } + + // Setup IO handler for receives + rc = sd_event_add_io(bridge_peer->ctx->event, + &poll_ctx->recv_io, poll_ctx->sd, EPOLLIN, + handle_bridge_poll_response, poll_ctx); + if (rc < 0) { + warnx("sd_event_add_io failed for EID %d: %s", eid, + strerror(-rc)); + goto err_cleanup; + } + + // Setup polling timer for sends + rc = sd_event_add_time(bridge_peer->ctx->event, + &poll_ctx->send_timer, CLOCK_MONOTONIC, + 0, 0, send_bridge_poll_request, + poll_ctx); + if (rc < 0) { + warnx("sd_event_add_time failed for EID %d: %s", eid, + strerror(-rc)); + goto err_cleanup; + } + + // Initiate polling immediately + uint64_t now; + sd_event_now(bridge_peer->ctx->event, CLOCK_MONOTONIC, &now); + sd_event_source_set_time(poll_ctx->send_timer, now); + } + + // Store the discovery structure in the peer for cleanup + bridge_peer->bridge_discovery = ep_discovery; + return 0; + +err_cleanup: + // Cleanup any socket poll_ctxs we created + for (mctp_eid_t eid = bridge_peer->pool_start; + eid <= bridge_peer->pool_start + bridge_peer->pool_size - 1; + eid++) { + struct bridge_eid_poll_context *poll_ctx = + &ep_discovery->poll_ctxs[eid - bridge_peer->pool_start]; + if (poll_ctx->recv_io) + sd_event_source_unref(poll_ctx->recv_io); + if (poll_ctx->send_timer) + sd_event_source_unref(poll_ctx->send_timer); + if (poll_ctx->sd >= 0) + close(poll_ctx->sd); + if (poll_ctx->timeout_timer) + sd_event_source_unref(poll_ctx->timeout_timer); + } + free(ep_discovery->poll_ctxs); + +err_free_query: + free(ep_discovery); + warnx("%s returned error: %s", __func__, strerror(-rc)); + + return rc; +} + static int endpoint_allocate_eid(struct peer *peer) { uint8_t allocated_pool_size = 0; @@ -4338,7 +4862,8 @@ static int endpoint_allocate_eid(struct peer *peer) rc = 0; } } - // TODO: Polling logic for downstream EID + // Configure polling for all downstream EIDs + setup_async_downstream_ep_poll(peer); } } From a472892e6e1514fbb842680cb1cabf9b06ec2eb6 Mon Sep 17 00:00:00 2001 From: Faizan Ali Date: Wed, 23 Jul 2025 01:21:55 +0530 Subject: [PATCH 9/9] Update test_assign_dynamic_bridge_eid() to incorporate bridge polling * modify tets case to include bridge polling logic. * once endpoints are disocvered assert the object path Signed-off-by: Faizan Ali --- tests/mctpenv/__init__.py | 2 +- tests/test_mctpd.py | 14 ++++++++++++-- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/tests/mctpenv/__init__.py b/tests/mctpenv/__init__.py index 1599733..c258262 100644 --- a/tests/mctpenv/__init__.py +++ b/tests/mctpenv/__init__.py @@ -416,7 +416,7 @@ def lookup_endpoint(self, iface, lladdr): # register the core mctp control socket, on which incoming requests # are sent to mctpd def register_mctp_socket(self, socket): - assert self.mctp_socket is None + # assert self.mctp_socket is None self.mctp_socket = socket # MCTP-capable pyroute2 objects diff --git a/tests/test_mctpd.py b/tests/test_mctpd.py index 842be96..9ee393e 100644 --- a/tests/test_mctpd.py +++ b/tests/test_mctpd.py @@ -837,6 +837,7 @@ async def test_interface_rename_with_peers(dbus, mctpd): Tests that: - Bridge endpoint can be assigned a dynamic EID - Downstream endpoints get contiguous EIDs after bridge's own eid +- Endpoint discovery is done via polling which continues in the background discovering the endpoint """ async def test_assign_dynamic_bridge_eid(dbus, mctpd): iface = mctpd.system.interfaces[0] @@ -863,10 +864,19 @@ async def test_assign_dynamic_bridge_eid(dbus, mctpd): assert path == f'/au/com/codeconstruct/mctp1/networks/1/endpoints/{eid}' assert new + # static neighbour; no gateway route support at present + for i in range(pool_size): + br_ep = ep.bridged_eps[i] + await mctpd.system.add_neighbour(mctpd.system.Neighbour(iface, ep.lladdr, br_ep.eid)) + net = await mctpd_mctp_network_obj(dbus, iface.net) + # wait for the endpoint to be discovered via polling + await trio.sleep(5) for i in range(pool_size): br_ep = ep.bridged_eps[i] #check if the downstream endpoint eid is contiguous to the bridge endpoint eid assert (eid + i + 1) == br_ep.eid - (path, new) = await net.call_learn_endpoint(br_ep.eid) - assert path == f'/au/com/codeconstruct/mctp1/networks/1/endpoints/{br_ep.eid}' + dep = await mctpd_mctp_endpoint_common_obj(dbus, + f'/au/com/codeconstruct/mctp1/networks/1/endpoints/{br_ep.eid}' + ) + assert dep is not None \ No newline at end of file