@@ -190,6 +190,10 @@ struct peer {
190
190
uint8_t endpoint_type ;
191
191
uint8_t medium_spec ;
192
192
} recovery ;
193
+
194
+ // Pool size
195
+ uint8_t pool_size ;
196
+ uint8_t pool_start ;
193
197
};
194
198
195
199
struct ctx {
@@ -1364,7 +1368,7 @@ static int endpoint_query_phys(struct ctx *ctx, const dest_phys *dest,
1364
1368
}
1365
1369
1366
1370
/* returns -ECONNREFUSED if the endpoint returns failure. */
1367
- static int endpoint_send_set_endpoint_id (const struct peer * peer ,
1371
+ static int endpoint_send_set_endpoint_id (struct peer * peer ,
1368
1372
mctp_eid_t * new_eidp )
1369
1373
{
1370
1374
struct sockaddr_mctp_ext addr ;
@@ -1430,9 +1434,11 @@ static int endpoint_send_set_endpoint_id(const struct peer *peer,
1430
1434
1431
1435
alloc = resp -> status & 0x3 ;
1432
1436
if (alloc != 0 ) {
1433
- // TODO for bridges
1434
- warnx ("%s requested allocation pool, unimplemented" ,
1435
- dest_phys_tostr (dest ));
1437
+ peer -> pool_size = resp -> eid_pool_size ;
1438
+ if (peer -> ctx -> verbose ) {
1439
+ warnx ("%s requested allocation of pool size = %d" ,
1440
+ dest_phys_tostr (dest ), peer -> pool_size );
1441
+ }
1436
1442
}
1437
1443
1438
1444
rc = 0 ;
@@ -1660,15 +1666,37 @@ static int peer_set_mtu(struct ctx *ctx, struct peer *peer, uint32_t mtu)
1660
1666
return rc ;
1661
1667
}
1662
1668
1669
+ // checks if EIDs from bridge + 1 has contiguous max_pool_size available eids
1670
+ // returns next candidate eid for pool start
1671
+ static int get_next_pool_start (mctp_eid_t bridge_eid , struct net * n ,
1672
+ int max_pool_size )
1673
+ {
1674
+ if (bridge_eid + 1 + max_pool_size > eid_alloc_max ) {
1675
+ return - EADDRNOTAVAIL ;
1676
+ }
1677
+ for (mctp_eid_t e = bridge_eid + 1 ; e <= bridge_eid + max_pool_size ;
1678
+ e ++ ) {
1679
+ // found a bridge in between, need to skip its pool range
1680
+ if (n -> peers [e ] != NULL ) {
1681
+ e += n -> peers [e ]-> pool_size ;
1682
+ return e ;
1683
+ }
1684
+ }
1685
+ /* possible to have contiguous eids within
1686
+ bridge_eid to bridge_eid + max_pool_size */
1687
+ return bridge_eid + 1 ;
1688
+ }
1689
+
1663
1690
static int endpoint_assign_eid (struct ctx * ctx , sd_bus_error * berr ,
1664
1691
const dest_phys * dest , struct peer * * ret_peer ,
1665
- mctp_eid_t static_eid )
1692
+ mctp_eid_t static_eid , bool assign_bridge )
1666
1693
{
1667
1694
mctp_eid_t e , new_eid ;
1668
1695
struct net * n = NULL ;
1669
1696
struct peer * peer = NULL ;
1670
1697
uint32_t net ;
1671
1698
int rc ;
1699
+ bool is_pool_possible = false;
1672
1700
1673
1701
net = mctp_nl_net_byindex (ctx -> nl , dest -> ifindex );
1674
1702
if (!net ) {
@@ -1691,11 +1719,50 @@ static int endpoint_assign_eid(struct ctx *ctx, sd_bus_error *berr,
1691
1719
} else {
1692
1720
/* Find an unused EID */
1693
1721
for (e = eid_alloc_min ; e <= eid_alloc_max ; e ++ ) {
1694
- if (n -> peers [e ])
1722
+ if (n -> peers [e ]) {
1723
+ // used peer may be a bridge, skip its eid range
1724
+ e += n -> peers [e ]-> pool_size ;
1695
1725
continue ;
1726
+ }
1727
+
1728
+ // check for max sized pool from e + 1
1729
+ if (assign_bridge ) {
1730
+ int next_pool_start = get_next_pool_start (
1731
+ e , n , ctx -> max_pool_size );
1732
+ if (next_pool_start < 0 ) {
1733
+ warnx ("Ran out of EIDs from net %d while"
1734
+ "allocating bridge downstream endpoint at %s " ,
1735
+ net , dest_phys_tostr (dest ));
1736
+ is_pool_possible = false;
1737
+ /*ran out of pool eid : set only bridge eid then
1738
+ find first available bridge eid which is not part of any pool*/
1739
+ for (e = eid_alloc_min ;
1740
+ e <= eid_alloc_max ; e ++ ) {
1741
+ if (n -> peers [e ]) {
1742
+ // used peer may be a bridge, skip its eid range
1743
+ e += n -> peers [e ]
1744
+ -> pool_size ;
1745
+ continue ;
1746
+ }
1747
+ break ;
1748
+ }
1749
+ } else if (next_pool_start != e + 1 ) {
1750
+ // e doesn't have any contiguous max pool size eids available
1751
+ e += next_pool_start ;
1752
+ continue ;
1753
+ } else {
1754
+ // found contiguous eids of max_pool_size from bridge_eid
1755
+ is_pool_possible = true;
1756
+ }
1757
+ }
1758
+
1696
1759
rc = add_peer (ctx , dest , e , net , & peer );
1697
1760
if (rc < 0 )
1698
1761
return rc ;
1762
+ if (assign_bridge && is_pool_possible ) {
1763
+ peer -> pool_size = ctx -> max_pool_size ;
1764
+ peer -> pool_start = e + 1 ;
1765
+ }
1699
1766
break ;
1700
1767
}
1701
1768
if (e > eid_alloc_max ) {
@@ -1718,6 +1785,10 @@ static int endpoint_assign_eid(struct ctx *ctx, sd_bus_error *berr,
1718
1785
}
1719
1786
1720
1787
if (new_eid != peer -> eid ) {
1788
+ // avoid allocation for any different EID in response
1789
+ warnx ("Mismatch of requested from received EID, resetting the pool" );
1790
+ peer -> pool_size = 0 ;
1791
+ peer -> pool_start = 0 ;
1721
1792
rc = change_peer_eid (peer , new_eid );
1722
1793
if (rc == - EEXIST ) {
1723
1794
sd_bus_error_setf (
@@ -2117,7 +2188,7 @@ static int method_setup_endpoint(sd_bus_message *call, void *data,
2117
2188
}
2118
2189
2119
2190
/* Set Endpoint ID */
2120
- rc = endpoint_assign_eid (ctx , berr , dest , & peer , 0 );
2191
+ rc = endpoint_assign_eid (ctx , berr , dest , & peer , 0 , false );
2121
2192
if (rc < 0 )
2122
2193
goto err ;
2123
2194
@@ -2170,21 +2241,41 @@ static int method_assign_endpoint(sd_bus_message *call, void *data,
2170
2241
peer -> net , peer_path , 0 );
2171
2242
}
2172
2243
2173
- rc = endpoint_assign_eid (ctx , berr , dest , & peer , 0 );
2244
+ rc = endpoint_assign_eid (ctx , berr , dest , & peer , 0 , true );
2174
2245
if (rc < 0 )
2175
2246
goto err ;
2176
2247
2177
2248
peer_path = path_from_peer (peer );
2178
2249
if (!peer_path )
2179
2250
goto err ;
2180
2251
2252
+ if (peer -> pool_size > 0 ) {
2253
+ // Call for Allocate EndpointID
2254
+ }
2255
+
2181
2256
return sd_bus_reply_method_return (call , "yisb" , peer -> eid , peer -> net ,
2182
2257
peer_path , 1 );
2183
2258
err :
2184
2259
set_berr (ctx , rc , berr );
2185
2260
return rc ;
2186
2261
}
2187
2262
2263
+ // Checks if given EID belongs to any bridge's pool range
2264
+ static bool is_eid_in_bridge_pool (struct net * n , mctp_eid_t eid )
2265
+ {
2266
+ for (int i = eid_alloc_min ; i <= eid ; i ++ ) {
2267
+ struct peer * peer = n -> peers [i ];
2268
+ if (peer && peer -> pool_size > 0 ) {
2269
+ if (eid >= peer -> pool_start &&
2270
+ eid < peer -> pool_start + peer -> pool_size ) {
2271
+ return true;
2272
+ }
2273
+ i += peer -> pool_size ;
2274
+ }
2275
+ }
2276
+ return false;
2277
+ }
2278
+
2188
2279
static int method_assign_endpoint_static (sd_bus_message * call , void * data ,
2189
2280
sd_bus_error * berr )
2190
2281
{
@@ -2239,10 +2330,22 @@ static int method_assign_endpoint_static(sd_bus_message *call, void *data,
2239
2330
return sd_bus_error_setf (berr ,
2240
2331
SD_BUS_ERROR_INVALID_ARGS ,
2241
2332
"Address in use" );
2333
+ } else {
2334
+ // is requested EID part of any bridge pool range
2335
+ struct net * n = lookup_net (ctx , netid );
2336
+ if (!n ) {
2337
+ bug_warn ("%s: Bad old net %d" , __func__ , netid );
2338
+ return - EPROTO ;
2339
+ }
2340
+ if (is_eid_in_bridge_pool (n , eid )) {
2341
+ return sd_bus_error_setf (
2342
+ berr , SD_BUS_ERROR_INVALID_ARGS ,
2343
+ "EID belongs to another MCTP bridge pool" );
2344
+ }
2242
2345
}
2243
2346
}
2244
2347
2245
- rc = endpoint_assign_eid (ctx , berr , dest , & peer , eid );
2348
+ rc = endpoint_assign_eid (ctx , berr , dest , & peer , eid , false );
2246
2349
if (rc < 0 ) {
2247
2350
goto err ;
2248
2351
}
@@ -2652,7 +2755,8 @@ static int peer_endpoint_recover(sd_event_source *s, uint64_t usec,
2652
2755
* after which we immediately return as there's no old peer state left to
2653
2756
* maintain.
2654
2757
*/
2655
- return endpoint_assign_eid (ctx , NULL , & phys , & peer , 0 );
2758
+ return endpoint_assign_eid (ctx , NULL , & phys , & peer , 0 ,
2759
+ false);
2656
2760
}
2657
2761
2658
2762
/* Confirmation of the same device, apply its already allocated EID */
0 commit comments