Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
174 changes: 164 additions & 10 deletions drivers/net/ethernet/microsoft/mana/gdma_main.c
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
#include <linux/version.h>

#include <net/mana/mana.h>
#include <net/mana/hw_channel.h>

struct dentry *mana_debugfs_root;

Expand Down Expand Up @@ -63,6 +64,24 @@ static void mana_gd_init_registers(struct pci_dev *pdev)
mana_gd_init_vf_regs(pdev);
}

/* Suppress logging when we set timeout to zero */
bool mana_need_log(struct gdma_context *gc, int err)
{
	struct hw_channel_context *hwc;

	/* Anything other than a timeout (or a missing context) is always
	 * worth logging.
	 */
	if (err != -ETIMEDOUT || !gc)
		return true;

	/* A zero HWC timeout means the timeout was expected (reset in
	 * progress) — stay quiet in that case.
	 */
	hwc = gc->hwc.driver_data;
	return !(hwc && hwc->hwc_timeout == 0);
}

static int mana_gd_query_max_resources(struct pci_dev *pdev)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
Expand Down Expand Up @@ -266,8 +285,9 @@ static int mana_gd_disable_queue(struct gdma_queue *queue)

err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
if (err || resp.hdr.status) {
dev_err(gc->dev, "Failed to disable queue: %d, 0x%x\n", err,
resp.hdr.status);
if (mana_need_log(gc, err))
dev_err(gc->dev, "Failed to disable queue: %d, 0x%x\n", err,
resp.hdr.status);
return err ? err : -EPROTO;
}

Expand Down Expand Up @@ -350,11 +370,113 @@ void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit)
head, arm_bit);
}

#define MANA_SERVICE_PERIOD 10

/* Service an FPGA reconfiguration request: remove the device's parent
 * bridge from the PCI tree, wait out the service window, then rescan the
 * grandparent bus so the reconfigured function is re-enumerated.
 */
static void mana_serv_fpga(struct pci_dev *pdev)
{
	struct pci_bus *bus, *parent;

	/* Hold the global rescan/remove lock across the whole
	 * remove + sleep + rescan cycle.
	 */
	pci_lock_rescan_remove();

	bus = pdev->bus;
	if (!bus) {
		dev_err(&pdev->dev, "MANA service: no bus\n");
		goto out;
	}

	parent = bus->parent;
	if (!parent) {
		dev_err(&pdev->dev, "MANA service: no parent bus\n");
		goto out;
	}

	/* Remove the bridge (bus->self) and, with it, this device. */
	pci_stop_and_remove_bus_device(bus->self);

	/* Give the FPGA reconfiguration MANA_SERVICE_PERIOD seconds to
	 * complete before re-enumerating.
	 */
	msleep(MANA_SERVICE_PERIOD * 1000);

	pci_rescan_bus(parent);

out:
	pci_unlock_rescan_remove();
}

/* Service a NIC-requested reset: suspend the device, wait out the
 * service window, then resume it.  Runs from the service workqueue.
 */
static void mana_serv_reset(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);
	struct hw_channel_context *hwc;

	if (!gc) {
		dev_err(&pdev->dev, "MANA service: no GC\n");
		return;
	}

	hwc = gc->hwc.driver_data;
	if (!hwc) {
		dev_err(&pdev->dev, "MANA service: no HWC\n");
		goto out;
	}

	/* HWC is not responding in this case, so don't wait */
	hwc->hwc_timeout = 0;

	dev_info(&pdev->dev, "MANA reset cycle start\n");

	/* NOTE(review): mana_gd_suspend()/mana_gd_resume() return values
	 * are ignored here — confirm a failed resume is surfaced elsewhere.
	 */
	mana_gd_suspend(pdev, PMSG_SUSPEND);

	/* Keep the device down for the MANA_SERVICE_PERIOD-second window. */
	msleep(MANA_SERVICE_PERIOD * 1000);

	mana_gd_resume(pdev);

	dev_info(&pdev->dev, "MANA reset cycle completed\n");

out:
	/* Re-arm service handling; gc is known non-NULL on every path that
	 * reaches this label.
	 */
	gc->in_service = false;
}

/* Deferred-work descriptor for a MANA service event, allocated in EQE
 * processing and freed by the work function.
 */
struct mana_serv_work {
	struct work_struct serv_work;	/* workqueue item */
	struct pci_dev *pdev;		/* device to service; a reference is held */
	enum gdma_eqe_type type;	/* which service event triggered this */
};

/* Work function for a queued MANA service event: dispatch on the event
 * type, then drop the device reference, free the work item, and release
 * the module reference taken when the work was scheduled.
 */
static void mana_serv_func(struct work_struct *w)
{
	struct mana_serv_work *mns_wk =
		container_of(w, struct mana_serv_work, serv_work);
	struct pci_dev *pdev = mns_wk->pdev;

	if (pdev) {
		if (mns_wk->type == GDMA_EQE_HWC_FPGA_RECONFIG)
			mana_serv_fpga(pdev);
		else if (mns_wk->type == GDMA_EQE_HWC_RESET_REQUEST)
			mana_serv_reset(pdev);
		else
			dev_err(&pdev->dev, "MANA service: unknown type %d\n",
				mns_wk->type);
	}

	/* pci_dev_put() tolerates a NULL pdev. */
	pci_dev_put(pdev);
	kfree(mns_wk);
	module_put(THIS_MODULE);
}

static void mana_gd_process_eqe(struct gdma_queue *eq)
{
u32 head = eq->head % (eq->queue_size / GDMA_EQE_SIZE);
struct gdma_context *gc = eq->gdma_dev->gdma_context;
struct gdma_eqe *eq_eqe_ptr = eq->queue_mem_ptr;
struct mana_serv_work *mns_wk;
union gdma_eqe_info eqe_info;
enum gdma_eqe_type type;
struct gdma_event event;
Expand Down Expand Up @@ -398,6 +520,35 @@ static void mana_gd_process_eqe(struct gdma_queue *eq)
eq->eq.callback(eq->eq.context, eq, &event);
break;

case GDMA_EQE_HWC_FPGA_RECONFIG:
case GDMA_EQE_HWC_RESET_REQUEST:
dev_info(gc->dev, "Recv MANA service type:%d\n", type);

if (gc->in_service) {
dev_info(gc->dev, "Already in service\n");
break;
}

if (!try_module_get(THIS_MODULE)) {
dev_info(gc->dev, "Module is unloading\n");
break;
}

mns_wk = kzalloc(sizeof(*mns_wk), GFP_ATOMIC);
if (!mns_wk) {
module_put(THIS_MODULE);
break;
}

dev_info(gc->dev, "Start MANA service type:%d\n", type);
gc->in_service = true;
mns_wk->pdev = to_pci_dev(gc->dev);
mns_wk->type = type;
pci_dev_get(mns_wk->pdev);
INIT_WORK(&mns_wk->serv_work, mana_serv_func);
schedule_work(&mns_wk->serv_work);
break;

default:
break;
}
Expand Down Expand Up @@ -539,7 +690,8 @@ int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq)

err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
if (err) {
dev_err(dev, "test_eq failed: %d\n", err);
if (mana_need_log(gc, err))
dev_err(dev, "test_eq failed: %d\n", err);
goto out;
}

Expand Down Expand Up @@ -574,7 +726,7 @@ static void mana_gd_destroy_eq(struct gdma_context *gc, bool flush_evenets,

if (flush_evenets) {
err = mana_gd_test_eq(gc, queue);
if (err)
if (err && mana_need_log(gc, err))
dev_warn(gc->dev, "Failed to flush EQ: %d\n", err);
}

Expand Down Expand Up @@ -720,8 +872,9 @@ int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle)

err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
if (err || resp.hdr.status) {
dev_err(gc->dev, "Failed to destroy DMA region: %d, 0x%x\n",
err, resp.hdr.status);
if (mana_need_log(gc, err))
dev_err(gc->dev, "Failed to destroy DMA region: %d, 0x%x\n",
err, resp.hdr.status);
return -EPROTO;
}

Expand Down Expand Up @@ -1020,8 +1173,9 @@ int mana_gd_deregister_device(struct gdma_dev *gd)

err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
if (err || resp.hdr.status) {
dev_err(gc->dev, "Failed to deregister device: %d, 0x%x\n",
err, resp.hdr.status);
if (mana_need_log(gc, err))
dev_err(gc->dev, "Failed to deregister device: %d, 0x%x\n",
err, resp.hdr.status);
if (!err)
err = -EPROTO;
}
Expand Down Expand Up @@ -1621,7 +1775,7 @@ static void mana_gd_remove(struct pci_dev *pdev)
}

/* The 'state' parameter is not used. */
static int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state)
int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct gdma_context *gc = pci_get_drvdata(pdev);

Expand All @@ -1636,7 +1790,7 @@ static int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state)
* fail -- if this happens, it's safer to just report an error than try to undo
* what has been done.
*/
static int mana_gd_resume(struct pci_dev *pdev)
int mana_gd_resume(struct pci_dev *pdev)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
int err;
Expand Down
10 changes: 7 additions & 3 deletions drivers/net/ethernet/microsoft/mana/hw_channel.c
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
/* Copyright (c) 2021, Microsoft Corporation. */

#include <net/mana/gdma.h>
#include <net/mana/mana.h>
#include <net/mana/hw_channel.h>
#include <linux/vmalloc.h>

Expand Down Expand Up @@ -860,7 +861,9 @@ int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,

if (!wait_for_completion_timeout(&ctx->comp_event,
(msecs_to_jiffies(hwc->hwc_timeout)))) {
dev_err(hwc->dev, "HWC: Request timed out!\n");
if (hwc->hwc_timeout != 0)
dev_err(hwc->dev, "HWC: Request timed out!\n");

err = -ETIMEDOUT;
goto out;
}
Expand All @@ -871,8 +874,9 @@ int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
}

if (ctx->status_code && ctx->status_code != GDMA_STATUS_MORE_ENTRIES) {
dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n",
ctx->status_code);
if (req_msg->req.msg_type != MANA_QUERY_PHY_STAT)
dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n",
ctx->status_code);
err = -EPROTO;
goto out;
}
Expand Down
Loading