import vendor fixes to cxgb
This commit is contained in:
parent 7a4e8171ba
commit 4af83c8cff

Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=180583
@@ -525,6 +525,7 @@ dev/cxgb/common/cxgb_ael1002.c optional cxgb pci
dev/cxgb/common/cxgb_mv88e1xxx.c optional cxgb pci
dev/cxgb/common/cxgb_xgmac.c optional cxgb pci
dev/cxgb/common/cxgb_t3_hw.c optional cxgb pci
dev/cxgb/common/cxgb_tn1010.c optional cxgb pci
dev/cxgb/sys/uipc_mvec.c optional cxgb pci
dev/cxgb/sys/cxgb_support.c optional cxgb pci
dev/cxgb/cxgb_t3fw.c optional cxgb cxgb_t3fw

@@ -46,11 +46,32 @@ enum {
        AEL1002_PWR_DOWN_LO = 0xc012,
        AEL1002_XFI_EQL = 0xc015,
        AEL1002_LB_EN = 0xc017,

        LASI_CTRL = 0x9002,
        LASI_STAT = 0x9005
        AEL_OPT_SETTINGS = 0xc017,
};

struct reg_val {
        unsigned short mmd_addr;
        unsigned short reg_addr;
        unsigned short clear_bits;
        unsigned short set_bits;
};

static int set_phy_regs(struct cphy *phy, const struct reg_val *rv)
{
        int err;

        for (err = 0; rv->mmd_addr && !err; rv++) {
                if (rv->clear_bits == 0xffff)
                        err = mdio_write(phy, rv->mmd_addr, rv->reg_addr,
                                         rv->set_bits);
                else
                        err = t3_mdio_change_bits(phy, rv->mmd_addr,
                                                  rv->reg_addr, rv->clear_bits,
                                                  rv->set_bits);
        }
        return err;
}

static void ael100x_txon(struct cphy *phy)
{
        int tx_on_gpio = phy->addr == 0 ? F_GPIO7_OUT_VAL : F_GPIO2_OUT_VAL;
@@ -158,33 +179,6 @@ static int ael1006_reset(struct cphy *phy, int wait)
        return t3_phy_reset(phy, MDIO_DEV_PMA_PMD, wait);
}

static int ael1006_intr_enable(struct cphy *phy)
{
        return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 1);
}

static int ael1006_intr_disable(struct cphy *phy)
{
        return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 0);
}

static int ael1006_intr_clear(struct cphy *phy)
{
        u32 val;

        return mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &val);
}

static int ael1006_intr_handler(struct cphy *phy)
{
        unsigned int status;
        int err = mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &status);

        if (err)
                return err;
        return (status & 1) ? cphy_cause_link_change : 0;
}

static int ael1006_power_down(struct cphy *phy, int enable)
{
        return t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, MII_BMCR,
@@ -194,10 +188,10 @@ static int ael1006_power_down(struct cphy *phy, int enable)
#ifdef C99_NOT_SUPPORTED
static struct cphy_ops ael1006_ops = {
        ael1006_reset,
        ael1006_intr_enable,
        ael1006_intr_disable,
        ael1006_intr_clear,
        ael1006_intr_handler,
        t3_phy_lasi_intr_enable,
        t3_phy_lasi_intr_disable,
        t3_phy_lasi_intr_clear,
        t3_phy_lasi_intr_handler,
        NULL,
        NULL,
        NULL,
@@ -209,10 +203,10 @@ static struct cphy_ops ael1006_ops = {
#else
static struct cphy_ops ael1006_ops = {
        .reset = ael1006_reset,
        .intr_enable = ael1006_intr_enable,
        .intr_disable = ael1006_intr_disable,
        .intr_clear = ael1006_intr_clear,
        .intr_handler = ael1006_intr_handler,
        .intr_enable = t3_phy_lasi_intr_enable,
        .intr_disable = t3_phy_lasi_intr_disable,
        .intr_clear = t3_phy_lasi_intr_clear,
        .intr_handler = t3_phy_lasi_intr_handler,
        .get_link_status = ael100x_get_link_status,
        .power_down = ael1006_power_down,
};
@@ -228,13 +222,382 @@ int t3_ael1006_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
        return 0;
}

static int ael2005_setup_sr_edc(struct cphy *phy)
{
        static u16 sr_edc[] = {
                0xcc00, 0x2ff4, 0xcc01, 0x3cd4, 0xcc02, 0x2015, 0xcc03, 0x3105,
                0xcc04, 0x6524, 0xcc05, 0x27ff, 0xcc06, 0x300f, 0xcc07, 0x2c8b,
                0xcc08, 0x300b, 0xcc09, 0x4009, 0xcc0a, 0x400e, 0xcc0b, 0x2f72,
                0xcc0c, 0x3002, 0xcc0d, 0x1002, 0xcc0e, 0x2172, 0xcc0f, 0x3012,
                0xcc10, 0x1002, 0xcc11, 0x25d2, 0xcc12, 0x3012, 0xcc13, 0x1002,
                0xcc14, 0xd01e, 0xcc15, 0x27d2, 0xcc16, 0x3012, 0xcc17, 0x1002,
                0xcc18, 0x2004, 0xcc19, 0x3c84, 0xcc1a, 0x6436, 0xcc1b, 0x2007,
                0xcc1c, 0x3f87, 0xcc1d, 0x8676, 0xcc1e, 0x40b7, 0xcc1f, 0xa746,
                0xcc20, 0x4047, 0xcc21, 0x5673, 0xcc22, 0x2982, 0xcc23, 0x3002,
                0xcc24, 0x13d2, 0xcc25, 0x8bbd, 0xcc26, 0x2862, 0xcc27, 0x3012,
                0xcc28, 0x1002, 0xcc29, 0x2092, 0xcc2a, 0x3012, 0xcc2b, 0x1002,
                0xcc2c, 0x5cc3, 0xcc2d, 0x314, 0xcc2e, 0x2942, 0xcc2f, 0x3002,
                0xcc30, 0x1002, 0xcc31, 0xd019, 0xcc32, 0x2032, 0xcc33, 0x3012,
                0xcc34, 0x1002, 0xcc35, 0x2a04, 0xcc36, 0x3c74, 0xcc37, 0x6435,
                0xcc38, 0x2fa4, 0xcc39, 0x3cd4, 0xcc3a, 0x6624, 0xcc3b, 0x5563,
                0xcc3c, 0x2d42, 0xcc3d, 0x3002, 0xcc3e, 0x13d2, 0xcc3f, 0x464d,
                0xcc40, 0x2862, 0xcc41, 0x3012, 0xcc42, 0x1002, 0xcc43, 0x2032,
                0xcc44, 0x3012, 0xcc45, 0x1002, 0xcc46, 0x2fb4, 0xcc47, 0x3cd4,
                0xcc48, 0x6624, 0xcc49, 0x5563, 0xcc4a, 0x2d42, 0xcc4b, 0x3002,
                0xcc4c, 0x13d2, 0xcc4d, 0x2ed2, 0xcc4e, 0x3002, 0xcc4f, 0x1002,
                0xcc50, 0x2fd2, 0xcc51, 0x3002, 0xcc52, 0x1002, 0xcc53, 0x004,
                0xcc54, 0x2942, 0xcc55, 0x3002, 0xcc56, 0x1002, 0xcc57, 0x2092,
                0xcc58, 0x3012, 0xcc59, 0x1002, 0xcc5a, 0x5cc3, 0xcc5b, 0x317,
                0xcc5c, 0x2f72, 0xcc5d, 0x3002, 0xcc5e, 0x1002, 0xcc5f, 0x2942,
                0xcc60, 0x3002, 0xcc61, 0x1002, 0xcc62, 0x22cd, 0xcc63, 0x301d,
                0xcc64, 0x2862, 0xcc65, 0x3012, 0xcc66, 0x1002, 0xcc67, 0x2ed2,
                0xcc68, 0x3002, 0xcc69, 0x1002, 0xcc6a, 0x2d72, 0xcc6b, 0x3002,
                0xcc6c, 0x1002, 0xcc6d, 0x628f, 0xcc6e, 0x2112, 0xcc6f, 0x3012,
                0xcc70, 0x1002, 0xcc71, 0x5aa3, 0xcc72, 0x2dc2, 0xcc73, 0x3002,
                0xcc74, 0x1312, 0xcc75, 0x6f72, 0xcc76, 0x1002, 0xcc77, 0x2807,
                0xcc78, 0x31a7, 0xcc79, 0x20c4, 0xcc7a, 0x3c24, 0xcc7b, 0x6724,
                0xcc7c, 0x1002, 0xcc7d, 0x2807, 0xcc7e, 0x3187, 0xcc7f, 0x20c4,
                0xcc80, 0x3c24, 0xcc81, 0x6724, 0xcc82, 0x1002, 0xcc83, 0x2514,
                0xcc84, 0x3c64, 0xcc85, 0x6436, 0xcc86, 0xdff4, 0xcc87, 0x6436,
                0xcc88, 0x1002, 0xcc89, 0x40a4, 0xcc8a, 0x643c, 0xcc8b, 0x4016,
                0xcc8c, 0x8c6c, 0xcc8d, 0x2b24, 0xcc8e, 0x3c24, 0xcc8f, 0x6435,
                0xcc90, 0x1002, 0xcc91, 0x2b24, 0xcc92, 0x3c24, 0xcc93, 0x643a,
                0xcc94, 0x4025, 0xcc95, 0x8a5a, 0xcc96, 0x1002, 0xcc97, 0x2731,
                0xcc98, 0x3011, 0xcc99, 0x1001, 0xcc9a, 0xc7a0, 0xcc9b, 0x100,
                0xcc9c, 0xc502, 0xcc9d, 0x53ac, 0xcc9e, 0xc503, 0xcc9f, 0xd5d5,
                0xcca0, 0xc600, 0xcca1, 0x2a6d, 0xcca2, 0xc601, 0xcca3, 0x2a4c,
                0xcca4, 0xc602, 0xcca5, 0x111, 0xcca6, 0xc60c, 0xcca7, 0x5900,
                0xcca8, 0xc710, 0xcca9, 0x700, 0xccaa, 0xc718, 0xccab, 0x700,
                0xccac, 0xc720, 0xccad, 0x4700, 0xccae, 0xc801, 0xccaf, 0x7f50,
                0xccb0, 0xc802, 0xccb1, 0x7760, 0xccb2, 0xc803, 0xccb3, 0x7fce,
                0xccb4, 0xc804, 0xccb5, 0x5700, 0xccb6, 0xc805, 0xccb7, 0x5f11,
                0xccb8, 0xc806, 0xccb9, 0x4751, 0xccba, 0xc807, 0xccbb, 0x57e1,
                0xccbc, 0xc808, 0xccbd, 0x2700, 0xccbe, 0xc809, 0xccbf, 0x000,
                0xccc0, 0xc821, 0xccc1, 0x002, 0xccc2, 0xc822, 0xccc3, 0x014,
                0xccc4, 0xc832, 0xccc5, 0x1186, 0xccc6, 0xc847, 0xccc7, 0x1e02,
                0xccc8, 0xc013, 0xccc9, 0xf341, 0xccca, 0xc01a, 0xcccb, 0x446,
                0xcccc, 0xc024, 0xcccd, 0x1000, 0xccce, 0xc025, 0xcccf, 0xa00,
                0xccd0, 0xc026, 0xccd1, 0xc0c, 0xccd2, 0xc027, 0xccd3, 0xc0c,
                0xccd4, 0xc029, 0xccd5, 0x0a0, 0xccd6, 0xc030, 0xccd7, 0xa00,
                0xccd8, 0xc03c, 0xccd9, 0x01c, 0xccda, 0xc005, 0xccdb, 0x7a06,
                0xccdc, 0x000, 0xccdd, 0x2731, 0xccde, 0x3011, 0xccdf, 0x1001,
                0xcce0, 0xc620, 0xcce1, 0x000, 0xcce2, 0xc621, 0xcce3, 0x03f,
                0xcce4, 0xc622, 0xcce5, 0x000, 0xcce6, 0xc623, 0xcce7, 0x000,
                0xcce8, 0xc624, 0xcce9, 0x000, 0xccea, 0xc625, 0xcceb, 0x000,
                0xccec, 0xc627, 0xcced, 0x000, 0xccee, 0xc628, 0xccef, 0x000,
                0xccf0, 0xc62c, 0xccf1, 0x000, 0xccf2, 0x000, 0xccf3, 0x2806,
                0xccf4, 0x3cb6, 0xccf5, 0xc161, 0xccf6, 0x6134, 0xccf7, 0x6135,
                0xccf8, 0x5443, 0xccf9, 0x303, 0xccfa, 0x6524, 0xccfb, 0x00b,
                0xccfc, 0x1002, 0xccfd, 0x2104, 0xccfe, 0x3c24, 0xccff, 0x2105,
                0xcd00, 0x3805, 0xcd01, 0x6524, 0xcd02, 0xdff4, 0xcd03, 0x4005,
                0xcd04, 0x6524, 0xcd05, 0x1002, 0xcd06, 0x5dd3, 0xcd07, 0x306,
                0xcd08, 0x2ff7, 0xcd09, 0x38f7, 0xcd0a, 0x60b7, 0xcd0b, 0xdffd,
                0xcd0c, 0x00a, 0xcd0d, 0x1002, 0xcd0e, 0
        };
        int i, err;

        for (err = i = 0; i < ARRAY_SIZE(sr_edc) && !err; i += 2)
                err = mdio_write(phy, MDIO_DEV_PMA_PMD, sr_edc[i],
                                 sr_edc[i + 1]);
        return err;
}

static int ael2005_reset(struct cphy *phy, int wait)
{
        static struct reg_val regs0[] = {
                { MDIO_DEV_PMA_PMD, 0xc001, 0, 1 << 5 },
                { MDIO_DEV_PMA_PMD, 0xc017, 0, 1 << 5 },
                { MDIO_DEV_PMA_PMD, 0xc013, 0xffff, 0xf341 },
                { MDIO_DEV_PMA_PMD, 0xc210, 0xffff, 0x8000 },
                { MDIO_DEV_PMA_PMD, 0xc210, 0xffff, 0x8100 },
                { MDIO_DEV_PMA_PMD, 0xc210, 0xffff, 0x8000 },
                { MDIO_DEV_PMA_PMD, 0xc210, 0xffff, 0 },
                { 0, 0, 0, 0 }
        };
        static struct reg_val regs1[] = {
                { MDIO_DEV_PMA_PMD, 0xc003, 0xffff, 0x181 },
                { MDIO_DEV_PMA_PMD, 0xc010, 0xffff, 0x448a },
                { MDIO_DEV_PMA_PMD, 0xc04a, 0xffff, 0x5200 },
                { 0, 0, 0, 0 }
        };
        static struct reg_val regs2[] = {
                { MDIO_DEV_PMA_PMD, 0xca00, 0xffff, 0x0080 },
                { MDIO_DEV_PMA_PMD, 0xca12, 0xffff, 0 },
                { 0, 0, 0, 0 }
        };

        int err;

        err = t3_phy_reset(phy, MDIO_DEV_PMA_PMD, 0);
        if (err)
                return err;

        msleep(125);
        err = set_phy_regs(phy, regs0);
        if (err)
                return err;

        msleep(50);
        err = set_phy_regs(phy, regs1);
        if (err)
                return err;

        msleep(50);
        err = ael2005_setup_sr_edc(phy);
        if (err)
                return err;

        return set_phy_regs(phy, regs2);
}

#ifdef C99_NOT_SUPPORTED
static struct cphy_ops ael2005_ops = {
        ael2005_reset,
        t3_phy_lasi_intr_enable,
        t3_phy_lasi_intr_disable,
        t3_phy_lasi_intr_clear,
        t3_phy_lasi_intr_handler,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        ael100x_get_link_status,
        ael1002_power_down,
};
#else
static struct cphy_ops ael2005_ops = {
        .reset = ael2005_reset,
        .intr_enable = t3_phy_lasi_intr_enable,
        .intr_disable = t3_phy_lasi_intr_disable,
        .intr_clear = t3_phy_lasi_intr_clear,
        .intr_handler = t3_phy_lasi_intr_handler,
        .get_link_status = ael100x_get_link_status,
        .power_down = ael1002_power_down,
};
#endif

int t3_ael2005_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
                        const struct mdio_ops *mdio_ops)
{
        cphy_init(phy, adapter, phy_addr, &ael2005_ops, mdio_ops,
                  SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE,
                  "10GBASE-R");
        msleep(125);
        return t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, AEL_OPT_SETTINGS, 0,
                                   1 << 5);
}

#ifdef C99_NOT_SUPPORTED
static struct cphy_ops qt2045_ops = {
        ael1006_reset,
        ael1006_intr_enable,
        ael1006_intr_disable,
        ael1006_intr_clear,
        ael1006_intr_handler,
        t3_phy_lasi_intr_enable,
        t3_phy_lasi_intr_disable,
        t3_phy_lasi_intr_clear,
        t3_phy_lasi_intr_handler,
        NULL,
        NULL,
        NULL,
@@ -246,10 +609,10 @@ static struct cphy_ops qt2045_ops = {
#else
static struct cphy_ops qt2045_ops = {
        .reset = ael1006_reset,
        .intr_enable = ael1006_intr_enable,
        .intr_disable = ael1006_intr_disable,
        .intr_clear = ael1006_intr_clear,
        .intr_handler = ael1006_intr_handler,
        .intr_enable = t3_phy_lasi_intr_enable,
        .intr_disable = t3_phy_lasi_intr_disable,
        .intr_clear = t3_phy_lasi_intr_clear,
        .intr_handler = t3_phy_lasi_intr_handler,
        .get_link_status = ael100x_get_link_status,
        .power_down = ael1006_power_down,
};

@@ -47,10 +47,7 @@ enum {
        NCCTRL_WIN = 32, /* # of congestion control windows */
        NTX_SCHED = 8, /* # of HW Tx scheduling queues */
        PROTO_SRAM_LINES = 128, /* size of protocol sram */
        MAX_NPORTS = 4,
        TP_TMR_RES = 200,
        TP_SRAM_OFFSET = 4096, /* TP SRAM content offset in eeprom */
        TP_SRAM_LEN = 2112, /* TP SRAM content offset in eeprom */
        EXACT_ADDR_FILTERS = 8, /* # of HW exact match filters */
};

#define MAX_RX_COALESCING_LEN 12288U
@@ -122,8 +119,8 @@ enum {
};

struct sg_ent { /* SGE scatter/gather entry */
        u32 len[2];
        u64 addr[2];
        __be32 len[2];
        __be64 addr[2];
};

#ifndef SGE_NUM_GENBITS
@@ -150,7 +147,7 @@ struct adapter_info {
        unsigned char mdien:1;
        unsigned char mdiinv:1;
        unsigned int gpio_out; /* GPIO output settings */
        unsigned int gpio_intr; /* GPIO IRQ enable mask */
        unsigned char gpio_intr[MAX_NPORTS]; /* GPIO PHY IRQ pins */
        unsigned long caps; /* adapter capabilities */
        const struct mdio_ops *mdio_ops; /* MDIO operations */
        const char *desc; /* product description */
@@ -159,8 +156,6 @@ struct adapter_info {
struct port_type_info {
        int (*phy_prep)(struct cphy *phy, adapter_t *adapter, int phy_addr,
                        const struct mdio_ops *ops);


};

struct mc5_stats {
@@ -307,7 +302,7 @@ struct tp_params {
struct qset_params { /* SGE queue set parameters */
        unsigned int polling; /* polling/interrupt service for rspq */
        unsigned int lro; /* large receive offload */
        unsigned int coalesce_nsecs; /* irq coalescing timer */
        unsigned int coalesce_usecs; /* irq coalescing timer */
        unsigned int rspq_size; /* # of entries in response queue */
        unsigned int fl_size; /* # of entries in regular free list */
        unsigned int jumbo_size; /* # of entries in jumbo free list */
@@ -486,12 +481,25 @@ enum {
        MAC_RXFIFO_SIZE = 32768
};

/* IEEE 802.3ae specified MDIO devices */
/* IEEE 802.3 specified MDIO devices */
enum {
        MDIO_DEV_PMA_PMD = 1,
        MDIO_DEV_WIS = 2,
        MDIO_DEV_PCS = 3,
        MDIO_DEV_XGXS = 4
        MDIO_DEV_XGXS = 4,
        MDIO_DEV_ANEG = 7,
        MDIO_DEV_VEND1 = 30,
        MDIO_DEV_VEND2 = 31
};

/* LASI control and status registers */
enum {
        RX_ALARM_CTRL = 0x9000,
        TX_ALARM_CTRL = 0x9001,
        LASI_CTRL = 0x9002,
        RX_ALARM_STAT = 0x9003,
        TX_ALARM_STAT = 0x9004,
        LASI_STAT = 0x9005
};

/* PHY loopback direction */
@@ -556,8 +564,8 @@ static inline int mdio_write(struct cphy *phy, int mmd, int reg,
/* Convenience initializer */
static inline void cphy_init(struct cphy *phy, adapter_t *adapter,
                             int phy_addr, struct cphy_ops *phy_ops,
                             const struct mdio_ops *mdio_ops, unsigned int caps,
                             const char *desc)
                             const struct mdio_ops *mdio_ops, unsigned int caps,
                             const char *desc)
{
        phy->adapter = adapter;
        phy->addr = phy_addr;
@@ -651,7 +659,12 @@ int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
                        unsigned int set);
int t3_phy_reset(struct cphy *phy, int mmd, int wait);
int t3_phy_advertise(struct cphy *phy, unsigned int advert);
int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert);
int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex);
int t3_phy_lasi_intr_enable(struct cphy *phy);
int t3_phy_lasi_intr_disable(struct cphy *phy);
int t3_phy_lasi_intr_clear(struct cphy *phy);
int t3_phy_lasi_intr_handler(struct cphy *phy);

void t3_intr_enable(adapter_t *adapter);
void t3_intr_disable(adapter_t *adapter);
@@ -673,10 +686,10 @@ int t3_read_flash(adapter_t *adapter, unsigned int addr, unsigned int nwords,
int t3_get_tp_version(adapter_t *adapter, u32 *vers);
int t3_check_tpsram_version(adapter_t *adapter, int *must_load);
int t3_check_tpsram(adapter_t *adapter, const u8 *tp_ram, unsigned int size);
int t3_load_fw(adapter_t *adapter, const const u8 *fw_data, unsigned int size);
int t3_load_boot(adapter_t *adapter, u8 *boot_data, unsigned int size);
int t3_load_fw(adapter_t *adapter, const u8 *fw_data, unsigned int size);
int t3_get_fw_version(adapter_t *adapter, u32 *vers);
int t3_check_fw_version(adapter_t *adapter, int *must_load);
int t3_load_boot(adapter_t *adapter, u8 *fw_data, unsigned int size);
int t3_init_hw(adapter_t *adapter, u32 fw_params);
void mac_prep(struct cmac *mac, adapter_t *adapter, int index);
void early_hw_init(adapter_t *adapter, const struct adapter_info *ai);
@@ -684,8 +697,8 @@ int t3_prep_adapter(adapter_t *adapter, const struct adapter_info *ai, int reset
void t3_led_ready(adapter_t *adapter);
void t3_fatal_err(adapter_t *adapter);
void t3_set_vlan_accel(adapter_t *adapter, unsigned int ports, int on);
void t3_tp_set_offload_mode(adapter_t *adap, int enable);
void t3_enable_filters(adapter_t *adap);
void t3_tp_set_offload_mode(adapter_t *adap, int enable);
void t3_config_rss(adapter_t *adapter, unsigned int rss_config, const u8 *cpus,
                   const u16 *rspq);
int t3_read_rss(adapter_t *adapter, u8 *lkup, u16 *map);
@@ -719,7 +732,7 @@ void t3_mc5_intr_handler(struct mc5 *mc5);
int t3_read_mc5_range(const struct mc5 *mc5, unsigned int start, unsigned int n,
                      u32 *buf);

#if defined(CONFIG_CHELSIO_T3_CORE)
#ifdef CONFIG_CHELSIO_T3_CORE
int t3_tp_set_coalescing_size(adapter_t *adap, unsigned int size, int psh);
void t3_tp_set_max_rxsize(adapter_t *adap, unsigned int size);
void t3_tp_get_mib_stats(adapter_t *adap, struct tp_mib_stats *tps);
@@ -774,21 +787,22 @@ int t3_vsc7323_set_mtu(adapter_t *adap, unsigned int mtu, int port);
int t3_vsc7323_set_addr(adapter_t *adap, u8 addr[6], int port);
int t3_vsc7323_enable(adapter_t *adap, int port, int which);
int t3_vsc7323_disable(adapter_t *adap, int port, int which);

int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert);

const struct mac_stats *t3_vsc7323_update_stats(struct cmac *mac);

int t3_mv88e1xxx_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
                          const struct mdio_ops *mdio_ops);
                          const struct mdio_ops *mdio_ops);
int t3_vsc8211_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
                        const struct mdio_ops *mdio_ops);
int t3_ael1002_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
                        const struct mdio_ops *mdio_ops);
int t3_ael1006_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
                        const struct mdio_ops *mdio_ops);
int t3_qt2045_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
                       const struct mdio_ops *mdio_ops);
int t3_ael1002_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
                        const struct mdio_ops *mdio_ops);
int t3_ael1006_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
                        const struct mdio_ops *mdio_ops);
int t3_ael2005_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
                        const struct mdio_ops *mdio_ops);
int t3_qt2045_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
                       const struct mdio_ops *mdio_ops);
int t3_tn1010_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
                       const struct mdio_ops *mdio_ops);
int t3_xaui_direct_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
                            const struct mdio_ops *mdio_ops);
                            const struct mdio_ops *mdio_ops);
#endif /* __CHELSIO_COMMON_H */

@@ -326,9 +326,16 @@ static void mc5_dbgi_mode_disable(const struct mc5 *mc5)
                     V_PRTYEN(mc5->parity_enabled) | F_MBUSEN);
}

/*
 * Initialization that requires the OS and protocol layers to already
 * be intialized goes here.
/**
 *	t3_mc5_init - initialize MC5 and the TCAM
 *	@mc5: the MC5 handle
 *	@nservers: desired number the TCP servers (listening ports)
 *	@nfilters: desired number of HW filters (classifiers)
 *	@nroutes: desired number of routes
 *
 *	Initialize MC5 and the TCAM and partition the TCAM for the requested
 *	number of servers, filters, and routes.  The number of routes is
 *	typically 0 except for specialized uses of the T3 adapters.
 */
int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
                unsigned int nroutes)
@@ -344,7 +351,7 @@ int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
        if (nroutes > MAX_ROUTES || nroutes + nservers + nfilters > tcam_size)
                return -EINVAL;

        if (nfilters && adap->params.rev < T3_REV_C)
        if (nfilters)
                mc5->parity_enabled = 0;

        /* Reset the TCAM */
@@ -420,7 +427,7 @@ int t3_read_mc5_range(const struct mc5 *mc5, unsigned int start,
        }

        mc5_dbgi_mode_disable(mc5);
        return 0;
        return err;
}

#define MC5_INT_FATAL (F_PARITYERR | F_REQQPARERR | F_DISPQPARERR)
@@ -465,7 +472,6 @@ void t3_mc5_intr_handler(struct mc5 *mc5)
        t3_write_reg(adap, A_MC5_DB_INT_CAUSE, cause);
}


/**
 *	t3_mc5_prep - initialize the SW state for MC5
 *	@adapter: the adapter

@@ -299,7 +299,7 @@ static struct cphy_ops mv88e1xxx_ops = {
#endif

int t3_mv88e1xxx_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
                          const struct mdio_ops *mdio_ops)
                          const struct mdio_ops *mdio_ops)
{
        int err;

@@ -310,9 +310,9 @@ int t3_mv88e1xxx_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,

        /* Configure copper PHY transmitter as class A to reduce EMI. */
        err = mdio_write(phy, 0, MV88E1XXX_EXTENDED_ADDR, 0xb);

        if (!err)
                err = mdio_write(phy, 0, MV88E1XXX_EXTENDED_DATA, 0x8004);

        if (!err)
                err = mv88e1xxx_downshift_set(phy, 1); /* Enable downshift */
        return err;

@@ -103,6 +103,7 @@ enum CPL_opcode {
        CPL_RDMA_TERMINATE = 0xA2,
        CPL_TRACE_PKT = 0xA3,
        CPL_RDMA_EC_STATUS = 0xA5,
        CPL_SGE_EC_CR_RETURN = 0xA6,

        NUM_CPL_CMDS /* must be last and previous entries must be sorted */
};
@@ -148,7 +149,8 @@ enum {

enum {
        CPL_PASS_OPEN_ACCEPT,
        CPL_PASS_OPEN_REJECT
        CPL_PASS_OPEN_REJECT,
        CPL_PASS_OPEN_ACCEPT_TNL
};

enum {
@@ -907,6 +909,14 @@ struct cpl_wr_ack {
        __be32 snd_una;
};

struct cpl_sge_ec_cr_return {
        RSS_HDR
        union opcode_tid ot;
        __be16 sge_ec_id;
        __u8 cr;
        __u8 rsvd;
};

struct cpl_rdma_ec_status {
        RSS_HDR
        union opcode_tid ot;
@@ -959,9 +969,11 @@ struct cpl_rx_data {
        __u8 dack_mode:2;
        __u8 psh:1;
        __u8 heartbeat:1;
        __u8 :4;
        __u8 ddp_off:1;
        __u8 :3;
#else
        __u8 :4;
        __u8 :3;
        __u8 ddp_off:1;
        __u8 heartbeat:1;
        __u8 psh:1;
        __u8 dack_mode:2;
@@ -1129,6 +1141,17 @@ struct cpl_tx_pkt {
        __be32 len;
};

struct cpl_tx_pkt_coalesce {
        __be32 cntrl;
        __be32 len;
        __be64 addr;
};

struct tx_pkt_coalesce_wr {
        WR_HDR;
        struct cpl_tx_pkt_coalesce cpl[0];
};

struct cpl_tx_pkt_lso {
        WR_HDR;
        __be32 cntrl;
@@ -1265,7 +1288,8 @@ struct cpl_l2t_write_req {
        WR_HDR;
        union opcode_tid ot;
        __be32 params;
        __u8 rsvd[2];
        __u8 rsvd;
        __u8 port_idx;
        __u8 dst_mac[6];
};

@ -461,32 +461,57 @@ int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
|
||||
return mdio_write(phy, 0, MII_BMCR, ctl);
|
||||
}
|
||||
|
||||
int t3_phy_lasi_intr_enable(struct cphy *phy)
|
||||
{
|
||||
return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 1);
|
||||
}
|
||||
|
||||
int t3_phy_lasi_intr_disable(struct cphy *phy)
|
||||
{
|
||||
return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 0);
|
||||
}
|
||||
|
||||
int t3_phy_lasi_intr_clear(struct cphy *phy)
|
||||
{
|
||||
u32 val;
|
||||
|
||||
return mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &val);
|
||||
}
|
||||
|
||||
int t3_phy_lasi_intr_handler(struct cphy *phy)
|
||||
{
|
||||
unsigned int status;
|
||||
int err = mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &status);
|
||||
|
||||
if (err)
|
||||
return err;
|
||||
return (status & 1) ? cphy_cause_link_change : 0;
|
||||
}
|
||||
|
||||
static struct adapter_info t3_adap_info[] = {
|
||||
{ 1, 1, 0, 0, 0,
|
||||
F_GPIO2_OEN | F_GPIO4_OEN |
|
||||
F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
|
||||
0,
|
||||
F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
|
||||
&mi1_mdio_ops, "Chelsio PE9000" },
|
||||
{ 1, 1, 0, 0, 0,
|
||||
F_GPIO2_OEN | F_GPIO4_OEN |
|
||||
F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
|
||||
0,
|
||||
F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
|
||||
&mi1_mdio_ops, "Chelsio T302" },
|
||||
{ 1, 0, 0, 0, 0,
|
||||
F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
|
||||
F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
|
||||
0, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
|
||||
{ 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
|
||||
&mi1_mdio_ext_ops, "Chelsio T310" },
|
||||
{ 1, 1, 0, 0, 0,
|
||||
F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
|
||||
F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
|
||||
F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
|
||||
SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
|
||||
F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
|
||||
{ S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
|
||||
&mi1_mdio_ext_ops, "Chelsio T320" },
|
||||
{ 4, 0, 0, 0, 0,
|
||||
F_GPIO5_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO5_OUT_VAL |
|
||||
F_GPIO6_OUT_VAL | F_GPIO7_OUT_VAL,
|
||||
F_GPIO1 | F_GPIO2 | F_GPIO3 | F_GPIO4, SUPPORTED_AUI,
|
||||
{ S_GPIO1, S_GPIO2, S_GPIO3, S_GPIO4 }, SUPPORTED_AUI,
|
||||
&mi1_mdio_ops, "Chelsio T304" },
|
||||
};
|
||||
|
||||
@ -505,10 +530,10 @@ static struct port_type_info port_types[] = {
|
||||
{ t3_vsc8211_phy_prep },
|
||||
{ t3_mv88e1xxx_phy_prep },
|
||||
{ t3_xaui_direct_phy_prep },
|
||||
{ NULL },
|
||||
{ t3_ael2005_phy_prep },
|
||||
{ t3_qt2045_phy_prep },
|
||||
{ t3_ael1006_phy_prep },
|
||||
{ NULL },
|
||||
{ t3_tn1010_phy_prep },
|
||||
};
|
||||
|
||||
#define VPD_ENTRY(name, len) \
|
||||
@ -1232,6 +1257,15 @@ void t3_link_changed(adapter_t *adapter, int port_id)
|
||||
|
||||
phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
|
||||
|
||||
if (lc->requested_fc & PAUSE_AUTONEG)
|
||||
fc &= lc->requested_fc;
|
||||
else
|
||||
fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
|
||||
|
||||
if (link_ok == lc->link_ok && speed == lc->speed &&
|
||||
duplex == lc->duplex && fc == lc->fc)
|
||||
return; /* nothing changed */
|
||||
|
||||
if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
|
||||
uses_xaui(adapter)) {
|
||||
if (link_ok)
|
||||
@ -1242,10 +1276,6 @@ void t3_link_changed(adapter_t *adapter, int port_id)
|
||||
lc->link_ok = (unsigned char)link_ok;
|
||||
lc->speed = speed < 0 ? SPEED_INVALID : speed;
|
||||
lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
|
||||
if (lc->requested_fc & PAUSE_AUTONEG)
|
||||
fc &= lc->requested_fc;
|
||||
else
|
||||
fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
|
||||
|
||||
if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
|
||||
/* Set MAC speed, duplex, and flow control to match PHY. */
|
||||
@ -1783,19 +1813,15 @@ static int mac_intr_handler(adapter_t *adap, unsigned int idx)
|
||||
*/
|
||||
int t3_phy_intr_handler(adapter_t *adapter)
|
||||
{
|
||||
u32 mask, gpi = adapter_info(adapter)->gpio_intr;
|
||||
u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
|
||||
|
||||
for_each_port(adapter, i) {
|
||||
struct port_info *p = adap2pinfo(adapter, i);
|
||||
|
||||
mask = gpi - (gpi & (gpi - 1));
|
||||
gpi -= mask;
|
||||
|
||||
if (!(p->phy.caps & SUPPORTED_IRQ))
|
||||
continue;
|
||||
|
||||
if (cause & mask) {
|
||||
if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
|
||||
int phy_cause = p->phy.ops->intr_handler(&p->phy);
|
||||
|
||||
if (phy_cause & cphy_cause_link_change)
|
||||
@ -1869,6 +1895,17 @@ int t3_slow_intr_handler(adapter_t *adapter)
|
||||
return 1;
|
||||
}
|
||||
|
||||
static unsigned int calc_gpio_intr(adapter_t *adap)
|
||||
{
|
||||
unsigned int i, gpi_intr = 0;
|
||||
|
||||
for_each_port(adap, i)
|
||||
if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
|
||||
adapter_info(adap)->gpio_intr[i])
|
||||
gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
|
||||
return gpi_intr;
|
||||
}
|
||||
|
||||
/**
|
||||
* t3_intr_enable - enable interrupts
|
||||
* @adapter: the adapter whose interrupts should be enabled
|
||||
@ -1911,10 +1948,8 @@ void t3_intr_enable(adapter_t *adapter)
|
||||
t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
|
||||
}
|
||||
|
||||
t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW,
|
||||
adapter_info(adapter)->gpio_intr);
|
||||
t3_write_reg(adapter, A_T3DBG_INT_ENABLE,
|
||||
adapter_info(adapter)->gpio_intr);
|
||||
t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));
|
||||
|
||||
if (is_pcie(adapter))
|
||||
t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
|
||||
else
|
||||
@ -2559,6 +2594,20 @@ static void tp_wr_bits_indirect(adapter_t *adap, unsigned int addr,
|
||||
t3_write_reg(adap, A_TP_PIO_DATA, val);
|
||||
}
|
||||
|
||||
/**
|
||||
* t3_enable_filters - enable the HW filters
|
||||
* @adap: the adapter
|
||||
*
|
||||
* Enables the HW filters for NIC traffic.
|
||||
*/
|
||||
void t3_enable_filters(adapter_t *adap)
|
||||
{
|
||||
t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE, 0);
|
||||
t3_set_reg_field(adap, A_MC5_DB_CONFIG, 0, F_FILTEREN);
|
||||
t3_set_reg_field(adap, A_TP_GLOBAL_CONFIG, 0, V_FIVETUPLELOOKUP(3));
|
||||
tp_wr_bits_indirect(adap, A_TP_INGRESS_CONFIG, 0, F_LOOKUPEVERYPKT);
|
||||
}
|
||||
|
||||
/**
|
||||
* pm_num_pages - calculate the number of pages of the payload memory
|
||||
* @mem_size: the size of the payload memory
|
||||
@ -2659,10 +2708,10 @@ static void tp_config(adapter_t *adap, const struct tp_params *p)
|
||||
F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
|
||||
t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
|
||||
F_MTUENABLE | V_WINDOWSCALEMODE(1) |
|
||||
V_TIMESTAMPSMODE(0) | V_SACKMODE(1) | V_SACKRX(1));
|
||||
V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
|
||||
t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
|
||||
V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
|
||||
V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
|
||||
V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
|
||||
F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
|
||||
t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
|
||||
F_IPV6ENABLE | F_NICMODE);
|
||||
@ -2704,7 +2753,8 @@ static void tp_config(adapter_t *adap, const struct tp_params *p)
|
||||
|
||||
if (adap->params.nports > 2) {
|
||||
t3_set_reg_field(adap, A_TP_PC_CONFIG2, 0,
|
||||
F_ENABLETXPORTFROMDA | F_ENABLERXPORTFROMADDR);
|
||||
F_ENABLETXPORTFROMDA2 | F_ENABLETXPORTFROMDA |
|
||||
F_ENABLERXPORTFROMADDR);
|
||||
tp_wr_bits_indirect(adap, A_TP_QOS_RX_MAP_MODE,
|
||||
V_RXMAPMODE(M_RXMAPMODE), 0);
|
||||
tp_wr_indirect(adap, A_TP_INGRESS_CONFIG, V_BITPOS0(48) |
|
||||
@ -3619,6 +3669,8 @@ int t3_init_hw(adapter_t *adapter, u32 fw_params)
|
||||
chan_init_hw(adapter, adapter->params.chan_map);
|
||||
t3_sge_init(adapter, &adapter->params.sge);
|
||||
|
||||
t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
|
||||
|
||||
t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
|
||||
t3_write_reg(adapter, A_CIM_BOOT_CFG,
|
||||
V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
|
||||
|
@@ -45,6 +45,7 @@ enum {
        VSC8211_EXT_CTRL = 23,
        VSC8211_INTR_ENABLE = 25,
        VSC8211_INTR_STATUS = 26,
        VSC8211_LED_CTRL = 27,
        VSC8211_AUX_CTRL_STAT = 28,
        VSC8211_EXT_PAGE_AXS = 31,
};
@@ -393,8 +394,10 @@ int t3_vsc8211_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
        err = mdio_read(phy, 0, VSC8211_EXT_CTRL, &val);
        if (err)
                return err;
        if (val & VSC_CTRL_MEDIA_MODE_HI)
                return 0; /* copper interface, done */
        if (val & VSC_CTRL_MEDIA_MODE_HI) {
                /* copper interface, just need to configure the LEDs */
                return mdio_write(phy, 0, VSC8211_LED_CTRL, 0x100);
        }

        phy->caps = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
                    SUPPORTED_MII | SUPPORTED_FIBRE | SUPPORTED_IRQ;

@ -44,7 +44,6 @@ __FBSDID("$FreeBSD$");
|
||||
* # of exact address filters. The first one is used for the station address,
|
||||
* the rest are available for multicast addresses.
|
||||
*/
|
||||
#define EXACT_ADDR_FILTERS 8
|
||||
|
||||
static inline int macidx(const struct cmac *mac)
|
||||
{
|
||||
@ -159,16 +158,18 @@ int t3_mac_reset(struct cmac *mac)
|
||||
t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN);
|
||||
t3_write_reg(adap, A_XGM_RX_CTRL + oft, F_RXEN);
|
||||
}
|
||||
|
||||
t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + oft,
|
||||
V_RXMAXFRAMERSIZE(M_RXMAXFRAMERSIZE),
|
||||
V_RXMAXFRAMERSIZE(MAX_FRAME_SIZE) | F_RXENFRAMER);
|
||||
|
||||
val = F_MAC_RESET_ | F_XGMAC_STOP_EN;
|
||||
if (is_10G(adap) || mac->multiport)
|
||||
if (!mac->multiport)
|
||||
val |= F_XG2G_RESET_;
|
||||
if (uses_xaui(adap))
|
||||
val |= F_PCS_RESET_;
|
||||
else if (uses_xaui(adap))
|
||||
val |= F_PCS_RESET_ | F_XG2G_RESET_;
|
||||
else
|
||||
val |= F_RGMII_RESET_ | F_XG2G_RESET_;
|
||||
val |= F_RGMII_RESET_;
|
||||
t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
|
||||
(void) t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
|
||||
if ((val & F_PCS_RESET_) && adap->params.rev) {
|
||||
@ -188,10 +189,10 @@ static int t3b2_mac_reset(struct cmac *mac)
|
||||
|
||||
|
||||
/* Stop egress traffic to xgm*/
|
||||
if (!macidx(mac))
|
||||
t3_set_reg_field(adap, A_MPS_CFG, F_PORT0ACTIVE, 0);
|
||||
if (!macidx(mac))
|
||||
t3_set_reg_field(adap, A_MPS_CFG, F_PORT0ACTIVE, 0);
|
||||
else
|
||||
t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE, 0);
|
||||
t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE, 0);
|
||||
|
||||
/* PCS in reset */
|
||||
t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
|
||||
@ -223,15 +224,15 @@ static int t3b2_mac_reset(struct cmac *mac)
|
||||
msleep(1);
|
||||
t3b_pcs_reset(mac);
|
||||
}
|
||||
t3_write_reg(adap, A_XGM_RX_CFG + oft,
|
||||
t3_write_reg(adap, A_XGM_RX_CFG + oft,
|
||||
F_DISPAUSEFRAMES | F_EN1536BFRAMES |
|
||||
F_RMFCS | F_ENJUMBO | F_ENHASHMCAST );
|
||||
|
||||
/*Resume egress traffic to xgm*/
|
||||
if (!macidx(mac))
|
||||
t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT0ACTIVE);
|
||||
if (!macidx(mac))
|
||||
t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT0ACTIVE);
|
||||
else
|
||||
t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT1ACTIVE);
|
||||
t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT1ACTIVE);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -279,6 +280,9 @@ int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6])
|
||||
* Specify the number of exact address filters that should be reserved for
|
||||
* unicast addresses. Caller should reload the unicast and multicast
|
||||
* addresses after calling this.
|
||||
*
|
||||
* Generally, this is 1 with the first one used for the station address,
|
||||
* and the rest are available for multicast addresses.
|
||||
*/
|
||||
int t3_mac_set_num_ucast(struct cmac *mac, unsigned char n)
|
||||
{
|
||||
@ -385,7 +389,7 @@ static int rx_fifo_hwm(int mtu)
|
||||
*
|
||||
* Sets the MAC MTU and adjusts the FIFO PAUSE watermarks accordingly.
|
||||
*/
|
||||
int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
|
||||
int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
|
||||
{
|
||||
int hwm, lwm, divisor;
|
||||
int ipg;
|
||||
@ -413,7 +417,7 @@ int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
|
||||
|
||||
reg = adap->params.rev == T3_REV_B2 ?
|
||||
A_XGM_RX_MAX_PKT_SIZE_ERR_CNT : A_XGM_RXFIFO_CFG;
|
||||
|
||||
|
||||
/* drain RX FIFO */
|
||||
if (t3_wait_op_done(adap, reg + mac->offset,
|
||||
F_RXFIFO_EMPTY, 1, 20, 5)) {
|
||||
@ -428,9 +432,8 @@ int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
|
||||
enable_exact_filters(mac);
|
||||
} else
|
||||
t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset,
|
||||
V_RXMAXPKTSIZE(M_RXMAXPKTSIZE),
|
||||
V_RXMAXPKTSIZE(mtu));
|
||||
|
||||
V_RXMAXPKTSIZE(M_RXMAXPKTSIZE),
|
||||
V_RXMAXPKTSIZE(mtu));
|
||||
/*
|
||||
* Adjust the PAUSE frame watermarks. We always set the LWM, and the
|
||||
* HWM only if flow-control is enabled.
|
||||
@ -462,10 +465,10 @@ int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
|
||||
*/
|
||||
if (adap->params.rev > 0) {
|
||||
divisor = (adap->params.rev == T3_REV_C) ? 64 : 8;
|
||||
t3_write_reg(adap, A_XGM_PAUSE_TIMER + mac->offset,
|
||||
(hwm - lwm) * 4 / divisor);
|
||||
t3_write_reg(adap, A_XGM_PAUSE_TIMER + mac->offset,
|
||||
(hwm - lwm) * 4 / divisor);
|
||||
}
|
||||
t3_write_reg(adap, A_XGM_TX_PAUSE_QUANTA + mac->offset,
|
||||
t3_write_reg(adap, A_XGM_TX_PAUSE_QUANTA + mac->offset,
|
||||
MAC_RXFIFO_SIZE * 4 * 8 / 512);
|
||||
return 0;
|
||||
}
|
||||
@ -489,7 +492,7 @@ int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc)
|
||||
|
||||
if (duplex >= 0 && duplex != DUPLEX_FULL)
|
||||
return -EINVAL;
|
||||
if (mac->multiport) {
|
||||
if (mac->multiport) {
|
||||
val = t3_read_reg(adap, A_XGM_RXFIFO_CFG + oft);
|
||||
val &= ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM);
|
||||
val |= V_RXFIFOPAUSEHWM(rx_fifo_hwm(t3_read_reg(adap,
|
||||
@ -575,7 +578,7 @@ int t3_mac_enable(struct cmac *mac, int which)
|
||||
mac->txen = F_TXEN;
|
||||
mac->toggle_cnt = 0;
|
||||
}
|
||||
if (which & MAC_DIRECTION_RX)
|
||||
if (which & MAC_DIRECTION_RX)
|
||||
t3_write_reg(adap, A_XGM_RX_CTRL + oft, F_RXEN);
|
||||
return 0;
|
||||
}
|
||||
@ -673,10 +676,10 @@ rxcheck:
|
||||
if (rx_mcnt != mac->rx_mcnt) {
|
||||
rx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
|
||||
A_XGM_RX_SPI4_SOP_EOP_CNT +
|
||||
mac->offset))) +
|
||||
mac->offset))) +
|
||||
(s->rx_fifo_ovfl - mac->rx_ocnt);
|
||||
mac->rx_ocnt = s->rx_fifo_ovfl;
|
||||
} else
|
||||
} else
|
||||
goto out;
|
||||
|
||||
if (mac->rx_mcnt != s->rx_frames && rx_xcnt == 0 && mac->rx_xcnt == 0) {
|
||||
@ -684,8 +687,8 @@ rxcheck:
|
||||
status = 2;
|
||||
goto out;
|
||||
}
|
||||
|
||||
out:
|
||||
|
||||
out:
|
||||
mac->tx_tcnt = tx_tcnt;
|
||||
mac->tx_xcnt = tx_xcnt;
|
||||
mac->tx_mcnt = s->tx_frames;
|
||||
|
@@ -170,7 +170,7 @@ enum { TXQ_ETH = 0,
 * work request size in bytes
 */
#define WR_LEN (WR_FLITS * 8)
#define PIO_LEN (WR_LEN - sizeof(struct cpl_tx_pkt))
#define PIO_LEN (WR_LEN - sizeof(struct cpl_tx_pkt_lso))


/* careful, the following are set on priv_flags and must not collide with

@ -62,6 +62,7 @@ __FBSDID("$FreeBSD$");
|
||||
#include <net/if_dl.h>
|
||||
#include <net/if_media.h>
|
||||
#include <net/if_types.h>
|
||||
#include <net/if_vlan_var.h>
|
||||
|
||||
#include <netinet/in_systm.h>
|
||||
#include <netinet/in.h>
|
||||
@ -978,7 +979,7 @@ cxgb_port_attach(device_t dev)
|
||||
* Only default to jumbo frames on 10GigE
|
||||
*/
|
||||
if (p->adapter->params.nports <= 2)
|
||||
ifp->if_mtu = 9000;
|
||||
ifp->if_mtu = ETHERMTU_JUMBO;
|
||||
if ((err = cxgb_makedev(p)) != 0) {
|
||||
printf("makedev failed %d\n", err);
|
||||
return (err);
|
||||
@ -1242,13 +1243,23 @@ cxgb_link_start(struct port_info *p)
|
||||
struct ifnet *ifp;
|
||||
struct t3_rx_mode rm;
|
||||
struct cmac *mac = &p->mac;
|
||||
int mtu, hwtagging;
|
||||
|
||||
ifp = p->ifp;
|
||||
|
||||
bcopy(IF_LLADDR(ifp), p->hw_addr, ETHER_ADDR_LEN);
|
||||
|
||||
mtu = ifp->if_mtu;
|
||||
if (ifp->if_capenable & IFCAP_VLAN_MTU)
|
||||
mtu += ETHER_VLAN_ENCAP_LEN;
|
||||
|
||||
hwtagging = (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0;
|
||||
|
||||
t3_init_rx_mode(&rm, p);
|
||||
if (!mac->multiport)
|
||||
t3_mac_reset(mac);
|
||||
t3_mac_set_mtu(mac, ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
|
||||
t3_mac_set_mtu(mac, mtu);
|
||||
t3_set_vlan_accel(p->adapter, 1 << p->tx_chan, hwtagging);
|
||||
t3_mac_set_address(mac, 0, p->hw_addr);
|
||||
t3_mac_set_rx_mode(mac, &rm);
|
||||
t3_link_start(&p->phy, mac, &p->link_config);
|
||||
@ -1894,7 +1905,7 @@ cxgb_set_mtu(struct port_info *p, int mtu)
|
||||
struct ifnet *ifp = p->ifp;
|
||||
int error = 0;
|
||||
|
||||
if ((mtu < ETHERMIN) || (mtu > ETHER_MAX_LEN_JUMBO))
|
||||
if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
|
||||
error = EINVAL;
|
||||
else if (ifp->if_mtu != mtu) {
|
||||
PORT_LOCK(p);
|
||||
@ -1914,7 +1925,7 @@ cxgb_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
|
||||
struct port_info *p = ifp->if_softc;
|
||||
struct ifaddr *ifa = (struct ifaddr *)data;
|
||||
struct ifreq *ifr = (struct ifreq *)data;
|
||||
int flags, error = 0;
|
||||
int flags, error = 0, reinit = 0;
|
||||
uint32_t mask;
|
||||
|
||||
/*
|
||||
@ -1969,18 +1980,16 @@ cxgb_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
|
||||
if (IFCAP_TXCSUM & ifp->if_capenable) {
|
||||
ifp->if_capenable &= ~(IFCAP_TXCSUM|IFCAP_TSO4);
|
||||
ifp->if_hwassist &= ~(CSUM_TCP | CSUM_UDP
|
||||
| CSUM_TSO);
|
||||
| CSUM_IP | CSUM_TSO);
|
||||
} else {
|
||||
ifp->if_capenable |= IFCAP_TXCSUM;
|
||||
ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
|
||||
}
|
||||
} else if (mask & IFCAP_RXCSUM) {
|
||||
if (IFCAP_RXCSUM & ifp->if_capenable) {
|
||||
ifp->if_capenable &= ~IFCAP_RXCSUM;
|
||||
} else {
|
||||
ifp->if_capenable |= IFCAP_RXCSUM;
|
||||
ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP
|
||||
| CSUM_IP);
|
||||
}
|
||||
}
|
||||
if (mask & IFCAP_RXCSUM) {
|
||||
ifp->if_capenable ^= IFCAP_RXCSUM;
|
||||
}
|
||||
if (mask & IFCAP_TSO4) {
|
||||
if (IFCAP_TSO4 & ifp->if_capenable) {
|
||||
ifp->if_capenable &= ~IFCAP_TSO4;
|
||||
@ -1995,7 +2004,26 @@ cxgb_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
|
||||
error = EINVAL;
|
||||
}
|
||||
}
|
||||
if (mask & IFCAP_VLAN_HWTAGGING) {
|
||||
ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
|
||||
reinit = ifp->if_drv_flags & IFF_DRV_RUNNING;
|
||||
}
|
||||
if (mask & IFCAP_VLAN_MTU) {
|
||||
ifp->if_capenable ^= IFCAP_VLAN_MTU;
|
||||
reinit = ifp->if_drv_flags & IFF_DRV_RUNNING;
|
||||
}
|
||||
if (mask & IFCAP_VLAN_HWCSUM) {
|
||||
ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
|
||||
}
|
||||
if (reinit) {
|
||||
cxgb_stop_locked(p);
|
||||
cxgb_init_locked(p);
|
||||
}
|
||||
PORT_UNLOCK(p);
|
||||
|
||||
#ifdef VLAN_CAPABILITIES
|
||||
VLAN_CAPABILITIES(ifp);
|
||||
#endif
|
||||
break;
|
||||
default:
|
||||
error = ether_ioctl(ifp, command, data);
|
||||
@ -2116,9 +2144,11 @@ check_t3b2_mac(struct adapter *adapter)
|
||||
p->mac.stats.num_toggled++;
|
||||
else if (status == 2) {
|
||||
struct cmac *mac = &p->mac;
|
||||
int mtu = ifp->if_mtu;
|
||||
|
||||
t3_mac_set_mtu(mac, ifp->if_mtu + ETHER_HDR_LEN
|
||||
+ ETHER_VLAN_ENCAP_LEN);
|
||||
if (ifp->if_capenable & IFCAP_VLAN_MTU)
|
||||
mtu += ETHER_VLAN_ENCAP_LEN;
|
||||
t3_mac_set_mtu(mac, mtu);
|
||||
t3_mac_set_address(mac, 0, p->hw_addr);
|
||||
cxgb_set_rxmode(p);
|
||||
t3_link_start(&p->phy, mac, &p->link_config);
|
||||
@ -2424,7 +2454,7 @@ cxgb_extension_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data,
|
||||
if (t->intr_lat >= 0) {
|
||||
struct sge_qset *qs = &sc->sge.qs[t->qset_idx];
|
||||
|
||||
q->coalesce_nsecs = t->intr_lat*1000;
|
||||
q->coalesce_usecs = t->intr_lat;
|
||||
t3_update_qset_coalesce(qs, q);
|
||||
}
|
||||
break;
|
||||
@ -2444,7 +2474,7 @@ cxgb_extension_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data,
|
||||
t->fl_size[0] = q->fl_size;
|
||||
t->fl_size[1] = q->jumbo_size;
|
||||
t->polling = q->polling;
|
||||
t->intr_lat = q->coalesce_nsecs / 1000;
|
||||
t->intr_lat = q->coalesce_usecs;
|
||||
t->cong_thres = q->cong_thres;
|
||||
break;
|
||||
}
|
||||
|
@@ -55,6 +55,12 @@ $FreeBSD$
typedef struct adapter adapter_t;
struct sge_rspq;

enum {
        TP_TMR_RES = 200,       /* TP timer resolution in usec */
        MAX_NPORTS = 4,         /* max # of ports */
        TP_SRAM_OFFSET = 4096,  /* TP SRAM content offset in eeprom */
        TP_SRAM_LEN = 2112,     /* TP SRAM content offset in eeprom */
};

struct t3_mbuf_hdr {
        struct mbuf *mh_head;
@@ -388,6 +394,9 @@ static const int debug_flags = DBG_RX;
#define ADVERTISE_1000XPSE_ASYM ANAR_X_PAUSE_ASYM
#define ADVERTISE_1000XPAUSE ANAR_X_PAUSE_SYM

#define ADVERTISE_CSMA ANAR_CSMA
#define ADVERTISE_NPAGE ANAR_NP


/* Standard PCI Extended Capaibilities definitions */
#define PCI_CAP_ID_VPD 0x03

@ -394,12 +394,12 @@ t3_sge_prep(adapter_t *adap, struct sge_params *p)
|
||||
struct qset_params *q = p->qset + i;
|
||||
|
||||
if (adap->params.nports > 2) {
|
||||
q->coalesce_nsecs = 50000;
|
||||
q->coalesce_usecs = 50;
|
||||
} else {
|
||||
#ifdef INVARIANTS
|
||||
q->coalesce_nsecs = 10000;
|
||||
q->coalesce_usecs = 10;
|
||||
#else
|
||||
q->coalesce_nsecs = 5000;
|
||||
q->coalesce_usecs = 5;
|
||||
#endif
|
||||
}
|
||||
q->polling = adap->params.rev > 0;
|
||||
@ -490,7 +490,7 @@ void
|
||||
t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
|
||||
{
|
||||
|
||||
qs->rspq.holdoff_tmr = max(p->coalesce_nsecs/100, 1U);
|
||||
qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);
|
||||
qs->rspq.polling = 0 /* p->polling */;
|
||||
}
|
||||
|
||||
@ -1314,6 +1314,10 @@ t3_encap(struct sge_qset *qs, struct mbuf **m, int count)
|
||||
cntrl = V_TXPKT_INTF(pi->txpkt_intf);
|
||||
GET_VTAG_MI(cntrl, batchmi);
|
||||
cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
|
||||
if (__predict_false(!(m0->m_pkthdr.csum_flags & CSUM_IP)))
|
||||
cntrl |= F_TXPKT_IPCSUM_DIS;
|
||||
if (__predict_false(!(m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))))
|
||||
cntrl |= F_TXPKT_L4CSUM_DIS;
|
||||
cbe->cntrl = htonl(cntrl);
|
||||
cbe->len = htonl(batchmi->mi_len | 0x80000000);
|
||||
cbe->addr = htobe64(segs[i].ds_addr);
|
||||
@ -1343,7 +1347,7 @@ t3_encap(struct sge_qset *qs, struct mbuf **m, int count)
|
||||
tmpmi = mv->mv_vec;
|
||||
|
||||
txd->flit[2] = 0;
|
||||
GET_VTAG_MI(cntrl, mi);
|
||||
GET_VTAG(cntrl, m0);
|
||||
cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
|
||||
hdr->cntrl = htonl(cntrl);
|
||||
mlen = m0->m_pkthdr.len;
|
||||
@ -1356,7 +1360,10 @@ t3_encap(struct sge_qset *qs, struct mbuf **m, int count)
|
||||
|
||||
if (__predict_false(undersized)) {
|
||||
pkthdr = tmp;
|
||||
dump_mi(mi);
|
||||
if (mi)
|
||||
dump_mi(mi);
|
||||
printf("mbuf=%p,len=%d,tso_segsz=%d,csum_flags=%#x,flags=%#x",
|
||||
m0, mlen, m0->m_pkthdr.tso_segsz, m0->m_pkthdr.csum_flags, m0->m_flags);
|
||||
panic("discontig packet - fixxorz");
|
||||
} else
|
||||
pkthdr = m0->m_data;
|
||||
@ -1376,12 +1383,37 @@ t3_encap(struct sge_qset *qs, struct mbuf **m, int count)
|
||||
V_LSO_IPHDR_WORDS(ip->ip_hl) |
|
||||
V_LSO_TCPHDR_WORDS(tcp->th_off);
|
||||
hdr->lso_info = htonl(tso_info);
|
||||
|
||||
if (__predict_false(mlen <= PIO_LEN)) {
|
||||
/* pkt not undersized but fits in PIO_LEN */
|
||||
printf("**5592 Fix** mbuf=%p,len=%d,tso_segsz=%d,csum_flags=%#x,flags=%#x",
|
||||
m0, mlen, m0->m_pkthdr.tso_segsz, m0->m_pkthdr.csum_flags, m0->m_flags);
|
||||
txq_prod(txq, 1, &txqs);
|
||||
m_copydata(m0, 0, mlen, (caddr_t)&txd->flit[3]);
|
||||
m_freem(m0);
|
||||
m0 = NULL;
|
||||
flits = (mlen + 7) / 8 + 3;
|
||||
hdr->wr.wr_hi = htonl(V_WR_BCNTLFLT(mlen & 7) |
|
||||
V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) |
|
||||
F_WR_SOP | F_WR_EOP | txqs.compl);
|
||||
wmb();
|
||||
hdr->wr.wr_lo = htonl(V_WR_LEN(flits) |
|
||||
V_WR_GEN(txqs.gen) | V_WR_TID(txq->token));
|
||||
|
||||
wr_gen2(txd, txqs.gen);
|
||||
check_ring_tx_db(sc, txq);
|
||||
return (0);
|
||||
}
|
||||
flits = 3;
|
||||
} else {
|
||||
struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)txd;
|
||||
|
||||
GET_VTAG(cntrl, m0);
|
||||
cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
|
||||
if (__predict_false(!(m0->m_pkthdr.csum_flags & CSUM_IP)))
|
||||
cntrl |= F_TXPKT_IPCSUM_DIS;
|
||||
if (__predict_false(!(m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))))
|
||||
cntrl |= F_TXPKT_L4CSUM_DIS;
|
||||
cpl->cntrl = htonl(cntrl);
|
||||
mlen = m0->m_pkthdr.len;
|
||||
cpl->len = htonl(mlen | 0x80000000);
|
||||
@ -3224,11 +3256,11 @@ t3_lro_enable(SYSCTL_HANDLER_ARGS)
|
||||
}
|
||||
|
||||
static int
|
||||
t3_set_coalesce_nsecs(SYSCTL_HANDLER_ARGS)
|
||||
t3_set_coalesce_usecs(SYSCTL_HANDLER_ARGS)
|
||||
{
|
||||
adapter_t *sc = arg1;
|
||||
struct qset_params *qsp = &sc->params.sge.qset[0];
|
||||
int coalesce_nsecs;
|
||||
int coalesce_usecs;
|
||||
struct sge_qset *qs;
|
||||
int i, j, err, nqsets = 0;
|
||||
struct mtx *lock;
|
||||
@ -3236,25 +3268,25 @@ t3_set_coalesce_nsecs(SYSCTL_HANDLER_ARGS)
|
||||
if ((sc->flags & FULL_INIT_DONE) == 0)
|
||||
return (ENXIO);
|
||||
|
||||
coalesce_nsecs = qsp->coalesce_nsecs;
|
||||
err = sysctl_handle_int(oidp, &coalesce_nsecs, arg2, req);
|
||||
coalesce_usecs = qsp->coalesce_usecs;
|
||||
err = sysctl_handle_int(oidp, &coalesce_usecs, arg2, req);
|
||||
|
||||
if (err != 0) {
|
||||
return (err);
|
||||
}
|
||||
if (coalesce_nsecs == qsp->coalesce_nsecs)
|
||||
if (coalesce_usecs == qsp->coalesce_usecs)
|
||||
return (0);
|
||||
|
||||
for (i = 0; i < sc->params.nports; i++)
|
||||
for (j = 0; j < sc->port[i].nqsets; j++)
|
||||
nqsets++;
|
||||
|
||||
coalesce_nsecs = max(100, coalesce_nsecs);
|
||||
coalesce_usecs = max(1, coalesce_usecs);
|
||||
|
||||
for (i = 0; i < nqsets; i++) {
|
||||
qs = &sc->sge.qs[i];
|
||||
qsp = &sc->params.sge.qset[i];
|
||||
qsp->coalesce_nsecs = coalesce_nsecs;
|
||||
qsp->coalesce_usecs = coalesce_usecs;
|
||||
|
||||
lock = (sc->flags & USING_MSIX) ? &qs->rspq.lock :
|
||||
&sc->sge.qs[0].rspq.lock;
|
||||
@ -3357,8 +3389,8 @@ t3_add_configured_sysctls(adapter_t *sc)
|
||||
SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
|
||||
"intr_coal",
|
||||
CTLTYPE_INT|CTLFLAG_RW, sc,
|
||||
0, t3_set_coalesce_nsecs,
|
||||
"I", "interrupt coalescing timer (ns)");
|
||||
0, t3_set_coalesce_usecs,
|
||||
"I", "interrupt coalescing timer (us)");
|
||||
|
||||
for (i = 0; i < sc->params.nports; i++) {
|
||||
struct port_info *pi = &sc->port[i];
|
||||
|
@@ -1674,12 +1674,17 @@ t3_tcp_ctloutput(struct socket *so, struct sockopt *sopt)
        if (sopt->sopt_name != TCP_CONGESTION &&
            sopt->sopt_name != TCP_NODELAY)
                return (EOPNOTSUPP);


        if (sopt->sopt_name == TCP_CONGESTION) {
                char name[TCP_CA_NAME_MAX];
                int optlen = sopt->sopt_valsize;
                struct tcpcb *tp;

                if (sopt->sopt_dir == SOPT_GET) {
                        KASSERT(0, ("unimplemented"));
                        return (EOPNOTSUPP);
                }

                if (optlen < 1)
                        return (EINVAL);

@@ -1705,6 +1710,9 @@ t3_tcp_ctloutput(struct socket *so, struct sockopt *sopt)
                struct inpcb *inp;
                struct tcpcb *tp;

                if (sopt->sopt_dir == SOPT_GET)
                        return (EOPNOTSUPP);

                err = sooptcopyin(sopt, &optval, sizeof optval,
                                  sizeof optval);

@@ -1733,3 +1733,9 @@ DB_SHOW_COMMAND(intrcnt, db_show_intrcnt)
        }
}
#endif

struct thread *
intr_handler_thread(struct intr_handler *ih)
{
        return (ih->ih_event->ie_thread->it_thread);
}

@ -32,6 +32,7 @@ __FBSDID("$FreeBSD$");
|
||||
#include <sys/bus.h>
|
||||
#include <sys/interrupt.h>
|
||||
#include <sys/kernel.h>
|
||||
#include <sys/ktr.h>
|
||||
#include <sys/kthread.h>
|
||||
#include <sys/lock.h>
|
||||
#include <sys/malloc.h>
|
||||
@ -48,6 +49,8 @@ static void *taskqueue_ih;
|
||||
static STAILQ_HEAD(taskqueue_list, taskqueue) taskqueue_queues;
|
||||
static struct mtx taskqueue_queues_mutex;
|
||||
|
||||
STAILQ_HEAD(task_head, task);
|
||||
|
||||
struct taskqueue {
|
||||
STAILQ_ENTRY(taskqueue) tq_link;
|
||||
STAILQ_HEAD(, task) tq_queue;
|
||||
@ -58,31 +61,35 @@ struct taskqueue {
|
||||
struct mtx tq_mutex;
|
||||
struct thread **tq_threads;
|
||||
int tq_tcount;
|
||||
int tq_spin;
|
||||
int tq_flags;
|
||||
};
|
||||
|
||||
#define TQ_FLAGS_ACTIVE (1 << 0)
|
||||
#define TQ_FLAGS_BLOCKED (1 << 1)
|
||||
#define TQ_FLAGS_PENDING (1 << 2)
|
||||
#define TQ_FLAGS_SPIN (1 << 3)
|
||||
#define TQ_FLAGS_NOWAKEUP (1 << 4)
|
||||
#define TQ_FLAGS_RUNNING (1 << 5)
|
||||
|
||||
static __inline void
|
||||
TQ_LOCK(struct taskqueue *tq)
|
||||
{
|
||||
if (tq->tq_spin)
|
||||
mtx_lock_spin(&tq->tq_mutex);
|
||||
else
|
||||
mtx_lock(&tq->tq_mutex);
|
||||
}
|
||||
#define TQ_LOCK(tq) \
|
||||
do { \
|
||||
\
|
||||
if (tq->tq_flags & TQ_FLAGS_SPIN) \
|
||||
mtx_lock_spin(&tq->tq_mutex); \
|
||||
else \
|
||||
mtx_lock(&tq->tq_mutex); \
|
||||
} while (0)
|
||||
|
||||
|
||||
#define TQ_UNLOCK(tq) \
|
||||
do { \
|
||||
\
|
||||
if (tq->tq_flags & TQ_FLAGS_SPIN) \
|
||||
mtx_unlock_spin(&tq->tq_mutex); \
|
||||
else \
|
||||
mtx_unlock(&tq->tq_mutex); \
|
||||
} while (0)
|
||||
|
||||
static __inline void
|
||||
TQ_UNLOCK(struct taskqueue *tq)
|
||||
{
|
||||
if (tq->tq_spin)
|
||||
mtx_unlock_spin(&tq->tq_mutex);
|
||||
else
|
||||
mtx_unlock(&tq->tq_mutex);
|
||||
}
|
||||
|
||||
static void init_taskqueue_list(void *data);
|
||||
|
||||
@ -90,7 +97,7 @@ static __inline int
|
||||
TQ_SLEEP(struct taskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
|
||||
int t)
|
||||
{
|
||||
if (tq->tq_spin)
|
||||
if (tq->tq_flags & TQ_FLAGS_SPIN)
|
||||
return (msleep_spin(p, m, wm, t));
|
||||
return (msleep(p, m, pri, wm, t));
|
||||
}
|
||||
@ -111,17 +118,18 @@ _taskqueue_create(const char *name, int mflags,
    int mtxflags, const char *mtxname)
{
    struct taskqueue *queue;

    int spin;


    queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
    if (!queue)
        return 0;

    spin = ((mtxflags & MTX_SPIN) ? TQ_FLAGS_SPIN : 0);
    STAILQ_INIT(&queue->tq_queue);
    queue->tq_name = name;
    queue->tq_enqueue = enqueue;
    queue->tq_context = context;
    queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
    queue->tq_flags |= TQ_FLAGS_ACTIVE;
    queue->tq_flags |= TQ_FLAGS_ACTIVE | spin;
    mtx_init(&queue->tq_mutex, mtxname, NULL, mtxflags);

    mtx_lock(&taskqueue_queues_mutex);
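In the hunk above the MTX_SPIN mutex flag is translated into TQ_FLAGS_SPIN at creation time, so the locking and sleeping paths can key off tq_flags rather than the separate tq_spin field. The sketch below shows the two resulting queue flavors; it assumes, as the existing wrappers suggest, that taskqueue_create_fast() passes MTX_SPIN down to _taskqueue_create() while taskqueue_create() does not, and it omits thread startup for brevity.

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/taskqueue.h>

/* Sketch: one sleep-mutex queue, one spin-mutex ("fast") queue. */
static struct taskqueue *example_sleep_tq;  /* TQ_FLAGS_SPIN clear: mtx_lock()/msleep() path */
static struct taskqueue *example_fast_tq;   /* TQ_FLAGS_SPIN set: mtx_lock_spin()/msleep_spin() path */

static void
example_create_queues(void)
{
    example_sleep_tq = taskqueue_create("ex_sleep", M_NOWAIT,
        taskqueue_thread_enqueue, &example_sleep_tq);
    example_fast_tq = taskqueue_create_fast("ex_fast", M_NOWAIT,
        taskqueue_thread_enqueue, &example_fast_tq);
    /* taskqueue_start_threads() calls omitted from this sketch. */
}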
@ -200,8 +208,14 @@ taskqueue_enqueue(struct taskqueue *queue, struct task *task)
    /*
     * Count multiple enqueues.
     */
    if (task->ta_pending) {
    if (task->ta_pending || (task->ta_flags & TA_REFERENCED)) {
        task->ta_pending++;
        /*
         * overflow
         */
        if (task->ta_pending == 0)
            task->ta_pending--;

        TQ_UNLOCK(queue);
        return 0;
    }
@ -226,9 +240,9 @@ taskqueue_enqueue(struct taskqueue *queue, struct task *task)
    }

    task->ta_pending = 1;
    if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
    if ((queue->tq_flags & (TQ_FLAGS_BLOCKED|TQ_FLAGS_RUNNING)) == 0)
        queue->tq_enqueue(queue->tq_context);
    else
    else if (queue->tq_flags & TQ_FLAGS_BLOCKED)
        queue->tq_flags |= TQ_FLAGS_PENDING;

    TQ_UNLOCK(queue);
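With this change taskqueue_enqueue() only kicks the queue's tq_enqueue callback when the queue is neither blocked nor already running; a task enqueued while TQ_FLAGS_RUNNING is set simply stays on tq_queue and is picked up by the restart check in taskqueue_run_drv() further down. The blocked case is sketched below, assuming the pre-existing taskqueue_block()/taskqueue_unblock() interface behaves as the BLOCKED/PENDING flag handling here suggests; the task and function names are illustrative.

#include <sys/param.h>
#include <sys/taskqueue.h>

/* Sketch: while a queue is blocked, enqueues are recorded but not dispatched. */
static struct task example_task;    /* assumed to be initialized elsewhere */

static void
example_quiesce_window(struct taskqueue *tq)
{
    taskqueue_block(tq);                    /* sets TQ_FLAGS_BLOCKED */
    taskqueue_enqueue(tq, &example_task);   /* queued; only TQ_FLAGS_PENDING is set */
    /* ... reconfigure hardware while nothing dispatches ... */
    taskqueue_unblock(tq);                  /* clears BLOCKED; pending work is kicked off */
}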
@ -297,7 +311,7 @@ taskqueue_run(struct taskqueue *queue)
void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{
    if (queue->tq_spin) { /* XXX */
    if (queue->tq_flags & TQ_FLAGS_SPIN) { /* XXX */
        mtx_lock_spin(&queue->tq_mutex);
        while (task->ta_pending != 0 || task == queue->tq_running)
            msleep_spin(task, &queue->tq_mutex, "-", 0);
@ -465,3 +479,138 @@ taskqueue_fast_run(void *dummy)
TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, 0,
    swi_add(NULL, "Fast task queue", taskqueue_fast_run, NULL,
    SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih));

static void
taskqueue_run_drv(void *arg)
{
    struct task *task, *tmp;
    struct task_head current;
    int restarts = 0;
    struct taskqueue *queue = (struct taskqueue *) arg;

    STAILQ_INIT(&current);
    /*
     * First we move all of the tasks off of the taskqueue's list
     * on to current on the stack to avoid repeated serialization.
     */
    mtx_lock_spin(&queue->tq_mutex);
    queue->tq_flags |= TQ_FLAGS_RUNNING;
restart:
    STAILQ_CONCAT(&current, &queue->tq_queue);
    STAILQ_FOREACH(task, &current, ta_link) {
        /*
         * Let taskqueue_enqueue_fast know that this task has been
         * dequeued but is still referenced.  Clear pending so that,
         * if pending is set again later, we know the task needs to be
         * re-enqueued even if it doesn't return TA_NO_DEQUEUE.
         */
        task->ta_ppending = task->ta_pending;
        task->ta_pending = 0;
        task->ta_flags |= TA_REFERENCED;
    }
    mtx_unlock_spin(&queue->tq_mutex);
    STAILQ_FOREACH(task, &current, ta_link) {
        task->ta_rc = task->ta_drv_func(task->ta_context, task->ta_ppending);
    }
    /*
     * We've gotten here so we know that we've run the tasks that were
     * on the taskqueue list on the first pass.
     */
    mtx_lock_spin(&queue->tq_mutex);
    STAILQ_FOREACH_SAFE(task, &current, ta_link, tmp) {
        if (task->ta_rc != TA_NO_DEQUEUE && task->ta_pending == 0) {
            STAILQ_REMOVE(&current, task, task, ta_link);
            task->ta_flags &= ~TA_REFERENCED;
        }
        task->ta_ppending = 0;
        task->ta_rc = 0;
    }
    /*
     * Restart if there are any tasks in the list.
     */
    if (STAILQ_FIRST(&current) || STAILQ_FIRST(&queue->tq_queue)) {
        restarts++;
        goto restart;
    }
    queue->tq_flags &= ~TQ_FLAGS_RUNNING;
    mtx_unlock_spin(&queue->tq_mutex);
    CTR2(KTR_INTR, "queue=%s returning from taskqueue_run_drv after %d restarts",
        queue->tq_name, restarts);
}

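taskqueue_run_drv() hands each task's saved enqueue count to ta_drv_func and uses the return value to decide whether the task is dequeued. A minimal sketch of a handler written against that contract follows; it relies on the task_drv_fn_t, TA_COMPLETE and TA_NO_DEQUEUE definitions added to sys/sys/_task.h later in this commit, and the softc fields are purely illustrative.

#include <sys/param.h>
#include <sys/taskqueue.h>

/* Sketch of a driver task handler following the task_drv_fn_t contract. */
struct example_softc {
    int work_remaining;     /* illustrative bookkeeping only */
};

static int
example_drv_handler(void *context, int pending)
{
    struct example_softc *sc = context;

    /* 'pending' is how many times the task was enqueued since the last run. */
    sc->work_remaining -= pending;
    if (sc->work_remaining > 0)
        return (TA_NO_DEQUEUE); /* stay queued; taskqueue_run_drv() restarts */
    return (TA_COMPLETE);       /* done; the task is dequeued */
}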
static void
taskqueue_drv_schedule(void *context)
{
    swi_sched(context, 0);
}

struct taskqueue *
taskqueue_define_drv(void *arg, const char *name)
{
    struct taskqueue *tq;
    struct thread *td;

    tq = malloc(sizeof(struct taskqueue), M_TASKQUEUE,
        M_NOWAIT | M_ZERO);
    if (!tq) {
        printf("%s: Unable to allocate fast drv task queue!\n",
            __func__);
        return (NULL);
    }

    STAILQ_INIT(&tq->tq_queue);
    tq->tq_name = name;
    tq->tq_enqueue = taskqueue_drv_schedule;
    tq->tq_flags = (TQ_FLAGS_ACTIVE | TQ_FLAGS_SPIN | TQ_FLAGS_NOWAKEUP);
    mtx_init(&tq->tq_mutex, name, NULL, MTX_SPIN);

    mtx_lock(&taskqueue_queues_mutex);
    STAILQ_INSERT_TAIL(&taskqueue_queues, tq, tq_link);
    mtx_unlock(&taskqueue_queues_mutex);

    swi_add(NULL, name, taskqueue_run_drv,
        tq, SWI_NET, INTR_MPSAFE, &tq->tq_context);
    td = intr_handler_thread((struct intr_handler *) tq->tq_context);
    return (tq);
}

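A driver would typically create one of these SWI-backed queues at attach time and feed it tasks from its interrupt path. The sketch below is an assumed usage pattern built from the functions added in this commit, reusing the handler and softc sketched above; the field-by-field task setup is written out by hand because no driver-task init macro is added here, and the use of taskqueue_enqueue_fast() is an assumption consistent with the MTX_SPIN mutex chosen by taskqueue_define_drv().

/* Sketch: defining a driver taskqueue and queueing a driver task on it. */
static struct taskqueue *example_drv_tq;
static struct task example_drv_task;

static int
example_attach(struct example_softc *sc)
{
    example_drv_tq = taskqueue_define_drv(sc, "example_drv");
    if (example_drv_tq == NULL)
        return (ENOMEM);

    /* Hand-rolled init; ta_drv_func resolves to the union member. */
    example_drv_task.ta_priority = 0;
    example_drv_task.ta_drv_func = example_drv_handler;
    example_drv_task.ta_context = sc;
    return (0);
}

static void
example_intr(void *arg __unused)
{
    /* Defer the real work to the driver taskqueue. */
    taskqueue_enqueue_fast(example_drv_tq, &example_drv_task);
}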
struct intr_handler *
taskqueue_drv_handler(struct taskqueue *tq)
{
    return ((struct intr_handler *) tq->tq_context);
}

struct thread *
taskqueue_drv_thread(void *context)
{
    struct taskqueue *tq = (struct taskqueue *) context;

    return (intr_handler_thread((struct intr_handler *) tq->tq_context));
}

/*
 * The caller must make sure that no new tasks are being queued
 * before calling this.
 */
void
taskqueue_free_drv(struct taskqueue *queue)
{
    struct intr_thread *ithd;
    struct intr_event *ie;

    mtx_lock(&taskqueue_queues_mutex);
    STAILQ_REMOVE(&taskqueue_queues, queue, taskqueue, tq_link);
    mtx_unlock(&taskqueue_queues_mutex);

    ie = ((struct intr_handler *)(queue->tq_context))->ih_event;
    ithd = ie->ie_thread;
    swi_remove(queue->tq_context);
    intr_event_destroy(ie);

    mtx_lock_spin(&queue->tq_mutex);
    taskqueue_run(queue);
    mtx_destroy(&queue->tq_mutex);
    free(queue, M_TASKQUEUE);
}

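On detach, the queue from the sketch above would be torn down with taskqueue_free_drv(); as the comment notes, the caller must first guarantee that nothing is still enqueueing. A short, assumed detach-side counterpart follows (the hardware-stop routine is hypothetical).

/* Sketch: detach-side teardown for the driver taskqueue created at attach. */
static int
example_detach(struct example_softc *sc)
{
    example_stop_hw(sc);    /* hypothetical: quiesce interrupts so no new tasks are queued */

    /* Removes the SWI handler and runs any leftover tasks before freeing. */
    taskqueue_free_drv(example_drv_tq);
    example_drv_tq = NULL;
    return (0);
}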
@ -6,7 +6,7 @@ CXGB = ${.CURDIR}/../../../dev/cxgb
KMOD= if_cxgb
SRCS= cxgb_mc5.c cxgb_vsc8211.c cxgb_ael1002.c cxgb_mv88e1xxx.c
SRCS+= cxgb_xgmac.c cxgb_vsc7323.c cxgb_t3_hw.c cxgb_main.c
SRCS+= cxgb_sge.c cxgb_lro.c cxgb_offload.c
SRCS+= cxgb_sge.c cxgb_lro.c cxgb_offload.c cxgb_tn1010.c
SRCS+= device_if.h bus_if.h pci_if.h opt_zero.h opt_sched.h
SRCS+= uipc_mvec.c cxgb_support.c cxgb_multiq.c

@ -38,13 +38,31 @@
 * times the task was enqueued before the call to taskqueue_run().
 */
typedef void task_fn_t(void *context, int pending);
typedef int task_drv_fn_t(void *context, int pending);

struct task {
    STAILQ_ENTRY(task) ta_link;     /* link for queue */
    u_short ta_pending;             /* count times queued */
    u_short ta_priority;            /* Priority */
    task_fn_t *ta_func;             /* task handler */
    u_short ta_pending;             /* count times queued */
    u_short ta_priority;            /* Priority */
    uint16_t ta_ppending;           /* previous pending value */
    uint8_t ta_rc;                  /* last return code */
    uint8_t ta_flags;               /* flag state */
    union {
        task_fn_t *_ta_func;            /* task handler */
        task_drv_fn_t *_ta_drv_func;    /* task handler */
    } u;

    void *ta_context;               /* argument for handler */
};

#define ta_func u._ta_func
#define ta_drv_func u._ta_drv_func


#define TA_COMPLETE 0x0
#define TA_NO_DEQUEUE 0x1

#define TA_REFERENCED (1 << 0)


#endif /* !_SYS__TASK_H_ */
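With ta_func and ta_drv_func aliased onto the same union, each task carries exactly one flavor of handler while existing TASK_INIT() users keep compiling through the #define aliases. A small, assumed illustration of the two initialization styles (handler names are placeholders):

#include <sys/param.h>
#include <sys/taskqueue.h>

/* Sketch: one task with a classic handler, one with a driver handler. */
static void classic_handler(void *ctx, int pending);    /* task_fn_t */
static int drv_handler(void *ctx, int pending);         /* task_drv_fn_t */

static struct task classic_task;
static struct task drv_task;

static void
example_init_tasks(void *ctx)
{
    TASK_INIT(&classic_task, 0, classic_handler, ctx);  /* stores u._ta_func */

    drv_task.ta_priority = 0;
    drv_task.ta_drv_func = drv_handler;                 /* stores u._ta_drv_func */
    drv_task.ta_context = ctx;
}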
@ -167,5 +167,6 @@ int swi_add(struct intr_event **eventp, const char *name,
    void **cookiep);
void swi_sched(void *cookie, int flags);
int swi_remove(void *cookie);
struct thread *intr_handler_thread(struct intr_handler *ih);

#endif
@ -75,6 +75,8 @@ void taskqueue_thread_enqueue(void *context);
    (task)->ta_priority = (priority);   \
    (task)->ta_func = (func);           \
    (task)->ta_context = (context);     \
    (task)->ta_ppending = 0;            \
    (task)->ta_rc = 0;                  \
} while (0)

/*
@ -158,4 +160,10 @@ struct taskqueue *taskqueue_create_fast(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue,
    void *context);

struct taskqueue *taskqueue_define_drv(void *arg, const char *name);
struct thread *taskqueue_drv_thread(void *context);
struct intr_handler *taskqueue_drv_handler(struct taskqueue *);
void taskqueue_free_drv(struct taskqueue *queue);


#endif /* !_SYS_TASKQUEUE_H_ */