@@ -622,6 +622,7 @@ struct qib_pportdata {
u8 link_speed_active;
u8 vls_supported;
u8 vls_operational;
+ u8 n_krcv_queues;
/* Rx Polarity inversion (compensate for ~tx on partner) */
u8 rx_pol_inv;
@@ -1036,7 +1037,6 @@ struct qib_devdata {
u8 num_pports;
/* Lowest context number which can be used by user processes */
u8 first_user_ctxt;
- u8 n_krcv_queues;
u8 qpn_mask;
u8 skip_kctxt_mask;
@@ -2071,7 +2071,7 @@ static void qib_6120_config_ctxts(struct qib_devdata *dd)
dd->qpn_mask = dd->first_user_ctxt <= 2 ? 2 : 6;
} else
dd->first_user_ctxt = dd->num_pports;
- dd->n_krcv_queues = dd->first_user_ctxt;
+ dd->pport[0].n_krcv_queues = dd->first_user_ctxt;
}
static void qib_update_6120_usrhead(struct qib_ctxtdata *rcd, u64 hd,
@@ -2305,7 +2305,7 @@ static void qib_7220_config_ctxts(struct qib_devdata *dd)
dd->first_user_ctxt = nchipctxts;
} else
dd->first_user_ctxt = dd->num_pports;
- dd->n_krcv_queues = dd->first_user_ctxt;
+ dd->pport[0].n_krcv_queues = dd->first_user_ctxt;
if (!cfgctxts) {
int nctxts = dd->first_user_ctxt + num_online_cpus();
@@ -3174,7 +3174,8 @@ try_intx:
snprintf(dd->cspec->msix_entries[msixnum].name,
sizeof(dd->cspec->msix_entries[msixnum].name)
- 1,
- QIB_DRV_NAME "%d (kctx)", dd->unit);
+ QIB_DRV_NAME "%d:%d (kctx)", dd->unit,
+ ((struct qib_ctxtdata *)arg)->ppd->port);
}
ret = request_irq(
dd->cspec->msix_entries[msixnum].msix.vector,
@@ -3593,21 +3594,27 @@ qib_7322_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
static void qib_7322_config_ctxts(struct qib_devdata *dd)
{
unsigned long flags;
- u32 nchipctxts;
+ u32 nchipctxts, nkrcvqs;
u32 cfgctxts = QIB_MODPARAM_GET(cfgctxts, dd->unit, 0);
- u32 nkrcvqs = QIB_MODPARAM_GET(krcvqs, dd->unit, 0);
+ u8 pidx;
nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
dd->cspec->numctxts = nchipctxts;
- if (nkrcvqs > 1 && dd->num_pports) {
- dd->first_user_ctxt = NUM_IB_PORTS +
- (nkrcvqs - 1) * dd->num_pports;
- if (dd->first_user_ctxt > nchipctxts)
- dd->first_user_ctxt = nchipctxts;
- dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports;
- } else {
- dd->first_user_ctxt = NUM_IB_PORTS;
- dd->n_krcv_queues = 1;
+ dd->first_user_ctxt = NUM_IB_PORTS;
+
+ for (pidx = 0; pidx < dd->num_pports; pidx++) {
+ nkrcvqs = QIB_MODPARAM_GET(krcvqs, dd->unit, pidx+1);
+ if (nkrcvqs > 1) {
+ if (nkrcvqs - 1 > nchipctxts - dd->first_user_ctxt)
+ dd->pport[pidx].n_krcv_queues =
+ (nchipctxts - dd->first_user_ctxt) + 1;
+ else
+ dd->pport[pidx].n_krcv_queues = nkrcvqs;
+ dd->first_user_ctxt +=
+ dd->pport[pidx].n_krcv_queues - 1;
+ } else
+ /* Account for the HW ctxt */
+ dd->pport[pidx].n_krcv_queues = 1;
}
if (!cfgctxts) {
@@ -5995,11 +6002,11 @@ static void write_7322_initregs(struct qib_devdata *dd)
qib_write_kreg(dd, KREG_IDX(RcvQPMulticastContext_1), 1);
for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- unsigned n, regno;
+ unsigned i, n, regno, ctxts[18];
unsigned long flags;
- if (dd->n_krcv_queues < 2 ||
- !dd->pport[pidx].link_speed_supported)
+ if (dd->pport[pidx].n_krcv_queues == 1 ||
+ !dd->pport[pidx].link_speed_supported)
continue;
ppd = &dd->pport[pidx];
@@ -6012,19 +6019,21 @@ static void write_7322_initregs(struct qib_devdata *dd)
 	/* Initialize QP to context mapping */
 	regno = krp_rcvqpmaptable;
 	val = 0;
-	if (dd->num_pports > 1)
-		n = dd->first_user_ctxt / dd->num_pports;
-	else
-		n = dd->first_user_ctxt - 1;
+	for (i = 0, n = 0; n < dd->first_user_ctxt; n++) {
+		if (dd->skip_kctxt_mask & (1 << n))
+			continue;
+		if (dd->rcd[n]->ppd->port == pidx+1)
+			ctxts[i++] = n;
+		if (i == ppd->n_krcv_queues)
+			break;
+	}
+	if (!i)		/* defensive: fall back to this port's HW ctxt */
+		ctxts[i++] = ppd->hw_pidx;
+	n = i;		/* cycle only over the ctxts actually found */
 	for (i = 0; i < 32; ) {
 		unsigned ctxt;
-		if (dd->num_pports > 1)
-			ctxt = (i % n) * dd->num_pports + pidx;
-		else if (i % n)
-			ctxt = (i % n) + 1;
-		else
-			ctxt = ppd->hw_pidx;
+		ctxt = ctxts[i % n];
 		val |= ctxt << (5 * (i % 6));
 		i++;
 		if (i % 6 == 0) {
@@ -6348,8 +6354,9 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
 		goto bail; /* no error, so can still figure out why err */
 	}
-	write_7322_initregs(dd);
 	ret = qib_create_ctxts(dd);
+	if (!ret)	/* initregs now reads dd->rcd[]; only safe on success */
+		write_7322_initregs(dd);
 	init_7322_cntrnames(dd);
 	updthresh = 8U; /* update threshold */
@@ -114,7 +114,7 @@ void qib_set_ctxtcnt(struct qib_devdata *dd)
  */
 int qib_create_ctxts(struct qib_devdata *dd)
 {
-	unsigned i;
+	unsigned i, c, p;
 	int ret;
 	/*
@@ -129,15 +129,28 @@ int qib_create_ctxts(struct qib_devdata *dd)
 		goto done;
 	}
+	c = dd->num_pports ? min(
+		(unsigned)dd->pport[0].n_krcv_queues,
+		(dd->num_pports > 1 ?
+		 (unsigned)dd->pport[1].n_krcv_queues : (unsigned)-1))
+		: 0;
+	p = dd->num_pports > 1 ?
+		(dd->pport[0].n_krcv_queues > dd->pport[1].n_krcv_queues ?
+		 0 : 1) : 0;
+
 	/* create (one or more) kctxt */
 	for (i = 0; i < dd->first_user_ctxt; ++i) {
 		struct qib_pportdata *ppd;
 		struct qib_ctxtdata *rcd;
 		if (dd->skip_kctxt_mask & (1 << i))
 			continue;
-		ppd = dd->pport + (i % dd->num_pports);
+		if (i < (c * dd->num_pports))
+			ppd = dd->pport + (i % dd->num_pports);
+		else
+			ppd = dd->pport + p;
+
 		rcd = qib_create_ctxtdata(ppd, i);
 		if (!rcd) {
 			qib_dev_err(dd, "Unable to allocate ctxtdata"
@@ -121,6 +121,7 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
enum ib_qp_type type, u8 port)
{
u32 i, offset, max_scan, qpn;
+ unsigned krcvqs;
struct qpn_map *map;
u32 ret;
@@ -138,10 +139,11 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
goto bail;
}
+ krcvqs = dd->pport[port-1].n_krcv_queues;
qpn = qpt->last + 2;
if (qpn >= QPN_MAX)
qpn = 2;
- if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues)
+ if (qpt->mask && ((qpn & qpt->mask) >> 1) >= krcvqs)
qpn = (qpn | qpt->mask) + 2;
offset = qpn & BITS_PER_PAGE_MASK;
map = &qpt->map[qpn / BITS_PER_PAGE];
@@ -159,7 +161,7 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
goto bail;
}
offset = find_next_offset(qpt, map, offset,
- dd->n_krcv_queues);
+ krcvqs);
qpn = mk_qpn(qpt, map, offset);
/*
* This test differs from alloc_pidmap().