/*
 * Copyright (C) 2005 - 2010 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
#include "be_cmds.h"

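/* Ring the MCC doorbell: tell the firmware that one more WRB has been
 * posted on the MCC queue. The wmb() orders the WRB writes ahead of the
 * doorbell write.
 */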
static void be_mcc_notify(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	u32 val = 0;

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
}

/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian) */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
	if (compl->flags != 0) {
		compl->flags = le32_to_cpu(compl->flags);
		BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
		return true;
	} else {
		return false;
	}
}

/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}

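/* Process one MCC completion: convert it to host endianness, record the
 * WRITE_FLASHROM status for the flash waiter, refresh the cached stats
 * on a successful GET_STATISTICS, and warn on any other command error.
 */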
static int be_mcc_compl_process(struct be_adapter *adapter,
	struct be_mcc_compl *compl)
{
	u16 compl_status, extd_status;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb */
	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
				CQE_STATUS_COMPL_MASK;

	if ((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) &&
		(compl->tag1 == CMD_SUBSYSTEM_COMMON)) {
		adapter->flash_status = compl_status;
		complete(&adapter->flash_compl);
	}

	if (compl_status == MCC_STATUS_SUCCESS) {
		if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) {
			struct be_cmd_resp_get_stats *resp =
						adapter->stats_cmd.va;
			be_dws_le_to_cpu(&resp->hw_stats,
						sizeof(resp->hw_stats));
			netdev_stats_update(adapter);
			adapter->stats_ioctl_sent = false;
		}
	} else if ((compl_status != MCC_STATUS_NOT_SUPPORTED) &&
		   (compl->tag0 != OPCODE_COMMON_NTWK_MAC_QUERY)) {
		extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
				CQE_STATUS_EXTD_MASK;
		dev_warn(&adapter->pdev->dev,
		"Error in cmd completion - opcode %d, compl %d, extd %d\n",
			compl->tag0, compl_status, extd_status);
	}
	return compl_status;
}

/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
		struct be_async_event_link_state *evt)
{
	be_link_status_update(adapter,
		evt->port_link_status == ASYNC_EVENT_LINK_UP);
}

/* Grp5 CoS Priority evt */
static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
		struct be_async_event_grp5_cos_priority *evt)
{
	if (evt->valid) {
		adapter->vlan_prio_bmap = evt->available_priority_bmap;
		adapter->recommended_prio =
			evt->reco_default_priority << VLAN_PRIO_SHIFT;
	}
}

/* Grp5 QOS Speed evt */
static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
		struct be_async_event_grp5_qos_link_speed *evt)
{
	if (evt->physical_port == adapter->port_num) {
		/* qos_link_speed is in units of 10 Mbps */
		adapter->link_speed = evt->qos_link_speed * 10;
	}
}

static void be_async_grp5_evt_process(struct be_adapter *adapter,
		u32 trailer, struct be_mcc_compl *evt)
{
	u8 event_type = 0;

	event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
		ASYNC_TRAILER_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_EVENT_COS_PRIORITY:
		be_async_grp5_cos_priority_process(adapter,
		(struct be_async_event_grp5_cos_priority *)evt);
	break;
	case ASYNC_EVENT_QOS_SPEED:
		be_async_grp5_qos_speed_process(adapter,
		(struct be_async_event_grp5_qos_link_speed *)evt);
	break;
	default:
		dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
		break;
	}
}

static inline bool is_link_state_evt(u32 trailer)
{
	return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		ASYNC_TRAILER_EVENT_CODE_MASK) ==
				ASYNC_EVENT_CODE_LINK_STATE;
}

static inline bool is_grp5_evt(u32 trailer)
{
	return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		ASYNC_TRAILER_EVENT_CODE_MASK) ==
				ASYNC_EVENT_CODE_GRP_5);
}

static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}

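/* Arm the MCC CQ (and keep re-arming it as completions are reaped) so
 * that async events and command completions raise interrupts.
 */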
void be_async_mcc_enable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
	adapter->mcc_obj.rearm_cq = true;

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

void be_async_mcc_disable(struct be_adapter *adapter)
{
	adapter->mcc_obj.rearm_cq = false;
}

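/* Drain the MCC completion queue: dispatch async link-state/grp5 events,
 * process command completions (last status returned via *status) and
 * return the number of CQ entries consumed. The caller re-arms the CQ.
 */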
int be_process_mcc(struct be_adapter *adapter, int *status)
{
	struct be_mcc_compl *compl;
	int num = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	spin_lock_bh(&adapter->mcc_cq_lock);
	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			if (is_link_state_evt(compl->flags))
				be_async_link_state_process(adapter,
				(struct be_async_event_link_state *) compl);
			else if (is_grp5_evt(compl->flags))
				be_async_grp5_evt_process(adapter,
				compl->flags, compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
				*status = be_mcc_compl_process(adapter, compl);
				atomic_dec(&mcc_obj->q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}

	spin_unlock_bh(&adapter->mcc_cq_lock);
	return num;
}

/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout		120000 /* 12s timeout */
	int i, num, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	for (i = 0; i < mcc_timeout; i++) {
		num = be_process_mcc(adapter, &status);
		if (num)
			be_cq_notify(adapter, mcc_obj->cq.id,
				mcc_obj->rearm_cq, num);

		if (atomic_read(&mcc_obj->q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
		return -1;
	}
	return status;
}

/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	be_mcc_notify(adapter);
	return be_mcc_wait_compl(adapter);
}

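/* Poll the mailbox doorbell until the firmware sets the ready bit.
 * A read of all-ones means the PCI device has dropped off the bus;
 * otherwise give up (and dump the UE registers) after ~4 seconds.
 */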
static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
	int msecs = 0;
	u32 ready;

	do {
		ready = ioread32(db);
		if (ready == 0xffffffff) {
			dev_err(&adapter->pdev->dev,
				"pci slot disconnected\n");
			return -1;
		}

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (msecs > 4000) {
			dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
			be_detect_dump_ue(adapter);
			return -1;
		}

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(1));
		msecs++;
	} while (true);

	return 0;
}

/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
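/*
 * Sketch of the handshake: for a 16-byte aligned mailbox at DMA address D,
 * the first doorbell write publishes D[63:34] along with the "hi" flag and
 * the second publishes D[33:4]. The firmware reassembles the address,
 * executes the WRB found there and posts the completion back into the
 * same mailbox.
 */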
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}

static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
{
	u32 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);

	*stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
	if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
		return -1;
	else
		return 0;
}

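/* Wait (polling every 2 seconds, for up to ~40 seconds) for the firmware
 * POST to reach the ARMFW_RDY stage.
 */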
int be_cmd_POST(struct be_adapter *adapter)
{
	u16 stage;
	int status, timeout = 0;

	do {
		status = be_POST_stage_get(adapter, &stage);
		if (status) {
			dev_err(&adapter->pdev->dev, "POST error; stage=0x%x\n",
				stage);
			return -1;
		} else if (stage != POST_STAGE_ARMFW_RDY) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(2 * HZ);
			timeout += 2;
		} else {
			return 0;
		}
	} while (timeout < 40);

	dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage);
	return -1;
}

static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}

static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}

/* Don't touch the hdr after it's prepared */
static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
				bool embedded, u8 sge_cnt, u32 opcode)
{
	if (embedded)
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	else
		wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
				MCC_WRB_SGE_CNT_SHIFT;
	wrb->payload_length = payload_len;
	wrb->tag0 = opcode;
	be_dws_cpu_to_le(wrb, 8);
}

/* Don't touch the hdr after it's prepared */
static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				u8 subsystem, u8 opcode, int cmd_len)
{
	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->version = 0;
}

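/* Fill a command's phys_addr array with the 4K pages backing the given
 * DMA ring, capped at max_pages.
 */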
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
			struct be_dma_mem *mem)
{
	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	u64 dma = (u64)mem->dma;

	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}

/* Converts interrupt delay in microseconds to multiplier value */
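/* For example, a 1000us delay means 1000 interrupts/sec, which maps to a
 * multiplier of roughly (651042 - 1000) / 1000 ~= 650.
 */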
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE			651042
	const u32 round = 10;
	u32 multiplier;

	if (usec_delay == 0)
		multiplier = 0;
	else {
		u32 interrupt_rate = 1000000 / usec_delay;
		/* Max delay, corresponding to the lowest interrupt rate */
		if (interrupt_rate == 0)
			multiplier = 1023;
		else {
			multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
			multiplier /= interrupt_rate;
			/* Round the multiplier to the closest value.*/
			multiplier = (multiplier + round/2) / round;
			multiplier = min(multiplier, (u32)1023);
		}
	}
	return multiplier;
}

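/* Return the WRB embedded in the bootstrap mailbox; used (under
 * adapter->mbox_lock) for commands issued before the MCC queue exists
 * or that must go through the mailbox.
 */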
static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_wrb *wrb
		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

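/* Get the next free WRB slot on the async MCC queue, or NULL if the
 * queue is full. The caller holds adapter->mcc_lock.
 */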
static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_mcc_wrb *wrb;

	if (atomic_read(&mccq->used) >= mccq->len) {
		dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
		return NULL;
	}

	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

/* Tell fw we're about to start firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_init(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0x12;
	*wrb++ = 0x34;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0x56;
	*wrb++ = 0x78;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Tell fw we're done with firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_clean(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (adapter->eeh_err)
		return -EIO;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0xAA;
	*wrb++ = 0xBB;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0xCC;
	*wrb++ = 0xDD;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
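
/* Uses mbox */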
int be_cmd_eq_create(struct be_adapter *adapter,
		struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_COMMON_EQ_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4-byte eqe */
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
			__ilog2_u32(eq->len/256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
			eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses mbox */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
			u8 type, bool permanent, u32 if_handle)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mac_query *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_MAC_QUERY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));

	req->type = type;
	if (permanent) {
		req->permanent = 1;
	} else {
		req->if_id = cpu_to_le16((u16) if_handle);
		req->permanent = 0;
	}

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
		u32 if_id, u32 *pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_add *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_PMAC_ADD);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));

	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_del *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_PMAC_DEL);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));

	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses Mbox */
int be_cmd_cq_create(struct be_adapter *adapter,
		struct be_queue_info *cq, struct be_queue_info *eq,
		bool sol_evts, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_CQ_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_CQ_CREATE, sizeof(*req));

	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
	AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
	AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
			__ilog2_u32(cq->len/256));
	AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
	AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
	AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);

	return status;
}

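/* Ring sizes are encoded as log2(len) + 1; the largest supported ring
 * (32K entries, code 16) wraps to 0. For example, a 256-entry MCC queue
 * encodes as 9.
 */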
static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len); /* log2(len) + 1 */
	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}

int be_cmd_mccq_create(struct be_adapter *adapter,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_MCC_CREATE_EXT);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
		be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);
	/* Subscribe to Link State and Group 5 Events (bits 1 and 5 set) */
	req->async_event_bitmap[0] |= 0x00000022;
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	mutex_unlock(&adapter->mbox_lock);

	return status;
}

int be_cmd_txq_create(struct be_adapter *adapter,
			struct be_queue_info *txq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_tx_create *req;
	struct be_dma_mem *q_mem = &txq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_ETH_TX_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
		sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;

	AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
		be_encoded_q_len(txq->len));
	AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
		txq->id = le16_to_cpu(resp->cid);
		txq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);

	return status;
}

/* Uses mbox */
int be_cmd_rxq_create(struct be_adapter *adapter,
		struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		u16 max_frame_size, u32 if_id, u32 rss, u8 *rss_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_rx_create *req;
	struct be_dma_mem *q_mem = &rxq->dma_mem;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_ETH_RX_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
		sizeof(*req));

	req->cq_id = cpu_to_le16(cq_id);
	req->frag_size = fls(frag_size) - 1;
	req->num_pages = 2;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(max_frame_size);
	req->rss_queue = cpu_to_le32(rss);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
		*rss_id = resp->rss_id;
	}

	mutex_unlock(&adapter->mbox_lock);

	return status;
}

/* Generic destroyer function for all types of queues
 * Uses Mbox
 */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
		int queue_type)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	u8 subsys = 0, opcode = 0;
	int status;

	if (adapter->eeh_err)
		return -EIO;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_TXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_TX_DESTROY;
		break;
	case QTYPE_RXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_RX_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	default:
		BUG();
	}

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, opcode);

	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
	req->id = cpu_to_le16(q->id);

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);

	return status;
}

/* Create an rx filtering policy configuration on an i/f
 * Uses mbox
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
		u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id,
		u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_create *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_INTERFACE_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));

	req->hdr.domain = domain;
	req->capability_flags = cpu_to_le32(cap_flags);
	req->enable_flags = cpu_to_le32(en_flags);
	req->pmac_invalid = pmac_invalid;
	if (!pmac_invalid)
		memcpy(req->mac_addr, mac, ETH_ALEN);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
		*if_handle = le32_to_cpu(resp->interface_id);
		if (!pmac_invalid)
			*pmac_id = le32_to_cpu(resp->pmac_id);
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses mbox */
int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_destroy *req;
	int status;

	if (adapter->eeh_err)
		return -EIO;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_INTERFACE_DESTROY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));

	req->interface_id = cpu_to_le32(interface_id);

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);

	return status;
}

/* Get stats is a non-embedded command: the request is not embedded inside
 * the WRB but is a separate dma memory block
 * Uses asynchronous MCC
 */
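/*
 * Typical usage (sketch): the driver's periodic worker fires this off and
 * does not wait; be_mcc_compl_process() picks up the response later, e.g.
 *
 *	if (!adapter->stats_ioctl_sent)
 *		be_cmd_get_stats(adapter, &adapter->stats_cmd);
 */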
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_stats *req;
	struct be_sge *sge;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_ETH_GET_STATISTICS);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_GET_STATISTICS, sizeof(*req));
	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	be_mcc_notify(adapter);
	adapter->stats_ioctl_sent = true;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous mcc */
int be_cmd_link_status_query(struct be_adapter *adapter,
			bool *link_up, u8 *mac_speed, u16 *link_speed)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_link_status *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	*link_up = false;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_LINK_STATUS_QUERY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
		if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
			*link_up = true;
			*link_speed = le16_to_cpu(resp->link_speed);
			*mac_speed = resp->mac_speed;
		}
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses Mbox */
int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fw_version *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_GET_FW_VERSION);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
		strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Set the EQ delay interval of an EQ to the specified value
 * Uses async mcc
 */
int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_modify_eq_delay *req;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_MODIFY_EQ_DELAY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));

	req->num_eq = cpu_to_le32(1);
	req->delay[0].eq_id = cpu_to_le32(eq_id);
	req->delay[0].phase = 0;
	req->delay[0].delay_multiplier = cpu_to_le32(eqd);

	be_mcc_notify(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous mcc */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
			u32 num, bool untagged, bool promiscuous)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_vlan_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_VLAN_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));

	req->interface_id = if_id;
	req->promiscuous = promiscuous;
	req->untagged = untagged;
	req->num_vlan = num;
	if (!promiscuous) {
		memcpy(req->normal_vlan, vtag_array,
			req->num_vlan * sizeof(vtag_array[0]));
	}

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses MCC for this command as it may be called in BH context
 * Uses synchronous mcc
 */
int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_promiscuous_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_ETH_PROMISCUOUS);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_PROMISCUOUS, sizeof(*req));

	/* In FW versions X.102.149/X.101.487 and later,
	 * the port setting associated only with the
	 * issuing pci function will take effect
	 */
	if (port_num)
		req->port1_promiscuous = en;
	else
		req->port0_promiscuous = en;

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/*
 * Uses MCC for this command as it may be called in BH context
 * (netdev == NULL) => multicast promiscuous
 */
int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
		struct net_device *netdev, struct be_dma_mem *mem)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcast_mac_config *req = mem->va;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	sge = nonembedded_sgl(wrb);
	memset(req, 0, sizeof(*req));

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_COMMON_NTWK_MULTICAST_SET);
	sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
	sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(mem->size);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));

	req->interface_id = if_id;
	if (netdev) {
		int i;
		struct netdev_hw_addr *ha;

		req->num_mac = cpu_to_le16(netdev_mc_count(netdev));

		i = 0;
		netdev_for_each_mc_addr(ha, netdev)
			memcpy(req->mac[i++].byte, ha->addr, ETH_ALEN);
	} else {
		req->promiscuous = 1;
	}

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous mcc */
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_flow_control *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_SET_FLOW_CONTROL);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));

	req->tx_flow_control = cpu_to_le16((u16)tx_fc);
	req->rx_flow_control = cpu_to_le16((u16)rx_fc);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_flow_control *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_GET_FLOW_CONTROL);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_flow_control *resp =
						embedded_payload(wrb);
		*tx_fc = le16_to_cpu(resp->tx_flow_control);
		*rx_fc = le16_to_cpu(resp->rx_flow_control);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses mbox */
int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
		u32 *mode, u32 *caps)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_query_fw_cfg *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_QUERY_FIRMWARE_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
		*port_num = le32_to_cpu(resp->phys_port);
		*mode = le32_to_cpu(resp->function_mode);
		*caps = le32_to_cpu(resp->function_caps);
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses mbox */
int be_cmd_reset_function(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_FUNCTION_RESET);

	be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

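/* Uses mbox. Note that the RSS hash key comes from the uninitialized
 * myhash[] stack buffer below, so the key programmed into the hw is
 * effectively arbitrary.
 */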
int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_rss_config *req;
	u32 myhash[10];
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
		OPCODE_ETH_RSS_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_RSS_CONFIG, sizeof(*req));

	req->if_id = cpu_to_le32(adapter->if_handle);
	req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4);
	req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
	memcpy(req->cpu_table, rsstable, table_size);
	memcpy(req->hash, myhash, sizeof(myhash));
	be_dws_cpu_to_le(req->hash, sizeof(req->hash));

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
			u8 bcn, u8 sts, u8 state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_enable_disable_beacon *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_ENABLE_DISABLE_BEACON);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req));

	req->port_num = port_num;
	req->beacon_state = state;
	req->beacon_duration = bcn;
	req->status_duration = sts;

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_beacon_state *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_GET_BEACON_STATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req));

	req->port_num = port_num;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_beacon_state *resp =
						embedded_payload(wrb);
		*state = resp->beacon_state;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

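/* Issued on the async MCC queue, but waits (up to 12s) for the
 * WRITE_FLASHROM completion, which be_mcc_compl_process() signals via
 * adapter->flash_compl.
 */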
1489 int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
1490                         u32 flash_type, u32 flash_opcode, u32 buf_size)
1491 {
1492         struct be_mcc_wrb *wrb;
1493         struct be_cmd_write_flashrom *req;
1494         struct be_sge *sge;
1495         int status;
1496
1497         spin_lock_bh(&adapter->mcc_lock);
1498         adapter->flash_status = 0;
1499
1500         wrb = wrb_from_mccq(adapter);
1501         if (!wrb) {
1502                 status = -EBUSY;
1503                 goto err_unlock;
1504         }
1505         req = cmd->va;
1506         sge = nonembedded_sgl(wrb);
1507
1508         be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
1509                         OPCODE_COMMON_WRITE_FLASHROM);
1510         wrb->tag1 = CMD_SUBSYSTEM_COMMON;
1511
1512         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1513                 OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
1514         sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
1515         sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
1516         sge->len = cpu_to_le32(cmd->size);
1517
1518         req->params.op_type = cpu_to_le32(flash_type);
1519         req->params.op_code = cpu_to_le32(flash_opcode);
1520         req->params.data_buf_size = cpu_to_le32(buf_size);
1521
1522         be_mcc_notify(adapter);
1523         spin_unlock_bh(&adapter->mcc_lock);
1524
1525         if (!wait_for_completion_timeout(&adapter->flash_compl,
1526                         msecs_to_jiffies(12000)))
1527                 status = -1;
1528         else
1529                 status = adapter->flash_status;
1530
1531         return status;
1532
1533 err_unlock:
1534         spin_unlock_bh(&adapter->mcc_lock);
1535         return status;
1536 }
1537
1538 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
1539                          int offset)
1540 {
1541         struct be_mcc_wrb *wrb;
1542         struct be_cmd_write_flashrom *req;
1543         int status;
1544
1545         spin_lock_bh(&adapter->mcc_lock);
1546
1547         wrb = wrb_from_mccq(adapter);
1548         if (!wrb) {
1549                 status = -EBUSY;
1550                 goto err;
1551         }
1552         req = embedded_payload(wrb);
1553
1554         be_wrb_hdr_prepare(wrb, sizeof(*req)+4, true, 0,
1555                         OPCODE_COMMON_READ_FLASHROM);
1556
1557         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1558                 OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4);
1559
1560         req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
1561         req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
1562         req->params.offset = cpu_to_le32(offset);
1563         req->params.data_buf_size = cpu_to_le32(0x4);
1564
1565         status = be_mcc_notify_wait(adapter);
1566         if (!status)
1567                 memcpy(flashed_crc, req->params.data_buf, 4);
1568
1569 err:
1570         spin_unlock_bh(&adapter->mcc_lock);
1571         return status;
1572 }
1573
1574 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
1575                                 struct be_dma_mem *nonemb_cmd)
1576 {
1577         struct be_mcc_wrb *wrb;
1578         struct be_cmd_req_acpi_wol_magic_config *req;
1579         struct be_sge *sge;
1580         int status;
1581
1582         spin_lock_bh(&adapter->mcc_lock);
1583
1584         wrb = wrb_from_mccq(adapter);
1585         if (!wrb) {
1586                 status = -EBUSY;
1587                 goto err;
1588         }
1589         req = nonemb_cmd->va;
1590         sge = nonembedded_sgl(wrb);
1591
1592         be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
1593                         OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG);
1594
1595         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1596                 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req));
1597         memcpy(req->magic_mac, mac, ETH_ALEN);
1598
1599         sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
1600         sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
1601         sge->len = cpu_to_le32(nonemb_cmd->size);
1602
1603         status = be_mcc_notify_wait(adapter);
1604
1605 err:
1606         spin_unlock_bh(&adapter->mcc_lock);
1607         return status;
1608 }
1609
1610 int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
1611                         u8 loopback_type, u8 enable)
1612 {
1613         struct be_mcc_wrb *wrb;
1614         struct be_cmd_req_set_lmode *req;
1615         int status;
1616
1617         spin_lock_bh(&adapter->mcc_lock);
1618
1619         wrb = wrb_from_mccq(adapter);
1620         if (!wrb) {
1621                 status = -EBUSY;
1622                 goto err;
1623         }
1624
1625         req = embedded_payload(wrb);
1626
1627         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1628                                 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE);
1629
1630         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
1631                         OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
1632                         sizeof(*req));
1633
1634         req->src_port = port_num;
1635         req->dest_port = port_num;
1636         req->loopback_type = loopback_type;
1637         req->loopback_state = enable;
1638
1639         status = be_mcc_notify_wait(adapter);
1640 err:
1641         spin_unlock_bh(&adapter->mcc_lock);
1642         return status;
1643 }
1644
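     /* Asks firmware to run a loopback test on 'port_num': 'num_pkts' packets of
      * 'pkt_size' bytes filled with 'pattern' are sent and verified internally.
      * On success the firmware's own test status is returned.  Illustrative call
      * only (constants and fields assumed from the ethtool self-test path, not
      * defined in this file):
      *
      *         status = be_cmd_loopback_test(adapter, adapter->port_num,
      *                                       BE_MAC_LOOPBACK, 1500, 2, 0xabc);
      */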
1645 int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
1646                 u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
1647 {
1648         struct be_mcc_wrb *wrb;
1649         struct be_cmd_req_loopback_test *req;
1650         int status;
1651
1652         spin_lock_bh(&adapter->mcc_lock);
1653
1654         wrb = wrb_from_mccq(adapter);
1655         if (!wrb) {
1656                 status = -EBUSY;
1657                 goto err;
1658         }
1659
1660         req = embedded_payload(wrb);
1661
1662         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1663                                 OPCODE_LOWLEVEL_LOOPBACK_TEST);
1664
1665         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
1666                         OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
1667         req->hdr.timeout = cpu_to_le32(4);
1668
1669         req->pattern = cpu_to_le64(pattern);
1670         req->src_port = cpu_to_le32(port_num);
1671         req->dest_port = cpu_to_le32(port_num);
1672         req->pkt_size = cpu_to_le32(pkt_size);
1673         req->num_pkts = cpu_to_le32(num_pkts);
1674         req->loopback_type = cpu_to_le32(loopback_type);
1675
1676         status = be_mcc_notify_wait(adapter);
1677         if (!status) {
1678                 struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
1679                 status = le32_to_cpu(resp->status);
1680         }
1681
1682 err:
1683         spin_unlock_bh(&adapter->mcc_lock);
1684         return status;
1685 }
1686
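     /* DMA sanity test against adapter memory: the host fills a buffer with
      * 'pattern', firmware DMAs it out and echoes it back, and the echoed data
      * is compared with what was sent.  Any mismatch, or a reported send error,
      * fails the test. */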
1687 int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
1688                                 u32 byte_cnt, struct be_dma_mem *cmd)
1689 {
1690         struct be_mcc_wrb *wrb;
1691         struct be_cmd_req_ddrdma_test *req;
1692         struct be_sge *sge;
1693         int status;
1694         int i, j = 0;
1695
1696         spin_lock_bh(&adapter->mcc_lock);
1697
1698         wrb = wrb_from_mccq(adapter);
1699         if (!wrb) {
1700                 status = -EBUSY;
1701                 goto err;
1702         }
1703         req = cmd->va;
1704         sge = nonembedded_sgl(wrb);
1705         be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
1706                                 OPCODE_LOWLEVEL_HOST_DDR_DMA);
1707         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
1708                         OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size);
1709
1710         sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
1711         sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
1712         sge->len = cpu_to_le32(cmd->size);
1713
1714         req->pattern = cpu_to_le64(pattern);
1715         req->byte_count = cpu_to_le32(byte_cnt);
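             /* Replicate the 64-bit pattern byte by byte (LSB first) into the
              * send buffer so the echoed data can be checked with memcmp(). */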
1716         for (i = 0; i < byte_cnt; i++) {
1717                 req->snd_buff[i] = (u8)(pattern >> (j*8));
1718                 j++;
1719                 if (j > 7)
1720                         j = 0;
1721         }
1722
1723         status = be_mcc_notify_wait(adapter);
1724
1725         if (!status) {
1726                 struct be_cmd_resp_ddrdma_test *resp;
1727                 resp = cmd->va;
1728                 if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
1729                                 resp->snd_err) {
1730                         status = -EIO;
1731                 }
1732         }
1733
1734 err:
1735         spin_unlock_bh(&adapter->mcc_lock);
1736         return status;
1737 }
1738
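     /* Reads the adapter's SEEPROM contents into the caller-supplied DMA buffer
      * via COMMON_SEEPROM_READ; the buffer is attached as a non-embedded SGE. */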
1739 int be_cmd_get_seeprom_data(struct be_adapter *adapter,
1740                                 struct be_dma_mem *nonemb_cmd)
1741 {
1742         struct be_mcc_wrb *wrb;
1743         struct be_cmd_req_seeprom_read *req;
1744         struct be_sge *sge;
1745         int status;
1746
1747         spin_lock_bh(&adapter->mcc_lock);
1748
1749         wrb = wrb_from_mccq(adapter);
             if (!wrb) {
                     status = -EBUSY;
                     goto err;
             }
1750         req = nonemb_cmd->va;
1751         sge = nonembedded_sgl(wrb);
1752
1753         be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
1754                         OPCODE_COMMON_SEEPROM_READ);
1755
1756         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1757                         OPCODE_COMMON_SEEPROM_READ, sizeof(*req));
1758
1759         sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
1760         sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
1761         sge->len = cpu_to_le32(nonemb_cmd->size);
1762
1763         status = be_mcc_notify_wait(adapter);
1764
     err:
1765         spin_unlock_bh(&adapter->mcc_lock);
1766         return status;
1767 }
1768
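     /* Queries PHY details (COMMON_GET_PHY_DETAILS) into the caller-supplied DMA
      * buffer; interpreting the response is left to the caller. */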
1769 int be_cmd_get_phy_info(struct be_adapter *adapter, struct be_dma_mem *cmd)
1770 {
1771         struct be_mcc_wrb *wrb;
1772         struct be_cmd_req_get_phy_info *req;
1773         struct be_sge *sge;
1774         int status;
1775
1776         spin_lock_bh(&adapter->mcc_lock);
1777
1778         wrb = wrb_from_mccq(adapter);
1779         if (!wrb) {
1780                 status = -EBUSY;
1781                 goto err;
1782         }
1783
1784         req = cmd->va;
1785         sge = nonembedded_sgl(wrb);
1786
1787         be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
1788                                 OPCODE_COMMON_GET_PHY_DETAILS);
1789
1790         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1791                         OPCODE_COMMON_GET_PHY_DETAILS,
1792                         sizeof(*req));
1793
1794         sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
1795         sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
1796         sge->len = cpu_to_le32(cmd->size);
1797
1798         status = be_mcc_notify_wait(adapter);
1799 err:
1800         spin_unlock_bh(&adapter->mcc_lock);
1801         return status;
1802 }
1803
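     /* Sets the NIC bandwidth limit for the given function 'domain'.  Only the
      * NIC valid bit is set, so other QoS parameters are left untouched by
      * firmware.  Illustrative call only (parameter names and the domain
      * convention are assumed, e.g. when capping a virtual function's rate):
      *
      *         status = be_cmd_set_qos(adapter, max_rate, vf_num + 1);
      */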
1804 int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
1805 {
1806         struct be_mcc_wrb *wrb;
1807         struct be_cmd_req_set_qos *req;
1808         int status;
1809
1810         spin_lock_bh(&adapter->mcc_lock);
1811
1812         wrb = wrb_from_mccq(adapter);
1813         if (!wrb) {
1814                 status = -EBUSY;
1815                 goto err;
1816         }
1817
1818         req = embedded_payload(wrb);
1819
1820         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1821                                 OPCODE_COMMON_SET_QOS);
1822
1823         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1824                         OPCODE_COMMON_SET_QOS, sizeof(*req));
1825
1826         req->hdr.domain = domain;
1827         req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
1828         req->max_bps_nic = cpu_to_le32(bps);
1829
1830         status = be_mcc_notify_wait(adapter);
1831
1832 err:
1833         spin_unlock_bh(&adapter->mcc_lock);
1834         return status;
1835 }