/*
 * Driver for OHCI 1394 controllers
 *
 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include <asm/byteorder.h>
#include <asm/page.h>

#ifdef CONFIG_PPC_PMAC
#include <asm/pmac_feature.h>
#endif

#include "core.h"
#include "ohci.h"

#define DESCRIPTOR_OUTPUT_MORE          0
#define DESCRIPTOR_OUTPUT_LAST          (1 << 12)
#define DESCRIPTOR_INPUT_MORE           (2 << 12)
#define DESCRIPTOR_INPUT_LAST           (3 << 12)
#define DESCRIPTOR_STATUS               (1 << 11)
#define DESCRIPTOR_KEY_IMMEDIATE        (2 << 8)
#define DESCRIPTOR_PING                 (1 << 7)
#define DESCRIPTOR_YY                   (1 << 6)
#define DESCRIPTOR_NO_IRQ               (0 << 4)
#define DESCRIPTOR_IRQ_ERROR            (1 << 4)
#define DESCRIPTOR_IRQ_ALWAYS           (3 << 4)
#define DESCRIPTOR_BRANCH_ALWAYS        (3 << 2)
#define DESCRIPTOR_WAIT                 (3 << 0)

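/*
 * A generic DMA descriptor as the controller sees it: a 16-byte-aligned
 * block holding the transfer length, control flags, data pointer, and a
 * branch to the next descriptor block (whose size, the Z count, travels
 * in the low four bits of branch_address).  res_count and
 * transfer_status are written back by the controller.
 */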
struct descriptor {
        __le16 req_count;
        __le16 control;
        __le32 data_address;
        __le32 branch_address;
        __le16 res_count;
        __le16 transfer_status;
} __attribute__((aligned(16)));

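/*
 * Each DMA context has a small block of registers; these compute the
 * offsets of its ContextControl set/clear, CommandPtr, and (for IR
 * contexts) ContextMatch registers relative to the context's base.
 */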
#define CONTROL_SET(regs)       (regs)
#define CONTROL_CLEAR(regs)     ((regs) + 4)
#define COMMAND_PTR(regs)       ((regs) + 12)
#define CONTEXT_MATCH(regs)     ((regs) + 16)

#define AR_BUFFER_SIZE  (32*1024)
#define AR_BUFFERS_MIN  DIV_ROUND_UP(AR_BUFFER_SIZE, PAGE_SIZE)
/* we need at least two pages for proper list management */
#define AR_BUFFERS      (AR_BUFFERS_MIN >= 2 ? AR_BUFFERS_MIN : 2)

#define MAX_ASYNC_PAYLOAD       4096
#define MAX_AR_PACKET_SIZE      (16 + MAX_ASYNC_PAYLOAD + 4)
#define AR_WRAPAROUND_PAGES     DIV_ROUND_UP(MAX_AR_PACKET_SIZE, PAGE_SIZE)
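/*
 * Worked out for 4 KiB pages: a maximal AR packet is 16 bytes of header
 * plus 4096 bytes of payload plus a 4-byte status trailer = 4116 bytes,
 * so a packet that starts near the end of a page can spill into up to
 * two following pages and AR_WRAPAROUND_PAGES evaluates to 2.
 */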

struct ar_context {
        struct fw_ohci *ohci;
        struct page *pages[AR_BUFFERS];
        void *buffer;
        struct descriptor *descriptors;
        dma_addr_t descriptors_bus;
        void *pointer;
        unsigned int last_buffer_index;
        u32 regs;
        struct tasklet_struct tasklet;
};

struct context;

typedef int (*descriptor_callback_t)(struct context *ctx,
                                     struct descriptor *d,
                                     struct descriptor *last);

/*
 * A buffer that contains a block of DMA-able coherent memory used for
 * storing a portion of a DMA descriptor program.
 */
struct descriptor_buffer {
        struct list_head list;
        dma_addr_t buffer_bus;
        size_t buffer_size;
        size_t used;
        struct descriptor buffer[0];
};

struct context {
        struct fw_ohci *ohci;
        u32 regs;
        int total_allocation;
        u32 current_bus;
        bool running;
        bool flushing;

        /*
         * List of page-sized buffers for storing DMA descriptors.
         * Head of list contains buffers in use and tail of list contains
         * free buffers.
         */
        struct list_head buffer_list;

        /*
         * Pointer to a buffer inside buffer_list that contains the tail
         * end of the current DMA program.
         */
        struct descriptor_buffer *buffer_tail;

        /*
         * The descriptor containing the branch address of the first
         * descriptor that has not yet been filled by the device.
         */
        struct descriptor *last;

        /*
         * The last descriptor in the DMA program.  It contains the branch
         * address that must be updated upon appending a new descriptor.
         */
        struct descriptor *prev;

        descriptor_callback_t callback;

        struct tasklet_struct tasklet;
};

#define IT_HEADER_SY(v)          ((v) <<  0)
#define IT_HEADER_TCODE(v)       ((v) <<  4)
#define IT_HEADER_CHANNEL(v)     ((v) <<  8)
#define IT_HEADER_TAG(v)         ((v) << 14)
#define IT_HEADER_SPEED(v)       ((v) << 16)
#define IT_HEADER_DATA_LENGTH(v) ((v) << 16)
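/*
 * These compose the two header quadlets of an isochronous transmit
 * packet; a typical first quadlet looks like (sketch):
 *   IT_HEADER_SY(sy) | IT_HEADER_TAG(tag) | IT_HEADER_CHANNEL(channel) |
 *   IT_HEADER_TCODE(TCODE_STREAM_DATA) | IT_HEADER_SPEED(speed)
 * with IT_HEADER_DATA_LENGTH(payload_length) in the second quadlet.
 */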

struct iso_context {
        struct fw_iso_context base;
        struct context context;
        void *header;
        size_t header_length;
        unsigned long flushing_completions;
        u32 mc_buffer_bus;
        u16 mc_completed;
        u16 last_timestamp;
        u8 sync;
        u8 tags;
};

#define CONFIG_ROM_SIZE 1024

struct fw_ohci {
        struct fw_card card;

        __iomem char *registers;
        int node_id;
        int generation;
        int request_generation; /* for timestamping incoming requests */
        unsigned quirks;
        unsigned int pri_req_max;
        u32 bus_time;
        bool is_root;
        bool csr_state_setclear_abdicate;
        int n_ir;
        int n_it;
        /*
         * Spinlock for accessing fw_ohci data.  Never call out of
         * this driver with this lock held.
         */
        spinlock_t lock;

        struct mutex phy_reg_mutex;

        void *misc_buffer;
        dma_addr_t misc_buffer_bus;

        struct ar_context ar_request_ctx;
        struct ar_context ar_response_ctx;
        struct context at_request_ctx;
        struct context at_response_ctx;

        u32 it_context_support;
        u32 it_context_mask;     /* unoccupied IT contexts */
        struct iso_context *it_context_list;
        u64 ir_context_channels; /* unoccupied channels */
        u32 ir_context_support;
        u32 ir_context_mask;     /* unoccupied IR contexts */
        struct iso_context *ir_context_list;
        u64 mc_channels; /* channels in use by the multichannel IR context */
        bool mc_allocated;

        __be32    *config_rom;
        dma_addr_t config_rom_bus;
        __be32    *next_config_rom;
        dma_addr_t next_config_rom_bus;
        __be32     next_header;

        __le32    *self_id_cpu;
        dma_addr_t self_id_bus;
        struct work_struct bus_reset_work;

        u32 self_id_buffer[512];
};

static inline struct fw_ohci *fw_ohci(struct fw_card *card)
{
        return container_of(card, struct fw_ohci, card);
}

#define IT_CONTEXT_CYCLE_MATCH_ENABLE   0x80000000
#define IR_CONTEXT_BUFFER_FILL          0x80000000
#define IR_CONTEXT_ISOCH_HEADER         0x40000000
#define IR_CONTEXT_CYCLE_MATCH_ENABLE   0x20000000
#define IR_CONTEXT_MULTI_CHANNEL_MODE   0x10000000
#define IR_CONTEXT_DUAL_BUFFER_MODE     0x08000000

#define CONTEXT_RUN     0x8000
#define CONTEXT_WAKE    0x1000
#define CONTEXT_DEAD    0x0800
#define CONTEXT_ACTIVE  0x0400

#define OHCI1394_MAX_AT_REQ_RETRIES     0xf
#define OHCI1394_MAX_AT_RESP_RETRIES    0x2
#define OHCI1394_MAX_PHYS_RESP_RETRIES  0x8

#define OHCI1394_REGISTER_SIZE          0x800
#define OHCI1394_PCI_HCI_Control        0x40
#define SELF_ID_BUF_SIZE                0x800
#define OHCI_TCODE_PHY_PACKET           0x0e
#define OHCI_VERSION_1_1                0x010010

static char ohci_driver_name[] = KBUILD_MODNAME;

#define PCI_DEVICE_ID_AGERE_FW643       0x5901
#define PCI_DEVICE_ID_CREATIVE_SB1394   0x4001
#define PCI_DEVICE_ID_JMICRON_JMB38X_FW 0x2380
#define PCI_DEVICE_ID_TI_TSB12LV22      0x8009
#define PCI_DEVICE_ID_TI_TSB12LV26      0x8020
#define PCI_DEVICE_ID_TI_TSB82AA2       0x8025
#define PCI_VENDOR_ID_PINNACLE_SYSTEMS  0x11bd

#define QUIRK_CYCLE_TIMER               1
#define QUIRK_RESET_PACKET              2
#define QUIRK_BE_HEADERS                4
#define QUIRK_NO_1394A                  8
#define QUIRK_NO_MSI                    16
#define QUIRK_TI_SLLZ059                32

/* In case of multiple matches in ohci_quirks[], only the first one is used. */
static const struct {
        unsigned short vendor, device, revision, flags;
} ohci_quirks[] = {
        {PCI_VENDOR_ID_AL, PCI_ANY_ID, PCI_ANY_ID,
                QUIRK_CYCLE_TIMER},

        {PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, PCI_ANY_ID,
                QUIRK_BE_HEADERS},

        {PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6,
                QUIRK_NO_MSI},

        {PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID,
                QUIRK_RESET_PACKET},

        {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, PCI_ANY_ID,
                QUIRK_NO_MSI},

        {PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID,
                QUIRK_CYCLE_TIMER},

        {PCI_VENDOR_ID_O2, PCI_ANY_ID, PCI_ANY_ID,
                QUIRK_NO_MSI},

        {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID,
                QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},

        {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, PCI_ANY_ID,
                QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET | QUIRK_NO_1394A},

        {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV26, PCI_ANY_ID,
                QUIRK_RESET_PACKET | QUIRK_TI_SLLZ059},

        {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB82AA2, PCI_ANY_ID,
                QUIRK_RESET_PACKET | QUIRK_TI_SLLZ059},

        {PCI_VENDOR_ID_TI, PCI_ANY_ID, PCI_ANY_ID,
                QUIRK_RESET_PACKET},

        {PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID,
                QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
};

/* This overrides anything that was found in ohci_quirks[]. */
static int param_quirks;
module_param_named(quirks, param_quirks, int, 0644);
MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
        ", nonatomic cycle timer = "    __stringify(QUIRK_CYCLE_TIMER)
        ", reset packet generation = "  __stringify(QUIRK_RESET_PACKET)
        ", AR/selfID endianness = "     __stringify(QUIRK_BE_HEADERS)
        ", no 1394a enhancements = "    __stringify(QUIRK_NO_1394A)
        ", disable MSI = "              __stringify(QUIRK_NO_MSI)
        ", TI SLLZ059 erratum = "       __stringify(QUIRK_TI_SLLZ059)
        ")");

#define OHCI_PARAM_DEBUG_AT_AR          1
#define OHCI_PARAM_DEBUG_SELFIDS        2
#define OHCI_PARAM_DEBUG_IRQS           4
#define OHCI_PARAM_DEBUG_BUSRESETS      8 /* only effective before chip init */

static int param_debug;
module_param_named(debug, param_debug, int, 0644);
MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
        ", AT/AR events = "     __stringify(OHCI_PARAM_DEBUG_AT_AR)
        ", self-IDs = "         __stringify(OHCI_PARAM_DEBUG_SELFIDS)
        ", IRQs = "             __stringify(OHCI_PARAM_DEBUG_IRQS)
        ", busReset events = "  __stringify(OHCI_PARAM_DEBUG_BUSRESETS)
        ", or a combination, or all = -1)");

static void log_irqs(struct fw_ohci *ohci, u32 evt)
{
        if (likely(!(param_debug &
                        (OHCI_PARAM_DEBUG_IRQS | OHCI_PARAM_DEBUG_BUSRESETS))))
                return;

        if (!(param_debug & OHCI_PARAM_DEBUG_IRQS) &&
            !(evt & OHCI1394_busReset))
                return;

        dev_notice(ohci->card.device,
            "IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
            evt & OHCI1394_selfIDComplete       ? " selfID"             : "",
            evt & OHCI1394_RQPkt                ? " AR_req"             : "",
            evt & OHCI1394_RSPkt                ? " AR_resp"            : "",
            evt & OHCI1394_reqTxComplete        ? " AT_req"             : "",
            evt & OHCI1394_respTxComplete       ? " AT_resp"            : "",
            evt & OHCI1394_isochRx              ? " IR"                 : "",
            evt & OHCI1394_isochTx              ? " IT"                 : "",
            evt & OHCI1394_postedWriteErr       ? " postedWriteErr"     : "",
            evt & OHCI1394_cycleTooLong         ? " cycleTooLong"       : "",
            evt & OHCI1394_cycle64Seconds       ? " cycle64Seconds"     : "",
            evt & OHCI1394_cycleInconsistent    ? " cycleInconsistent"  : "",
            evt & OHCI1394_regAccessFail        ? " regAccessFail"      : "",
            evt & OHCI1394_unrecoverableError   ? " unrecoverableError" : "",
            evt & OHCI1394_busReset             ? " busReset"           : "",
            evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
                    OHCI1394_RSPkt | OHCI1394_reqTxComplete |
                    OHCI1394_respTxComplete | OHCI1394_isochRx |
                    OHCI1394_isochTx | OHCI1394_postedWriteErr |
                    OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
                    OHCI1394_cycleInconsistent |
                    OHCI1394_regAccessFail | OHCI1394_busReset)
                                                ? " ?"                  : "");
}

static const char *speed[] = {
        [0] = "S100", [1] = "S200", [2] = "S400",    [3] = "beta",
};
static const char *power[] = {
        [0] = "+0W",  [1] = "+15W", [2] = "+30W",    [3] = "+45W",
        [4] = "-3W",  [5] = " ?W",  [6] = "-3..-6W", [7] = "-3..-10W",
};
static const char port[] = { '.', '-', 'p', 'c', };

static char _p(u32 *s, int shift)
{
        return port[*s >> shift & 3];
}

static void log_selfids(struct fw_ohci *ohci, int generation, int self_id_count)
{
        u32 *s;

        if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
                return;

        dev_notice(ohci->card.device,
                   "%d selfIDs, generation %d, local node ID %04x\n",
                   self_id_count, generation, ohci->node_id);

        for (s = ohci->self_id_buffer; self_id_count--; ++s)
                if ((*s & 1 << 23) == 0)
                        dev_notice(ohci->card.device,
                            "selfID 0: %08x, phy %d [%c%c%c] "
                            "%s gc=%d %s %s%s%s\n",
                            *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2),
                            speed[*s >> 14 & 3], *s >> 16 & 63,
                            power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
                            *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
                else
                        dev_notice(ohci->card.device,
                            "selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n",
                            *s, *s >> 24 & 63,
                            _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10),
                            _p(s,  8), _p(s,  6), _p(s,  4), _p(s,  2));
}

static const char *evts[] = {
        [0x00] = "evt_no_status",       [0x01] = "-reserved-",
        [0x02] = "evt_long_packet",     [0x03] = "evt_missing_ack",
        [0x04] = "evt_underrun",        [0x05] = "evt_overrun",
        [0x06] = "evt_descriptor_read", [0x07] = "evt_data_read",
        [0x08] = "evt_data_write",      [0x09] = "evt_bus_reset",
        [0x0a] = "evt_timeout",         [0x0b] = "evt_tcode_err",
        [0x0c] = "-reserved-",          [0x0d] = "-reserved-",
        [0x0e] = "evt_unknown",         [0x0f] = "evt_flushed",
        [0x10] = "-reserved-",          [0x11] = "ack_complete",
        [0x12] = "ack_pending ",        [0x13] = "-reserved-",
        [0x14] = "ack_busy_X",          [0x15] = "ack_busy_A",
        [0x16] = "ack_busy_B",          [0x17] = "-reserved-",
        [0x18] = "-reserved-",          [0x19] = "-reserved-",
        [0x1a] = "-reserved-",          [0x1b] = "ack_tardy",
        [0x1c] = "-reserved-",          [0x1d] = "ack_data_error",
        [0x1e] = "ack_type_error",      [0x1f] = "-reserved-",
        [0x20] = "pending/cancelled",
};
static const char *tcodes[] = {
        [0x0] = "QW req",               [0x1] = "BW req",
        [0x2] = "W resp",               [0x3] = "-reserved-",
        [0x4] = "QR req",               [0x5] = "BR req",
        [0x6] = "QR resp",              [0x7] = "BR resp",
        [0x8] = "cycle start",          [0x9] = "Lk req",
        [0xa] = "async stream packet",  [0xb] = "Lk resp",
        [0xc] = "-reserved-",           [0xd] = "-reserved-",
        [0xe] = "link internal",        [0xf] = "-reserved-",
};

static void log_ar_at_event(struct fw_ohci *ohci,
                            char dir, int speed, u32 *header, int evt)
{
        int tcode = header[0] >> 4 & 0xf;
        char specific[12];

        if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR)))
                return;

        if (unlikely(evt >= ARRAY_SIZE(evts)))
                evt = 0x1f;

        if (evt == OHCI1394_evt_bus_reset) {
                dev_notice(ohci->card.device,
                           "A%c evt_bus_reset, generation %d\n",
                           dir, (header[2] >> 16) & 0xff);
                return;
        }

        switch (tcode) {
        case 0x0: case 0x6: case 0x8:
                snprintf(specific, sizeof(specific), " = %08x",
                         be32_to_cpu((__force __be32)header[3]));
                break;
        case 0x1: case 0x5: case 0x7: case 0x9: case 0xb:
                snprintf(specific, sizeof(specific), " %x,%x",
                         header[3] >> 16, header[3] & 0xffff);
                break;
        default:
                specific[0] = '\0';
        }

        switch (tcode) {
        case 0xa:
                dev_notice(ohci->card.device,
                           "A%c %s, %s\n",
                           dir, evts[evt], tcodes[tcode]);
                break;
        case 0xe:
                dev_notice(ohci->card.device,
                           "A%c %s, PHY %08x %08x\n",
                           dir, evts[evt], header[1], header[2]);
                break;
        case 0x0: case 0x1: case 0x4: case 0x5: case 0x9:
                dev_notice(ohci->card.device,
                           "A%c spd %x tl %02x, "
                           "%04x -> %04x, %s, "
                           "%s, %04x%08x%s\n",
                           dir, speed, header[0] >> 10 & 0x3f,
                           header[1] >> 16, header[0] >> 16, evts[evt],
                           tcodes[tcode], header[1] & 0xffff, header[2], specific);
                break;
        default:
                dev_notice(ohci->card.device,
                           "A%c spd %x tl %02x, "
                           "%04x -> %04x, %s, "
                           "%s%s\n",
                           dir, speed, header[0] >> 10 & 0x3f,
                           header[1] >> 16, header[0] >> 16, evts[evt],
                           tcodes[tcode], specific);
        }
}

static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
{
        writel(data, ohci->registers + offset);
}

static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
{
        return readl(ohci->registers + offset);
}

static inline void flush_writes(const struct fw_ohci *ohci)
{
        /* Do a dummy read to flush writes. */
        reg_read(ohci, OHCI1394_Version);
}

/*
 * Beware!  read_phy_reg(), write_phy_reg(), update_phy_reg(), and
 * read_paged_phy_reg() require the caller to hold ohci->phy_reg_mutex.
 * In other words, only use ohci_read_phy_reg() and ohci_update_phy_reg()
 * directly.  Exceptions are intrinsically serialized contexts like pci_probe.
 */
static int read_phy_reg(struct fw_ohci *ohci, int addr)
{
        u32 val;
        int i;

        reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
        for (i = 0; i < 3 + 100; i++) {
                val = reg_read(ohci, OHCI1394_PhyControl);
                if (!~val)
                        return -ENODEV; /* Card was ejected. */

                if (val & OHCI1394_PhyControl_ReadDone)
                        return OHCI1394_PhyControl_ReadData(val);

                /*
                 * Try a few times without waiting.  Sleeping is necessary
                 * only when the link/PHY interface is busy.
                 */
                if (i >= 3)
                        msleep(1);
        }
        dev_err(ohci->card.device, "failed to read phy reg\n");

        return -EBUSY;
}

static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val)
{
        int i;

        reg_write(ohci, OHCI1394_PhyControl,
                  OHCI1394_PhyControl_Write(addr, val));
        for (i = 0; i < 3 + 100; i++) {
                val = reg_read(ohci, OHCI1394_PhyControl);
                if (!~val)
                        return -ENODEV; /* Card was ejected. */

                if (!(val & OHCI1394_PhyControl_WritePending))
                        return 0;

                if (i >= 3)
                        msleep(1);
        }
        dev_err(ohci->card.device, "failed to write phy reg\n");

        return -EBUSY;
}

static int update_phy_reg(struct fw_ohci *ohci, int addr,
                          int clear_bits, int set_bits)
{
        int ret = read_phy_reg(ohci, addr);
        if (ret < 0)
                return ret;

        /*
         * The interrupt status bits are cleared by writing a one bit.
         * Avoid clearing them unless explicitly requested in set_bits.
         */
        if (addr == 5)
                clear_bits |= PHY_INT_STATUS_BITS;

        return write_phy_reg(ohci, addr, (ret & ~clear_bits) | set_bits);
}

static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr)
{
        int ret;

        ret = update_phy_reg(ohci, 7, PHY_PAGE_SELECT, page << 5);
        if (ret < 0)
                return ret;

        return read_phy_reg(ohci, addr);
}

static int ohci_read_phy_reg(struct fw_card *card, int addr)
{
        struct fw_ohci *ohci = fw_ohci(card);
        int ret;

        mutex_lock(&ohci->phy_reg_mutex);
        ret = read_phy_reg(ohci, addr);
        mutex_unlock(&ohci->phy_reg_mutex);

        return ret;
}

static int ohci_update_phy_reg(struct fw_card *card, int addr,
                               int clear_bits, int set_bits)
{
        struct fw_ohci *ohci = fw_ohci(card);
        int ret;

        mutex_lock(&ohci->phy_reg_mutex);
        ret = update_phy_reg(ohci, addr, clear_bits, set_bits);
        mutex_unlock(&ohci->phy_reg_mutex);

        return ret;
}

static inline dma_addr_t ar_buffer_bus(struct ar_context *ctx, unsigned int i)
{
        return page_private(ctx->pages[i]);
}

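/*
 * Re-queue buffer page 'index' at the tail of the AR descriptor ring:
 * its descriptor is reset to "empty", the branch Z field of the previous
 * tail descriptor is set to 1 so the controller will branch into it, and
 * the context is woken in case it had stalled at the old tail.
 */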
static void ar_context_link_page(struct ar_context *ctx, unsigned int index)
{
        struct descriptor *d;

        d = &ctx->descriptors[index];
        d->branch_address  &= cpu_to_le32(~0xf);
        d->res_count       =  cpu_to_le16(PAGE_SIZE);
        d->transfer_status =  0;

        wmb(); /* finish init of new descriptors before branch_address update */
        d = &ctx->descriptors[ctx->last_buffer_index];
        d->branch_address  |= cpu_to_le32(1);

        ctx->last_buffer_index = index;

        reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
}

static void ar_context_release(struct ar_context *ctx)
{
        unsigned int i;

        if (ctx->buffer)
                vm_unmap_ram(ctx->buffer, AR_BUFFERS + AR_WRAPAROUND_PAGES);

        for (i = 0; i < AR_BUFFERS; i++)
                if (ctx->pages[i]) {
                        dma_unmap_page(ctx->ohci->card.device,
                                       ar_buffer_bus(ctx, i),
                                       PAGE_SIZE, DMA_FROM_DEVICE);
                        __free_page(ctx->pages[i]);
                }
}

static void ar_context_abort(struct ar_context *ctx, const char *error_msg)
{
        struct fw_ohci *ohci = ctx->ohci;

        if (reg_read(ohci, CONTROL_CLEAR(ctx->regs)) & CONTEXT_RUN) {
                reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
                flush_writes(ohci);

                dev_err(ohci->card.device, "AR error: %s; DMA stopped\n",
                        error_msg);
        }
        /* FIXME: restart? */
}

static inline unsigned int ar_next_buffer_index(unsigned int index)
{
        return (index + 1) % AR_BUFFERS;
}

static inline unsigned int ar_prev_buffer_index(unsigned int index)
{
        return (index - 1 + AR_BUFFERS) % AR_BUFFERS;
}

static inline unsigned int ar_first_buffer_index(struct ar_context *ctx)
{
        return ar_next_buffer_index(ctx->last_buffer_index);
}

/*
 * We search for the buffer that contains the last AR packet DMA data written
 * by the controller.
 */
static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
                                                 unsigned int *buffer_offset)
{
        unsigned int i, next_i, last = ctx->last_buffer_index;
        __le16 res_count, next_res_count;

        i = ar_first_buffer_index(ctx);
        res_count = ACCESS_ONCE(ctx->descriptors[i].res_count);

        /* A buffer that is not yet completely filled must be the last one. */
        while (i != last && res_count == 0) {

                /* Peek at the next descriptor. */
                next_i = ar_next_buffer_index(i);
                rmb(); /* read descriptors in order */
                next_res_count = ACCESS_ONCE(
                                ctx->descriptors[next_i].res_count);
                /*
                 * If the next descriptor is still empty, we must stop at this
                 * descriptor.
                 */
                if (next_res_count == cpu_to_le16(PAGE_SIZE)) {
                        /*
                         * The exception is when the DMA data for one packet is
                         * split over three buffers; in this case, the middle
                         * buffer's descriptor might never be updated by the
                         * controller and still look empty, so we have to peek
                         * at the third one.
                         */
                        if (MAX_AR_PACKET_SIZE > PAGE_SIZE && i != last) {
                                next_i = ar_next_buffer_index(next_i);
                                rmb();
                                next_res_count = ACCESS_ONCE(
                                        ctx->descriptors[next_i].res_count);
                                if (next_res_count != cpu_to_le16(PAGE_SIZE))
                                        goto next_buffer_is_active;
                        }

                        break;
                }

next_buffer_is_active:
                i = next_i;
                res_count = next_res_count;
        }

        rmb(); /* read res_count before the DMA data */

        *buffer_offset = PAGE_SIZE - le16_to_cpu(res_count);
        if (*buffer_offset > PAGE_SIZE) {
                *buffer_offset = 0;
                ar_context_abort(ctx, "corrupted descriptor");
        }

        return i;
}

static void ar_sync_buffers_for_cpu(struct ar_context *ctx,
                                    unsigned int end_buffer_index,
                                    unsigned int end_buffer_offset)
{
        unsigned int i;

        i = ar_first_buffer_index(ctx);
        while (i != end_buffer_index) {
                dma_sync_single_for_cpu(ctx->ohci->card.device,
                                        ar_buffer_bus(ctx, i),
                                        PAGE_SIZE, DMA_FROM_DEVICE);
                i = ar_next_buffer_index(i);
        }
        if (end_buffer_offset > 0)
                dma_sync_single_for_cpu(ctx->ohci->card.device,
                                        ar_buffer_bus(ctx, i),
                                        end_buffer_offset, DMA_FROM_DEVICE);
}

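/*
 * Controllers with QUIRK_BE_HEADERS (Apple UniNorth, see ohci_quirks[])
 * deliver AR header quadlets in big-endian byte order; on big-endian
 * PPC32 those are already in host order and must not be byte-swapped.
 */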
#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
#define cond_le32_to_cpu(v) \
        (ohci->quirks & QUIRK_BE_HEADERS ? (__force __u32)(v) : le32_to_cpu(v))
#else
#define cond_le32_to_cpu(v) le32_to_cpu(v)
#endif

static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
{
        struct fw_ohci *ohci = ctx->ohci;
        struct fw_packet p;
        u32 status, length, tcode;
        int evt;

        p.header[0] = cond_le32_to_cpu(buffer[0]);
        p.header[1] = cond_le32_to_cpu(buffer[1]);
        p.header[2] = cond_le32_to_cpu(buffer[2]);

        tcode = (p.header[0] >> 4) & 0x0f;
        switch (tcode) {
        case TCODE_WRITE_QUADLET_REQUEST:
        case TCODE_READ_QUADLET_RESPONSE:
                p.header[3] = (__force __u32) buffer[3];
                p.header_length = 16;
                p.payload_length = 0;
                break;

        case TCODE_READ_BLOCK_REQUEST:
                p.header[3] = cond_le32_to_cpu(buffer[3]);
                p.header_length = 16;
                p.payload_length = 0;
                break;

        case TCODE_WRITE_BLOCK_REQUEST:
        case TCODE_READ_BLOCK_RESPONSE:
        case TCODE_LOCK_REQUEST:
        case TCODE_LOCK_RESPONSE:
                p.header[3] = cond_le32_to_cpu(buffer[3]);
                p.header_length = 16;
                p.payload_length = p.header[3] >> 16;
                if (p.payload_length > MAX_ASYNC_PAYLOAD) {
                        ar_context_abort(ctx, "invalid packet length");
                        return NULL;
                }
                break;

        case TCODE_WRITE_RESPONSE:
        case TCODE_READ_QUADLET_REQUEST:
        case OHCI_TCODE_PHY_PACKET:
                p.header_length = 12;
                p.payload_length = 0;
                break;

        default:
                ar_context_abort(ctx, "invalid tcode");
                return NULL;
        }

        p.payload = (void *) buffer + p.header_length;

        /* FIXME: What to do about evt_* errors? */
        length = (p.header_length + p.payload_length + 3) / 4;
        status = cond_le32_to_cpu(buffer[length]);
        evt    = (status >> 16) & 0x1f;

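        /*
         * The controller encodes IEEE 1394 ack codes as event codes with
         * an offset of 0x10 (e.g. evt 0x11 = ack_complete), hence the
         * subtraction of 16 below.
         */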
        p.ack        = evt - 16;
        p.speed      = (status >> 21) & 0x7;
        p.timestamp  = status & 0xffff;
        p.generation = ohci->request_generation;

        log_ar_at_event(ohci, 'R', p.speed, p.header, evt);

        /*
         * Several controllers, notably from NEC and VIA, forget to
         * write ack_complete status at PHY packet reception.
         */
        if (evt == OHCI1394_evt_no_status &&
            (p.header[0] & 0xff) == (OHCI1394_phy_tcode << 4))
                p.ack = ACK_COMPLETE;

        /*
         * The OHCI bus reset handler synthesizes a PHY packet with
         * the new generation number when a bus reset happens (see
         * section 8.4.2.3).  This helps us determine when a request
         * was received and make sure we send the response in the same
         * generation.  We only need this for requests; for responses
         * we use the unique tlabel for finding the matching
         * request.
         *
         * Alas some chips sometimes emit bus reset packets with a
         * wrong generation.  We set the correct generation for these
         * at a slightly incorrect time (in bus_reset_work).
         */
        if (evt == OHCI1394_evt_bus_reset) {
                if (!(ohci->quirks & QUIRK_RESET_PACKET))
                        ohci->request_generation = (p.header[2] >> 16) & 0xff;
        } else if (ctx == &ohci->ar_request_ctx) {
                fw_core_handle_request(&ohci->card, &p);
        } else {
                fw_core_handle_response(&ohci->card, &p);
        }

        return buffer + length + 1;
}

static void *handle_ar_packets(struct ar_context *ctx, void *p, void *end)
{
        void *next;

        while (p < end) {
                next = handle_ar_packet(ctx, p);
                if (!next)
                        return p;
                p = next;
        }

        return p;
}

static void ar_recycle_buffers(struct ar_context *ctx, unsigned int end_buffer)
{
        unsigned int i;

        i = ar_first_buffer_index(ctx);
        while (i != end_buffer) {
                dma_sync_single_for_device(ctx->ohci->card.device,
                                           ar_buffer_bus(ctx, i),
                                           PAGE_SIZE, DMA_FROM_DEVICE);
                ar_context_link_page(ctx, i);
                i = ar_next_buffer_index(i);
        }
}

static void ar_context_tasklet(unsigned long data)
{
        struct ar_context *ctx = (struct ar_context *)data;
        unsigned int end_buffer_index, end_buffer_offset;
        void *p, *end;

        p = ctx->pointer;
        if (!p)
                return;

        end_buffer_index = ar_search_last_active_buffer(ctx,
                                                        &end_buffer_offset);
        ar_sync_buffers_for_cpu(ctx, end_buffer_index, end_buffer_offset);
        end = ctx->buffer + end_buffer_index * PAGE_SIZE + end_buffer_offset;

        if (end_buffer_index < ar_first_buffer_index(ctx)) {
                /*
                 * The filled part of the overall buffer wraps around; handle
                 * all packets up to the buffer end here.  If the last packet
                 * wraps around, its tail will be visible after the buffer end
                 * because the buffer start pages are mapped there again.
                 */
                void *buffer_end = ctx->buffer + AR_BUFFERS * PAGE_SIZE;
                p = handle_ar_packets(ctx, p, buffer_end);
                if (p < buffer_end)
                        goto error;
                /* adjust p to point back into the actual buffer */
                p -= AR_BUFFERS * PAGE_SIZE;
        }

        p = handle_ar_packets(ctx, p, end);
        if (p != end) {
                if (p > end)
                        ar_context_abort(ctx, "inconsistent descriptor");
                goto error;
        }

        ctx->pointer = p;
        ar_recycle_buffers(ctx, end_buffer_index);

        return;

error:
        ctx->pointer = NULL;
}

static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci,
                           unsigned int descriptors_offset, u32 regs)
{
        unsigned int i;
        dma_addr_t dma_addr;
        struct page *pages[AR_BUFFERS + AR_WRAPAROUND_PAGES];
        struct descriptor *d;

        ctx->regs        = regs;
        ctx->ohci        = ohci;
        tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);

        for (i = 0; i < AR_BUFFERS; i++) {
                ctx->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32);
                if (!ctx->pages[i])
                        goto out_of_memory;
                dma_addr = dma_map_page(ohci->card.device, ctx->pages[i],
                                        0, PAGE_SIZE, DMA_FROM_DEVICE);
                if (dma_mapping_error(ohci->card.device, dma_addr)) {
                        __free_page(ctx->pages[i]);
                        ctx->pages[i] = NULL;
                        goto out_of_memory;
                }
                set_page_private(ctx->pages[i], dma_addr);
        }

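        /*
         * Map all AR pages into one contiguous virtual range, with the
         * first AR_WRAPAROUND_PAGES of them mapped a second time right
         * after the end, so that a packet wrapping around the ring is
         * still readable as one contiguous chunk.
         */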
        for (i = 0; i < AR_BUFFERS; i++)
                pages[i]              = ctx->pages[i];
        for (i = 0; i < AR_WRAPAROUND_PAGES; i++)
                pages[AR_BUFFERS + i] = ctx->pages[i];
        ctx->buffer = vm_map_ram(pages, AR_BUFFERS + AR_WRAPAROUND_PAGES,
                                 -1, PAGE_KERNEL);
        if (!ctx->buffer)
                goto out_of_memory;

        ctx->descriptors     = ohci->misc_buffer     + descriptors_offset;
        ctx->descriptors_bus = ohci->misc_buffer_bus + descriptors_offset;

        for (i = 0; i < AR_BUFFERS; i++) {
                d = &ctx->descriptors[i];
                d->req_count      = cpu_to_le16(PAGE_SIZE);
                d->control        = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
                                                DESCRIPTOR_STATUS |
                                                DESCRIPTOR_BRANCH_ALWAYS);
                d->data_address   = cpu_to_le32(ar_buffer_bus(ctx, i));
                d->branch_address = cpu_to_le32(ctx->descriptors_bus +
                        ar_next_buffer_index(i) * sizeof(struct descriptor));
        }

        return 0;

out_of_memory:
        ar_context_release(ctx);

        return -ENOMEM;
}

static void ar_context_run(struct ar_context *ctx)
{
        unsigned int i;

        for (i = 0; i < AR_BUFFERS; i++)
                ar_context_link_page(ctx, i);

        ctx->pointer = ctx->buffer;

        reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ctx->descriptors_bus | 1);
        reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
}

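/*
 * z is the number of descriptors in the block that starts at d.  The
 * branch address normally lives in the block's last descriptor; the
 * exception is a two-descriptor block whose first descriptor already
 * carries DESCRIPTOR_BRANCH_ALWAYS (an immediate-data-only block), in
 * which case the branch sits up front.
 */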
static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
{
        __le16 branch;

        branch = d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS);

        /* figure out which descriptor the branch address goes in */
        if (z == 2 && branch == cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
                return d;
        else
                return d + z - 1;
}

static void context_tasklet(unsigned long data)
{
        struct context *ctx = (struct context *) data;
        struct descriptor *d, *last;
        u32 address;
        int z;
        struct descriptor_buffer *desc;

        desc = list_entry(ctx->buffer_list.next,
                        struct descriptor_buffer, list);
        last = ctx->last;
        while (last->branch_address != 0) {
                struct descriptor_buffer *old_desc = desc;
                address = le32_to_cpu(last->branch_address);
                z = address & 0xf;
                address &= ~0xf;
                ctx->current_bus = address;

                /* If the branch address points to a buffer outside of the
                 * current buffer, advance to the next buffer. */
                if (address < desc->buffer_bus ||
                                address >= desc->buffer_bus + desc->used)
                        desc = list_entry(desc->list.next,
                                        struct descriptor_buffer, list);
                d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
                last = find_branch_descriptor(d, z);

                if (!ctx->callback(ctx, d, last))
                        break;

                if (old_desc != desc) {
                        /* If we've advanced to the next buffer, move the
                         * previous buffer to the free list. */
                        unsigned long flags;
                        old_desc->used = 0;
                        spin_lock_irqsave(&ctx->ohci->lock, flags);
                        list_move_tail(&old_desc->list, &ctx->buffer_list);
                        spin_unlock_irqrestore(&ctx->ohci->lock, flags);
                }
                ctx->last = last;
        }
}

/*
 * Allocate a new buffer and add it to the list of free buffers for this
 * context.  Must be called with ohci->lock held.
 */
static int context_add_buffer(struct context *ctx)
{
        struct descriptor_buffer *desc;
        dma_addr_t uninitialized_var(bus_addr);
        int offset;

        /*
         * 16MB of descriptors should be far more than enough for any DMA
         * program.  This will catch run-away userspace or DoS attacks.
         */
        if (ctx->total_allocation >= 16*1024*1024)
                return -ENOMEM;

        desc = dma_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE,
                        &bus_addr, GFP_ATOMIC);
        if (!desc)
                return -ENOMEM;

        offset = (void *)&desc->buffer - (void *)desc;
        desc->buffer_size = PAGE_SIZE - offset;
        desc->buffer_bus = bus_addr + offset;
        desc->used = 0;

        list_add_tail(&desc->list, &ctx->buffer_list);
        ctx->total_allocation += PAGE_SIZE;

        return 0;
}

static int context_init(struct context *ctx, struct fw_ohci *ohci,
                        u32 regs, descriptor_callback_t callback)
{
        ctx->ohci = ohci;
        ctx->regs = regs;
        ctx->total_allocation = 0;

        INIT_LIST_HEAD(&ctx->buffer_list);
        if (context_add_buffer(ctx) < 0)
                return -ENOMEM;

        ctx->buffer_tail = list_entry(ctx->buffer_list.next,
                        struct descriptor_buffer, list);

        tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
        ctx->callback = callback;

        /*
         * We put a dummy descriptor in the buffer that has a NULL
         * branch address and looks like it's been sent.  That way we
         * have a descriptor to append DMA programs to.
         */
        memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer));
        ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
        ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011);
        ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
        ctx->last = ctx->buffer_tail->buffer;
        ctx->prev = ctx->buffer_tail->buffer;

        return 0;
}

static void context_release(struct context *ctx)
{
        struct fw_card *card = &ctx->ohci->card;
        struct descriptor_buffer *desc, *tmp;

        list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list)
                dma_free_coherent(card->device, PAGE_SIZE, desc,
                        desc->buffer_bus -
                        ((void *)&desc->buffer - (void *)desc));
}

/* Must be called with ohci->lock held */
static struct descriptor *context_get_descriptors(struct context *ctx,
                                                  int z, dma_addr_t *d_bus)
{
        struct descriptor *d = NULL;
        struct descriptor_buffer *desc = ctx->buffer_tail;

        if (z * sizeof(*d) > desc->buffer_size)
                return NULL;

        if (z * sizeof(*d) > desc->buffer_size - desc->used) {
                /* No room for the descriptor in this buffer, so advance to the
                 * next one. */

                if (desc->list.next == &ctx->buffer_list) {
                        /* If there is no free buffer next in the list,
                         * allocate one. */
                        if (context_add_buffer(ctx) < 0)
                                return NULL;
                }
                desc = list_entry(desc->list.next,
                                struct descriptor_buffer, list);
                ctx->buffer_tail = desc;
        }

        d = desc->buffer + desc->used / sizeof(*d);
        memset(d, 0, z * sizeof(*d));
        *d_bus = desc->buffer_bus + desc->used;

        return d;
}

static void context_run(struct context *ctx, u32 extra)
{
        struct fw_ohci *ohci = ctx->ohci;

        reg_write(ohci, COMMAND_PTR(ctx->regs),
                  le32_to_cpu(ctx->last->branch_address));
        reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
        reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
        ctx->running = true;
        flush_writes(ohci);
}

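/*
 * Append a block of z descriptors (plus 'extra' trailing descriptors,
 * e.g. for driver-private data) to the DMA program.  The previous tail's
 * branch_address is pointed at the new block, with the Z count encoded
 * in its low four bits.
 */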
static void context_append(struct context *ctx,
                           struct descriptor *d, int z, int extra)
{
        dma_addr_t d_bus;
        struct descriptor_buffer *desc = ctx->buffer_tail;

        d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);

        desc->used += (z + extra) * sizeof(*d);

        wmb(); /* finish init of new descriptors before branch_address update */
        ctx->prev->branch_address = cpu_to_le32(d_bus | z);
        ctx->prev = find_branch_descriptor(d, z);
}

static void context_stop(struct context *ctx)
{
        struct fw_ohci *ohci = ctx->ohci;
        u32 reg;
        int i;

        reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
        ctx->running = false;

        for (i = 0; i < 1000; i++) {
                reg = reg_read(ohci, CONTROL_SET(ctx->regs));
                if ((reg & CONTEXT_ACTIVE) == 0)
                        return;

                if (i)
                        udelay(10);
        }
        dev_err(ohci->card.device, "DMA context still active (0x%08x)\n", reg);
}

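/*
 * Per-packet data stashed in the fourth descriptor of an AT block.
 * Payloads of up to 8 bytes are copied into inline_data and transmitted
 * straight out of the descriptor buffer instead of being DMA-mapped
 * separately.
 */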
struct driver_data {
        u8 inline_data[8];
        struct fw_packet *packet;
};

/*
 * This function appends a packet to the DMA queue for transmission.
 * Must always be called with the ohci->lock held to ensure proper
 * generation handling and locking around packet queue manipulation.
 */
static int at_context_queue_packet(struct context *ctx,
                                   struct fw_packet *packet)
{
        struct fw_ohci *ohci = ctx->ohci;
        dma_addr_t d_bus, uninitialized_var(payload_bus);
        struct driver_data *driver_data;
        struct descriptor *d, *last;
        __le32 *header;
        int z, tcode;

        d = context_get_descriptors(ctx, 4, &d_bus);
        if (d == NULL) {
                packet->ack = RCODE_SEND_ERROR;
                return -1;
        }

        d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
        d[0].res_count = cpu_to_le16(packet->timestamp);

        /*
         * The DMA format for asynchronous link packets is different
         * from the IEEE1394 layout, so shift the fields around
         * accordingly.
         */

        tcode = (packet->header[0] >> 4) & 0x0f;
        header = (__le32 *) &d[1];
        switch (tcode) {
        case TCODE_WRITE_QUADLET_REQUEST:
        case TCODE_WRITE_BLOCK_REQUEST:
        case TCODE_WRITE_RESPONSE:
        case TCODE_READ_QUADLET_REQUEST:
        case TCODE_READ_BLOCK_REQUEST:
        case TCODE_READ_QUADLET_RESPONSE:
        case TCODE_READ_BLOCK_RESPONSE:
        case TCODE_LOCK_REQUEST:
        case TCODE_LOCK_RESPONSE:
                header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
                                        (packet->speed << 16));
                header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
                                        (packet->header[0] & 0xffff0000));
                header[2] = cpu_to_le32(packet->header[2]);

                if (TCODE_IS_BLOCK_PACKET(tcode))
                        header[3] = cpu_to_le32(packet->header[3]);
                else
                        header[3] = (__force __le32) packet->header[3];

                d[0].req_count = cpu_to_le16(packet->header_length);
                break;

        case TCODE_LINK_INTERNAL:
                header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
                                        (packet->speed << 16));
                header[1] = cpu_to_le32(packet->header[1]);
                header[2] = cpu_to_le32(packet->header[2]);
                d[0].req_count = cpu_to_le16(12);

                if (is_ping_packet(&packet->header[1]))
                        d[0].control |= cpu_to_le16(DESCRIPTOR_PING);
                break;

        case TCODE_STREAM_DATA:
                header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
                                        (packet->speed << 16));
                header[1] = cpu_to_le32(packet->header[0] & 0xffff0000);
                d[0].req_count = cpu_to_le16(8);
                break;

        default:
                /* BUG(); */
                packet->ack = RCODE_SEND_ERROR;
                return -1;
        }

        BUILD_BUG_ON(sizeof(struct driver_data) > sizeof(struct descriptor));
        driver_data = (struct driver_data *) &d[3];
        driver_data->packet = packet;
        packet->driver_data = driver_data;

        if (packet->payload_length > 0) {
                if (packet->payload_length > sizeof(driver_data->inline_data)) {
                        payload_bus = dma_map_single(ohci->card.device,
                                                     packet->payload,
                                                     packet->payload_length,
                                                     DMA_TO_DEVICE);
                        if (dma_mapping_error(ohci->card.device, payload_bus)) {
                                packet->ack = RCODE_SEND_ERROR;
                                return -1;
                        }
                        packet->payload_bus     = payload_bus;
                        packet->payload_mapped  = true;
                } else {
                        memcpy(driver_data->inline_data, packet->payload,
                               packet->payload_length);
                        payload_bus = d_bus + 3 * sizeof(*d);
                }

                d[2].req_count    = cpu_to_le16(packet->payload_length);
                d[2].data_address = cpu_to_le32(payload_bus);
                last = &d[2];
                z = 3;
        } else {
                last = &d[0];
                z = 2;
        }

        last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
                                     DESCRIPTOR_IRQ_ALWAYS |
                                     DESCRIPTOR_BRANCH_ALWAYS);

        /* FIXME: Document how the locking works. */
        if (ohci->generation != packet->generation) {
                if (packet->payload_mapped)
                        dma_unmap_single(ohci->card.device, payload_bus,
                                         packet->payload_length, DMA_TO_DEVICE);
                packet->ack = RCODE_GENERATION;
                return -1;
        }

        context_append(ctx, d, z, 4 - z);

        if (ctx->running)
                reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
        else
                context_run(ctx, 0);

        return 0;
}

static void at_context_flush(struct context *ctx)
{
        tasklet_disable(&ctx->tasklet);

        ctx->flushing = true;
        context_tasklet((unsigned long)ctx);
        ctx->flushing = false;

        tasklet_enable(&ctx->tasklet);
}

static int handle_at_packet(struct context *context,
                            struct descriptor *d,
                            struct descriptor *last)
{
        struct driver_data *driver_data;
        struct fw_packet *packet;
        struct fw_ohci *ohci = context->ohci;
        int evt;

        if (last->transfer_status == 0 && !context->flushing)
                /* This descriptor isn't done yet, stop iteration. */
                return 0;

        driver_data = (struct driver_data *) &d[3];
        packet = driver_data->packet;
        if (packet == NULL)
                /* This packet was cancelled, just continue. */
                return 1;

        if (packet->payload_mapped)
                dma_unmap_single(ohci->card.device, packet->payload_bus,
                                 packet->payload_length, DMA_TO_DEVICE);

        evt = le16_to_cpu(last->transfer_status) & 0x1f;
        packet->timestamp = le16_to_cpu(last->res_count);

        log_ar_at_event(ohci, 'T', packet->speed, packet->header, evt);

        switch (evt) {
        case OHCI1394_evt_timeout:
                /* Async response transmit timed out. */
                packet->ack = RCODE_CANCELLED;
                break;

        case OHCI1394_evt_flushed:
1439                 /*
1440                  * The packet was flushed; this should give the same error
1441                  * as when we try to use a stale generation count.
1442                  */
1443                 packet->ack = RCODE_GENERATION;
1444                 break;
1445
1446         case OHCI1394_evt_missing_ack:
1447                 if (context->flushing)
1448                         packet->ack = RCODE_GENERATION;
1449                 else {
1450                         /*
1451                          * Using a valid (current) generation count, but the
1452                          * node is not on the bus or not sending acks.
1453                          */
1454                         packet->ack = RCODE_NO_ACK;
1455                 }
1456                 break;
1457
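        /*
         * As the case labels show, the controller reports a received
         * ack as the corresponding IEEE 1394 ack code plus 0x10, so
         * subtracting 0x10 below recovers the original ack value.
         */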
1458         case ACK_COMPLETE + 0x10:
1459         case ACK_PENDING + 0x10:
1460         case ACK_BUSY_X + 0x10:
1461         case ACK_BUSY_A + 0x10:
1462         case ACK_BUSY_B + 0x10:
1463         case ACK_DATA_ERROR + 0x10:
1464         case ACK_TYPE_ERROR + 0x10:
1465                 packet->ack = evt - 0x10;
1466                 break;
1467
1468         case OHCI1394_evt_no_status:
1469                 if (context->flushing) {
1470                         packet->ack = RCODE_GENERATION;
1471                         break;
1472                 }
1473                 /* fall through */
1474
1475         default:
1476                 packet->ack = RCODE_SEND_ERROR;
1477                 break;
1478         }
1479
1480         packet->callback(packet, &ohci->card, packet->ack);
1481
1482         return 1;
1483 }
1484
1485 #define HEADER_GET_DESTINATION(q)       (((q) >> 16) & 0xffff)
1486 #define HEADER_GET_TCODE(q)             (((q) >> 4) & 0x0f)
1487 #define HEADER_GET_OFFSET_HIGH(q)       (((q) >> 0) & 0xffff)
1488 #define HEADER_GET_DATA_LENGTH(q)       (((q) >> 16) & 0xffff)
1489 #define HEADER_GET_EXTENDED_TCODE(q)    (((q) >> 0) & 0xffff)
1490
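/*
 * A worked example with a hypothetical request header: for
 * packet->header[0] == 0xffc00140, HEADER_GET_DESTINATION() yields
 * 0xffc0 and HEADER_GET_TCODE() yields 4, i.e. a quadlet read request.
 */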
1491 static void handle_local_rom(struct fw_ohci *ohci,
1492                              struct fw_packet *packet, u32 csr)
1493 {
1494         struct fw_packet response;
1495         int tcode, length, i;
1496
1497         tcode = HEADER_GET_TCODE(packet->header[0]);
1498         if (TCODE_IS_BLOCK_PACKET(tcode))
1499                 length = HEADER_GET_DATA_LENGTH(packet->header[3]);
1500         else
1501                 length = 4;
1502
1503         i = csr - CSR_CONFIG_ROM;
1504         if (i + length > CONFIG_ROM_SIZE) {
1505                 fw_fill_response(&response, packet->header,
1506                                  RCODE_ADDRESS_ERROR, NULL, 0);
1507         } else if (!TCODE_IS_READ_REQUEST(tcode)) {
1508                 fw_fill_response(&response, packet->header,
1509                                  RCODE_TYPE_ERROR, NULL, 0);
1510         } else {
1511                 fw_fill_response(&response, packet->header, RCODE_COMPLETE,
1512                                  (void *) ohci->config_rom + i, length);
1513         }
1514
1515         fw_core_handle_response(&ohci->card, &response);
1516 }
1517
1518 static void handle_local_lock(struct fw_ohci *ohci,
1519                               struct fw_packet *packet, u32 csr)
1520 {
1521         struct fw_packet response;
1522         int tcode, length, ext_tcode, sel, try;
1523         __be32 *payload, lock_old;
1524         u32 lock_arg, lock_data;
1525
1526         tcode = HEADER_GET_TCODE(packet->header[0]);
1527         length = HEADER_GET_DATA_LENGTH(packet->header[3]);
1528         payload = packet->payload;
1529         ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]);
1530
1531         if (tcode == TCODE_LOCK_REQUEST &&
1532             ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
1533                 lock_arg = be32_to_cpu(payload[0]);
1534                 lock_data = be32_to_cpu(payload[1]);
1535         } else if (tcode == TCODE_READ_QUADLET_REQUEST) {
1536                 lock_arg = 0;
1537                 lock_data = 0;
1538         } else {
1539                 fw_fill_response(&response, packet->header,
1540                                  RCODE_TYPE_ERROR, NULL, 0);
1541                 goto out;
1542         }
1543
1544         sel = (csr - CSR_BUS_MANAGER_ID) / 4;
1545         reg_write(ohci, OHCI1394_CSRData, lock_data);
1546         reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
1547         reg_write(ohci, OHCI1394_CSRControl, sel);
1548
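        /*
         * The compare-swap itself is carried out by the controller;
         * bit 31 of CSRControl signals completion, so poll it for a
         * bounded number of reads before declaring a timeout.
         */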
1549         for (try = 0; try < 20; try++)
1550                 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000) {
1551                         lock_old = cpu_to_be32(reg_read(ohci,
1552                                                         OHCI1394_CSRData));
1553                         fw_fill_response(&response, packet->header,
1554                                          RCODE_COMPLETE,
1555                                          &lock_old, sizeof(lock_old));
1556                         goto out;
1557                 }
1558
1559         dev_err(ohci->card.device, "swap not done (CSR lock timeout)\n");
1560         fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0);
1561
1562  out:
1563         fw_core_handle_response(&ohci->card, &response);
1564 }
1565
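/*
 * Requests addressed to the local node never hit the wire, so fake the
 * ack phase here (ack_pending for requests, ack_complete for responses)
 * and serve config ROM reads and the lock CSRs directly instead of
 * going through the DMA contexts.
 */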
1566 static void handle_local_request(struct context *ctx, struct fw_packet *packet)
1567 {
1568         u64 offset, csr;
1569
1570         if (ctx == &ctx->ohci->at_request_ctx) {
1571                 packet->ack = ACK_PENDING;
1572                 packet->callback(packet, &ctx->ohci->card, packet->ack);
1573         }
1574
1575         offset =
1576                 ((unsigned long long)
1577                  HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
1578                 packet->header[2];
1579         csr = offset - CSR_REGISTER_BASE;
1580
1581         /* Handle config rom reads. */
1582         if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
1583                 handle_local_rom(ctx->ohci, packet, csr);
1584         else switch (csr) {
1585         case CSR_BUS_MANAGER_ID:
1586         case CSR_BANDWIDTH_AVAILABLE:
1587         case CSR_CHANNELS_AVAILABLE_HI:
1588         case CSR_CHANNELS_AVAILABLE_LO:
1589                 handle_local_lock(ctx->ohci, packet, csr);
1590                 break;
1591         default:
1592                 if (ctx == &ctx->ohci->at_request_ctx)
1593                         fw_core_handle_request(&ctx->ohci->card, packet);
1594                 else
1595                         fw_core_handle_response(&ctx->ohci->card, packet);
1596                 break;
1597         }
1598
1599         if (ctx == &ctx->ohci->at_response_ctx) {
1600                 packet->ack = ACK_COMPLETE;
1601                 packet->callback(packet, &ctx->ohci->card, packet->ack);
1602         }
1603 }
1604
1605 static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
1606 {
1607         unsigned long flags;
1608         int ret;
1609
1610         spin_lock_irqsave(&ctx->ohci->lock, flags);
1611
1612         if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
1613             ctx->ohci->generation == packet->generation) {
1614                 spin_unlock_irqrestore(&ctx->ohci->lock, flags);
1615                 handle_local_request(ctx, packet);
1616                 return;
1617         }
1618
1619         ret = at_context_queue_packet(ctx, packet);
1620         spin_unlock_irqrestore(&ctx->ohci->lock, flags);
1621
1622         if (ret < 0)
1623                 packet->callback(packet, &ctx->ohci->card, packet->ack);
1624
1625 }
1626
1627 static void detect_dead_context(struct fw_ohci *ohci,
1628                                 const char *name, unsigned int regs)
1629 {
1630         u32 ctl;
1631
1632         ctl = reg_read(ohci, CONTROL_SET(regs));
1633         if (ctl & CONTEXT_DEAD)
1634                 dev_err(ohci->card.device,
1635                         "DMA context %s has stopped, error code: %s\n",
1636                         name, evts[ctl & 0x1f]);
1637 }
1638
1639 static void handle_dead_contexts(struct fw_ohci *ohci)
1640 {
1641         unsigned int i;
1642         char name[8];
1643
1644         detect_dead_context(ohci, "ATReq", OHCI1394_AsReqTrContextBase);
1645         detect_dead_context(ohci, "ATRsp", OHCI1394_AsRspTrContextBase);
1646         detect_dead_context(ohci, "ARReq", OHCI1394_AsReqRcvContextBase);
1647         detect_dead_context(ohci, "ARRsp", OHCI1394_AsRspRcvContextBase);
1648         for (i = 0; i < 32; ++i) {
1649                 if (!(ohci->it_context_support & (1 << i)))
1650                         continue;
1651                 sprintf(name, "IT%u", i);
1652                 detect_dead_context(ohci, name, OHCI1394_IsoXmitContextBase(i));
1653         }
1654         for (i = 0; i < 32; ++i) {
1655                 if (!(ohci->ir_context_support & (1 << i)))
1656                         continue;
1657                 sprintf(name, "IR%u", i);
1658                 detect_dead_context(ohci, name, OHCI1394_IsoRcvContextBase(i));
1659         }
1660         /* TODO: maybe try to flush and restart the dead contexts */
1661 }
1662
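/*
 * Flatten the isochronousCycleTimer fields into one tick count of the
 * 24.576 MHz clock: cycleOffset in bits 0-11 counts single ticks,
 * cycleCount in bits 12-24 counts 3072-tick cycles, and cycleSeconds in
 * bits 25-31 counts 3072 * 8000 = 24576000-tick seconds.  For example,
 * 0x04002001 (2 seconds, 2 cycles, offset 1) becomes
 * 1 + 2 * 3072 + 2 * 24576000 = 49158145 ticks.
 */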
1663 static u32 cycle_timer_ticks(u32 cycle_timer)
1664 {
1665         u32 ticks;
1666
1667         ticks = cycle_timer & 0xfff;
1668         ticks += 3072 * ((cycle_timer >> 12) & 0x1fff);
1669         ticks += (3072 * 8000) * (cycle_timer >> 25);
1670
1671         return ticks;
1672 }
1673
1674 /*
1675  * Some controllers exhibit one or more of the following bugs when updating the
1676  * iso cycle timer register:
1677  *  - When the lowest six bits are wrapping around to zero, a read that happens
1678  *    at the same time will return garbage in the lowest ten bits.
1679  *  - When the cycleOffset field wraps around to zero, the cycleCount field is
1680  *    not incremented for about 60 ns.
1681  *  - Occasionally, the entire register reads zero.
1682  *
1683  * To catch these, we read the register three times and ensure that the
1684  * difference between each two consecutive reads is approximately the same,
1685  * i.e., neither difference is more than twice the other.  Furthermore, any
1686  * negative difference indicates an error.  (A PCI read should take at least
1687  * 20 ticks of the 24.576 MHz timer, giving enough precision for this test.)
1688  */
1689 static u32 get_cycle_time(struct fw_ohci *ohci)
1690 {
1691         u32 c0, c1, c2;
1692         u32 t0, t1, t2;
1693         s32 diff01, diff12;
1694         int i;
1695
1696         c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1697
1698         if (ohci->quirks & QUIRK_CYCLE_TIMER) {
1699                 i = 0;
1700                 c1 = c2;
1701                 c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1702                 do {
1703                         c0 = c1;
1704                         c1 = c2;
1705                         c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1706                         t0 = cycle_timer_ticks(c0);
1707                         t1 = cycle_timer_ticks(c1);
1708                         t2 = cycle_timer_ticks(c2);
1709                         diff01 = t1 - t0;
1710                         diff12 = t2 - t1;
1711                 } while ((diff01 <= 0 || diff12 <= 0 ||
1712                           diff01 / diff12 >= 2 || diff12 / diff01 >= 2)
1713                          && i++ < 20);
1714         }
1715
1716         return c2;
1717 }
1718
1719 /*
1720  * This function has to be called at least every 64 seconds.  The bus_time
1721  * field stores not only the upper 25 bits of the BUS_TIME register but also
1722  * the most significant bit of the cycle timer in bit 6 so that we can detect
1723  * changes in this bit.
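 *
 * For example: if bus_time is 0x80 (bit 6 clear) and the seconds count
 * has since crossed a multiple of 64, bit 6 of cycle_time_seconds is
 * now set; the two bits differ, so 0x40 is added and bit 6 of bus_time
 * tracks the timer again.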
1724  */
1725 static u32 update_bus_time(struct fw_ohci *ohci)
1726 {
1727         u32 cycle_time_seconds = get_cycle_time(ohci) >> 25;
1728
1729         if ((ohci->bus_time & 0x40) != (cycle_time_seconds & 0x40))
1730                 ohci->bus_time += 0x40;
1731
1732         return ohci->bus_time | cycle_time_seconds;
1733 }
1734
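/*
 * Read one PHY port's status: PHY register 7 selects the port, register
 * 8 then reports its state.  The return values are the two-bit port
 * codes used in self-ID packets (1 = not connected, 2 = connected to
 * parent, 3 = connected to child), which find_and_insert_self_id()
 * shifts into the self ID it constructs.
 */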
1735 static int get_status_for_port(struct fw_ohci *ohci, int port_index)
1736 {
1737         int reg;
1738
1739         mutex_lock(&ohci->phy_reg_mutex);
1740         reg = write_phy_reg(ohci, 7, port_index);
1741         if (reg >= 0)
1742                 reg = read_phy_reg(ohci, 8);
1743         mutex_unlock(&ohci->phy_reg_mutex);
1744         if (reg < 0)
1745                 return reg;
1746
1747         switch (reg & 0x0f) {
1748         case 0x06:
1749                 return 2;       /* is child node (connected to parent node) */
1750         case 0x0e:
1751                 return 3;       /* is parent node (connected to child node) */
1752         }
1753         return 1;               /* not connected */
1754 }
1755
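/*
 * The self ID buffer is kept sorted by phy ID (which sits in the top
 * byte being compared).  Return the insertion position for a newly
 * constructed self ID, or -1 if one with the same phy ID is already
 * present.
 */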
1756 static int get_self_id_pos(struct fw_ohci *ohci, u32 self_id,
1757         int self_id_count)
1758 {
1759         int i;
1760         u32 entry;
1761
1762         for (i = 0; i < self_id_count; i++) {
1763                 entry = ohci->self_id_buffer[i];
1764                 if ((self_id & 0xff000000) == (entry & 0xff000000))
1765                         return -1;
1766                 if ((self_id & 0xff000000) < (entry & 0xff000000))
1767                         return i;
1768         }
1769         return i;
1770 }
1771
1772 /*
1773  * TI TSB82AA2B and TSB12LV26 do not receive the selfID of a locally
1774  * attached TSB41BA3D phy; see http://www.ti.com/litv/pdf/sllz059.
1775  * Construct the selfID from phy register contents.
1776  * FIXME:  How to determine the selfID.i flag?
1777  */
1778 static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
1779 {
1780         int reg, i, pos, status;
1781         /* link active 1, speed 3, bridge 0, contender 1, more packets 0 */
1782         u32 self_id = 0x8040c800;
1783
1784         reg = reg_read(ohci, OHCI1394_NodeID);
1785         if (!(reg & OHCI1394_NodeID_idValid)) {
1786                 dev_notice(ohci->card.device,
1787                            "node ID not valid, new bus reset in progress\n");
1788                 return -EBUSY;
1789         }
1790         self_id |= ((reg & 0x3f) << 24); /* phy ID */
1791
1792         reg = ohci_read_phy_reg(&ohci->card, 4);
1793         if (reg < 0)
1794                 return reg;
1795         self_id |= ((reg & 0x07) << 8); /* power class */
1796
1797         reg = ohci_read_phy_reg(&ohci->card, 1);
1798         if (reg < 0)
1799                 return reg;
1800         self_id |= ((reg & 0x3f) << 16); /* gap count */
1801
1802         for (i = 0; i < 3; i++) {
1803                 status = get_status_for_port(ohci, i);
1804                 if (status < 0)
1805                         return status;
1806                 self_id |= ((status & 0x3) << (6 - (i * 2)));
1807         }
1808
1809         pos = get_self_id_pos(ohci, self_id, self_id_count);
1810         if (pos >= 0) {
1811                 memmove(&(ohci->self_id_buffer[pos+1]),
1812                         &(ohci->self_id_buffer[pos]),
1813                         (self_id_count - pos) * sizeof(*ohci->self_id_buffer));
1814                 ohci->self_id_buffer[pos] = self_id;
1815                 self_id_count++;
1816         }
1817         return self_id_count;
1818 }
1819
1820 static void bus_reset_work(struct work_struct *work)
1821 {
1822         struct fw_ohci *ohci =
1823                 container_of(work, struct fw_ohci, bus_reset_work);
1824         int self_id_count, i, j, reg;
1825         int generation, new_generation;
1826         unsigned long flags;
1827         void *free_rom = NULL;
1828         dma_addr_t free_rom_bus = 0;
1829         bool is_new_root;
1830
1831         reg = reg_read(ohci, OHCI1394_NodeID);
1832         if (!(reg & OHCI1394_NodeID_idValid)) {
1833                 dev_notice(ohci->card.device,
1834                            "node ID not valid, new bus reset in progress\n");
1835                 return;
1836         }
1837         if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
1838                 dev_notice(ohci->card.device, "misconfigured bus\n");
1839                 return;
1840         }
1841         ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
1842                                OHCI1394_NodeID_nodeNumber);
1843
1844         is_new_root = (reg & OHCI1394_NodeID_root) != 0;
1845         if (!(ohci->is_root && is_new_root))
1846                 reg_write(ohci, OHCI1394_LinkControlSet,
1847                           OHCI1394_LinkControl_cycleMaster);
1848         ohci->is_root = is_new_root;
1849
1850         reg = reg_read(ohci, OHCI1394_SelfIDCount);
1851         if (reg & OHCI1394_SelfIDCount_selfIDError) {
1852                 dev_notice(ohci->card.device, "inconsistent self IDs\n");
1853                 return;
1854         }
1855         /*
1856          * The count in the SelfIDCount register is the number of
1857          * bytes in the self ID receive buffer.  Since we also receive
1858          * the inverted quadlets and a header quadlet, we shift one
1859          * bit extra to get the actual number of self IDs.
1860          */
1861         self_id_count = (reg >> 3) & 0xff;
1862
1863         if (self_id_count > 252) {
1864                 dev_notice(ohci->card.device, "inconsistent self IDs\n");
1865                 return;
1866         }
1867
1868         generation = (cond_le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;
1869         rmb();
1870
1871         for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
1872                 if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) {
1873                         /*
1874                          * If the invalid data looks like a cycle start packet,
1875                          * it's likely to be the result of the cycle master
1876                          * having a wrong gap count.  In this case, the self IDs
1877                          * so far are valid and should be processed so that the
1878                          * bus manager can then correct the gap count.
1879                          */
1880                         if (cond_le32_to_cpu(ohci->self_id_cpu[i])
1881                                                         == 0xffff008f) {
1882                                 dev_notice(ohci->card.device,
1883                                            "ignoring spurious self IDs\n");
1884                                 self_id_count = j;
1885                                 break;
1886                         } else {
1887                                 dev_notice(ohci->card.device,
1888                                            "inconsistent self IDs\n");
1889                                 return;
1890                         }
1891                 }
1892                 ohci->self_id_buffer[j] =
1893                                 cond_le32_to_cpu(ohci->self_id_cpu[i]);
1894         }
1895
1896         if (ohci->quirks & QUIRK_TI_SLLZ059) {
1897                 self_id_count = find_and_insert_self_id(ohci, self_id_count);
1898                 if (self_id_count < 0) {
1899                         dev_notice(ohci->card.device,
1900                                    "could not construct local self ID\n");
1901                         return;
1902                 }
1903         }
1904
1905         if (self_id_count == 0) {
1906                 dev_notice(ohci->card.device, "inconsistent self IDs\n");
1907                 return;
1908         }
1909         rmb();
1910
1911         /*
1912          * Check the consistency of the self IDs we just read.  The
1913          * problem we face is that a new bus reset can start while we
1914          * read out the self IDs from the DMA buffer. If this happens,
1915          * the DMA buffer will be overwritten with new self IDs and we
1916          * will read out inconsistent data.  The OHCI specification
1917          * (section 11.2) recommends a technique similar to
1918          * linux/seqlock.h, where we remember the generation of the
1919          * self IDs in the buffer before reading them out and compare
1920          * it to the current generation after reading them out.  If
1921          * the two generations match we know we have a consistent set
1922          * of self IDs.
1923          */
1924
1925         new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
1926         if (new_generation != generation) {
1927                 dev_notice(ohci->card.device,
1928                            "new bus reset, discarding self IDs\n");
1929                 return;
1930         }
1931
1932         /* FIXME: Document how the locking works. */
1933         spin_lock_irqsave(&ohci->lock, flags);
1934
1935         ohci->generation = -1; /* prevent AT packet queueing */
1936         context_stop(&ohci->at_request_ctx);
1937         context_stop(&ohci->at_response_ctx);
1938
1939         spin_unlock_irqrestore(&ohci->lock, flags);
1940
1941         /*
1942          * Per OHCI 1.2 draft, clause 7.2.3.3, hardware may leave unsent
1943          * packets in the AT queues and software needs to drain them.
1944          * Some OHCI 1.1 controllers (JMicron) apparently require this too.
1945          */
1946         at_context_flush(&ohci->at_request_ctx);
1947         at_context_flush(&ohci->at_response_ctx);
1948
1949         spin_lock_irqsave(&ohci->lock, flags);
1950
1951         ohci->generation = generation;
1952         reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
1953
1954         if (ohci->quirks & QUIRK_RESET_PACKET)
1955                 ohci->request_generation = generation;
1956
1957         /*
1958          * This next bit is unrelated to the AT context stuff but we
1959          * have to do it under the spinlock also.  If a new config rom
1960          * was set up before this reset, the old one is now no longer
1961          * in use and we can free it. Update the config rom pointers
1962          * to point to the current config rom and clear the
1963          * next_config_rom pointer so a new update can take place.
1964          */
1965
1966         if (ohci->next_config_rom != NULL) {
1967                 if (ohci->next_config_rom != ohci->config_rom) {
1968                         free_rom      = ohci->config_rom;
1969                         free_rom_bus  = ohci->config_rom_bus;
1970                 }
1971                 ohci->config_rom      = ohci->next_config_rom;
1972                 ohci->config_rom_bus  = ohci->next_config_rom_bus;
1973                 ohci->next_config_rom = NULL;
1974
1975                 /*
1976                  * Restore config_rom image and manually update
1977                  * config_rom registers.  Writing the header quadlet
1978                  * will indicate that the config rom is ready, so we
1979                  * do that last.
1980                  */
1981                 reg_write(ohci, OHCI1394_BusOptions,
1982                           be32_to_cpu(ohci->config_rom[2]));
1983                 ohci->config_rom[0] = ohci->next_header;
1984                 reg_write(ohci, OHCI1394_ConfigROMhdr,
1985                           be32_to_cpu(ohci->next_header));
1986         }
1987
1988 #ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
1989         reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
1990         reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
1991 #endif
1992
1993         spin_unlock_irqrestore(&ohci->lock, flags);
1994
1995         if (free_rom)
1996                 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1997                                   free_rom, free_rom_bus);
1998
1999         log_selfids(ohci, generation, self_id_count);
2000
2001         fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
2002                                  self_id_count, ohci->self_id_buffer,
2003                                  ohci->csr_state_setclear_abdicate);
2004         ohci->csr_state_setclear_abdicate = false;
2005 }
2006
2007 static irqreturn_t irq_handler(int irq, void *data)
2008 {
2009         struct fw_ohci *ohci = data;
2010         u32 event, iso_event;
2011         int i;
2012
2013         event = reg_read(ohci, OHCI1394_IntEventClear);
2014
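        /*
         * event == 0 means the interrupt was not ours; ~event == 0
         * (all ones) is what register reads return once the card has
         * been ejected.
         */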
2015         if (!event || !~event)
2016                 return IRQ_NONE;
2017
2018         /*
2019          * busReset and postedWriteErr must not be cleared yet
2020          * (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1)
2021          */
2022         reg_write(ohci, OHCI1394_IntEventClear,
2023                   event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr));
2024         log_irqs(ohci, event);
2025
2026         if (event & OHCI1394_selfIDComplete)
2027                 queue_work(fw_workqueue, &ohci->bus_reset_work);
2028
2029         if (event & OHCI1394_RQPkt)
2030                 tasklet_schedule(&ohci->ar_request_ctx.tasklet);
2031
2032         if (event & OHCI1394_RSPkt)
2033                 tasklet_schedule(&ohci->ar_response_ctx.tasklet);
2034
2035         if (event & OHCI1394_reqTxComplete)
2036                 tasklet_schedule(&ohci->at_request_ctx.tasklet);
2037
2038         if (event & OHCI1394_respTxComplete)
2039                 tasklet_schedule(&ohci->at_response_ctx.tasklet);
2040
2041         if (event & OHCI1394_isochRx) {
2042                 iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
2043                 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);
2044
2045                 while (iso_event) {
2046                         i = ffs(iso_event) - 1;
2047                         tasklet_schedule(
2048                                 &ohci->ir_context_list[i].context.tasklet);
2049                         iso_event &= ~(1 << i);
2050                 }
2051         }
2052
2053         if (event & OHCI1394_isochTx) {
2054                 iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
2055                 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);
2056
2057                 while (iso_event) {
2058                         i = ffs(iso_event) - 1;
2059                         tasklet_schedule(
2060                                 &ohci->it_context_list[i].context.tasklet);
2061                         iso_event &= ~(1 << i);
2062                 }
2063         }
2064
2065         if (unlikely(event & OHCI1394_regAccessFail))
2066                 dev_err(ohci->card.device, "register access failure\n");
2067
2068         if (unlikely(event & OHCI1394_postedWriteErr)) {
2069                 reg_read(ohci, OHCI1394_PostedWriteAddressHi);
2070                 reg_read(ohci, OHCI1394_PostedWriteAddressLo);
2071                 reg_write(ohci, OHCI1394_IntEventClear,
2072                           OHCI1394_postedWriteErr);
2073                 if (printk_ratelimit())
2074                         dev_err(ohci->card.device, "PCI posted write error\n");
2075         }
2076
2077         if (unlikely(event & OHCI1394_cycleTooLong)) {
2078                 if (printk_ratelimit())
2079                         dev_notice(ohci->card.device,
2080                                    "isochronous cycle too long\n");
2081                 reg_write(ohci, OHCI1394_LinkControlSet,
2082                           OHCI1394_LinkControl_cycleMaster);
2083         }
2084
2085         if (unlikely(event & OHCI1394_cycleInconsistent)) {
2086                 /*
2087                  * We need to clear this event bit in order to make
2088                  * cycleMatch isochronous I/O work.  In theory we should
2089                  * stop active cycleMatch iso contexts now and restart
2090                  * them at least two cycles later.  (FIXME?)
2091                  */
2092                 if (printk_ratelimit())
2093                         dev_notice(ohci->card.device,
2094                                    "isochronous cycle inconsistent\n");
2095         }
2096
2097         if (unlikely(event & OHCI1394_unrecoverableError))
2098                 handle_dead_contexts(ohci);
2099
2100         if (event & OHCI1394_cycle64Seconds) {
2101                 spin_lock(&ohci->lock);
2102                 update_bus_time(ohci);
2103                 spin_unlock(&ohci->lock);
2104         } else
2105                 flush_writes(ohci);
2106
2107         return IRQ_HANDLED;
2108 }
2109
2110 static int software_reset(struct fw_ohci *ohci)
2111 {
2112         u32 val;
2113         int i;
2114
2115         reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
2116         for (i = 0; i < 500; i++) {
2117                 val = reg_read(ohci, OHCI1394_HCControlSet);
2118                 if (!~val)
2119                         return -ENODEV; /* Card was ejected. */
2120
2121                 if (!(val & OHCI1394_HCControl_softReset))
2122                         return 0;
2123
2124                 msleep(1);
2125         }
2126
2127         return -EBUSY;
2128 }
2129
2130 static void copy_config_rom(__be32 *dest, const __be32 *src, size_t length)
2131 {
2132         size_t size = length * 4;
2133
2134         memcpy(dest, src, size);
2135         if (size < CONFIG_ROM_SIZE)
2136                 memset(&dest[length], 0, CONFIG_ROM_SIZE - size);
2137 }
2138
2139 static int configure_1394a_enhancements(struct fw_ohci *ohci)
2140 {
2141         bool enable_1394a;
2142         int ret, clear, set, offset;
2143
2144         /* Check if the driver should configure link and PHY. */
2145         if (!(reg_read(ohci, OHCI1394_HCControlSet) &
2146               OHCI1394_HCControl_programPhyEnable))
2147                 return 0;
2148
2149         /* Paranoia: check whether the PHY supports 1394a, too. */
2150         enable_1394a = false;
2151         ret = read_phy_reg(ohci, 2);
2152         if (ret < 0)
2153                 return ret;
2154         if ((ret & PHY_EXTENDED_REGISTERS) == PHY_EXTENDED_REGISTERS) {
2155                 ret = read_paged_phy_reg(ohci, 1, 8);
2156                 if (ret < 0)
2157                         return ret;
2158                 if (ret >= 1)
2159                         enable_1394a = true;
2160         }
2161
2162         if (ohci->quirks & QUIRK_NO_1394A)
2163                 enable_1394a = false;
2164
2165         /* Configure PHY and link consistently. */
2166         if (enable_1394a) {
2167                 clear = 0;
2168                 set = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
2169         } else {
2170                 clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
2171                 set = 0;
2172         }
2173         ret = update_phy_reg(ohci, 5, clear, set);
2174         if (ret < 0)
2175                 return ret;
2176
2177         if (enable_1394a)
2178                 offset = OHCI1394_HCControlSet;
2179         else
2180                 offset = OHCI1394_HCControlClear;
2181         reg_write(ohci, offset, OHCI1394_HCControl_aPhyEnhanceEnable);
2182
2183         /* Clean up: configuration has been taken care of. */
2184         reg_write(ohci, OHCI1394_HCControlClear,
2185                   OHCI1394_HCControl_programPhyEnable);
2186
2187         return 0;
2188 }
2189
2190 static int probe_tsb41ba3d(struct fw_ohci *ohci)
2191 {
2192         /* TI vendor ID = 0x080028, TSB41BA3D product ID = 0x833005 (sic) */
2193         static const u8 id[] = { 0x08, 0x00, 0x28, 0x83, 0x30, 0x05, };
2194         int reg, i;
2195
2196         reg = read_phy_reg(ohci, 2);
2197         if (reg < 0)
2198                 return reg;
2199         if ((reg & PHY_EXTENDED_REGISTERS) != PHY_EXTENDED_REGISTERS)
2200                 return 0;
2201
2202         for (i = ARRAY_SIZE(id) - 1; i >= 0; i--) {
2203                 reg = read_paged_phy_reg(ohci, 1, i + 10);
2204                 if (reg < 0)
2205                         return reg;
2206                 if (reg != id[i])
2207                         return 0;
2208         }
2209         return 1;
2210 }
2211
2212 static int ohci_enable(struct fw_card *card,
2213                        const __be32 *config_rom, size_t length)
2214 {
2215         struct fw_ohci *ohci = fw_ohci(card);
2216         struct pci_dev *dev = to_pci_dev(card->device);
2217         u32 lps, seconds, version, irqs;
2218         int i, ret;
2219
2220         if (software_reset(ohci)) {
2221                 dev_err(card->device, "failed to reset ohci card\n");
2222                 return -EBUSY;
2223         }
2224
2225         /*
2226          * Now enable LPS, which we need in order to start accessing
2227          * most of the registers.  In fact, on some cards (ALI M5251),
2228          * accessing registers in the SClk domain without LPS enabled
2229          * will lock up the machine.  Wait 50msec to make sure we have
2230          * full link enabled.  However, with some cards (well, at least
2231          * a JMicron PCIe card), we have to try again sometimes.
2232          */
2233         reg_write(ohci, OHCI1394_HCControlSet,
2234                   OHCI1394_HCControl_LPS |
2235                   OHCI1394_HCControl_postedWriteEnable);
2236         flush_writes(ohci);
2237
2238         for (lps = 0, i = 0; !lps && i < 3; i++) {
2239                 msleep(50);
2240                 lps = reg_read(ohci, OHCI1394_HCControlSet) &
2241                       OHCI1394_HCControl_LPS;
2242         }
2243
2244         if (!lps) {
2245                 dev_err(card->device, "failed to set Link Power Status\n");
2246                 return -EIO;
2247         }
2248
2249         if (ohci->quirks & QUIRK_TI_SLLZ059) {
2250                 ret = probe_tsb41ba3d(ohci);
2251                 if (ret < 0)
2252                         return ret;
2253                 if (ret)
2254                         dev_notice(card->device, "local TSB41BA3D phy\n");
2255                 else
2256                         ohci->quirks &= ~QUIRK_TI_SLLZ059;
2257         }
2258
2259         reg_write(ohci, OHCI1394_HCControlClear,
2260                   OHCI1394_HCControl_noByteSwapData);
2261
2262         reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
2263         reg_write(ohci, OHCI1394_LinkControlSet,
2264                   OHCI1394_LinkControl_cycleTimerEnable |
2265                   OHCI1394_LinkControl_cycleMaster);
2266
2267         reg_write(ohci, OHCI1394_ATRetries,
2268                   OHCI1394_MAX_AT_REQ_RETRIES |
2269                   (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
2270                   (OHCI1394_MAX_PHYS_RESP_RETRIES << 8) |
2271                   (200 << 16));
2272
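        /*
         * Seed the cycleSeconds field (bits 31-25 of the cycle timer)
         * from wall-clock time, and keep the multiple-of-64 part of
         * bus_time consistent with it.
         */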
2273         seconds = lower_32_bits(get_seconds());
2274         reg_write(ohci, OHCI1394_IsochronousCycleTimer, seconds << 25);
2275         ohci->bus_time = seconds & ~0x3f;
2276
2277         version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
2278         if (version >= OHCI_VERSION_1_1) {
2279                 reg_write(ohci, OHCI1394_InitialChannelsAvailableHi,
2280                           0xfffffffe);
2281                 card->broadcast_channel_auto_allocated = true;
2282         }
2283
2284         /* Get implemented bits of the priority arbitration request counter. */
2285         reg_write(ohci, OHCI1394_FairnessControl, 0x3f);
2286         ohci->pri_req_max = reg_read(ohci, OHCI1394_FairnessControl) & 0x3f;
2287         reg_write(ohci, OHCI1394_FairnessControl, 0);
2288         card->priority_budget_implemented = ohci->pri_req_max != 0;
2289
2290         reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000);
2291         reg_write(ohci, OHCI1394_IntEventClear, ~0);
2292         reg_write(ohci, OHCI1394_IntMaskClear, ~0);
2293
2294         ret = configure_1394a_enhancements(ohci);
2295         if (ret < 0)
2296                 return ret;
2297
2298         /* Activate link_on bit and contender bit in our self ID packets. */
2299         ret = ohci_update_phy_reg(card, 4, 0, PHY_LINK_ACTIVE | PHY_CONTENDER);
2300         if (ret < 0)
2301                 return ret;
2302
2303         /*
2304          * When the link is not yet enabled, the atomic config rom
2305          * update mechanism described below in ohci_set_config_rom()
2306          * is not active.  We have to update ConfigRomHeader and
2307          * BusOptions manually, and the write to ConfigROMmap takes
2308          * effect immediately.  We tie this to the enabling of the
2309          * link, so we have a valid config rom before enabling - the
2310          * OHCI requires that ConfigROMhdr and BusOptions have valid
2311          * values before enabling.
2312          *
2313          * However, when the ConfigROMmap is written, some controllers
2314          * always read back quadlets 0 and 2 from the config rom to
2315          * the ConfigRomHeader and BusOptions registers on bus reset.
2316          * They shouldn't do that in this initial case where the link
2317          * isn't enabled.  This means we have to use the same
2318          * workaround here, setting the bus header to 0 and then writing
2319          * the right values in the bus reset tasklet.
2320          */
2321
2322         if (config_rom) {
2323                 ohci->next_config_rom =
2324                         dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
2325                                            &ohci->next_config_rom_bus,
2326                                            GFP_KERNEL);
2327                 if (ohci->next_config_rom == NULL)
2328                         return -ENOMEM;
2329
2330                 copy_config_rom(ohci->next_config_rom, config_rom, length);
2331         } else {
2332                 /*
2333                  * In the suspend case, config_rom is NULL, which
2334                  * means that we just reuse the old config rom.
2335                  */
2336                 ohci->next_config_rom = ohci->config_rom;
2337                 ohci->next_config_rom_bus = ohci->config_rom_bus;
2338         }
2339
2340         ohci->next_header = ohci->next_config_rom[0];
2341         ohci->next_config_rom[0] = 0;
2342         reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
2343         reg_write(ohci, OHCI1394_BusOptions,
2344                   be32_to_cpu(ohci->next_config_rom[2]));
2345         reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
2346
2347         reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
2348
2349         if (!(ohci->quirks & QUIRK_NO_MSI))
2350                 pci_enable_msi(dev);
2351         if (request_irq(dev->irq, irq_handler,
2352                         pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED,
2353                         ohci_driver_name, ohci)) {
2354                 dev_err(card->device, "failed to allocate interrupt %d\n",
2355                         dev->irq);
2356                 pci_disable_msi(dev);
2357
2358                 if (config_rom) {
2359                         dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
2360                                           ohci->next_config_rom,
2361                                           ohci->next_config_rom_bus);
2362                         ohci->next_config_rom = NULL;
2363                 }
2364                 return -EIO;
2365         }
2366
2367         irqs =  OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
2368                 OHCI1394_RQPkt | OHCI1394_RSPkt |
2369                 OHCI1394_isochTx | OHCI1394_isochRx |
2370                 OHCI1394_postedWriteErr |
2371                 OHCI1394_selfIDComplete |
2372                 OHCI1394_regAccessFail |
2373                 OHCI1394_cycle64Seconds |
2374                 OHCI1394_cycleInconsistent |
2375                 OHCI1394_unrecoverableError |
2376                 OHCI1394_cycleTooLong |
2377                 OHCI1394_masterIntEnable;
2378         if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
2379                 irqs |= OHCI1394_busReset;
2380         reg_write(ohci, OHCI1394_IntMaskSet, irqs);
2381
2382         reg_write(ohci, OHCI1394_HCControlSet,
2383                   OHCI1394_HCControl_linkEnable |
2384                   OHCI1394_HCControl_BIBimageValid);
2385
2386         reg_write(ohci, OHCI1394_LinkControlSet,
2387                   OHCI1394_LinkControl_rcvSelfID |
2388                   OHCI1394_LinkControl_rcvPhyPkt);
2389
2390         ar_context_run(&ohci->ar_request_ctx);
2391         ar_context_run(&ohci->ar_response_ctx);
2392
2393         flush_writes(ohci);
2394
2395         /* We are ready to go, reset bus to finish initialization. */
2396         fw_schedule_bus_reset(&ohci->card, false, true);
2397
2398         return 0;
2399 }
2400
2401 static int ohci_set_config_rom(struct fw_card *card,
2402                                const __be32 *config_rom, size_t length)
2403 {
2404         struct fw_ohci *ohci;
2405         unsigned long flags;
2406         __be32 *next_config_rom;
2407         dma_addr_t uninitialized_var(next_config_rom_bus);
2408
2409         ohci = fw_ohci(card);
2410
2411         /*
2412          * When the OHCI controller is enabled, the config rom update
2413          * mechanism is a bit tricky, but easy enough to use.  See
2414          * section 5.5.6 in the OHCI specification.
2415          *
2416          * The OHCI controller caches the new config rom address in a
2417          * shadow register (ConfigROMmapNext) and needs a bus reset
2418          * for the changes to take place.  When the bus reset is
2419          * detected, the controller loads the new values for the
2420          * ConfigRomHeader and BusOptions registers from the specified
2421          * config rom and loads ConfigROMmap from the ConfigROMmapNext
2422          * shadow register. All automatically and atomically.
2423          *
2424          * Now, there's a twist to this story.  The automatic load of
2425          * ConfigRomHeader and BusOptions doesn't honor the
2426          * noByteSwapData bit, so with a be32 config rom, the
2427          * controller will load be32 values into these registers
2428          * during the atomic update, even on little endian
2429          * architectures.  The workaround we use is to put a 0 in the
2430          * header quadlet; 0 is endian agnostic and means that the
2431          * config rom isn't ready yet.  In the bus reset tasklet we
2432          * then set up the real values for the two registers.
2433          *
2434          * We use ohci->lock to avoid racing with the code that sets
2435          * ohci->next_config_rom to NULL (see bus_reset_work).
2436          */
2437
2438         next_config_rom =
2439                 dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
2440                                    &next_config_rom_bus, GFP_KERNEL);
2441         if (next_config_rom == NULL)
2442                 return -ENOMEM;
2443
2444         spin_lock_irqsave(&ohci->lock, flags);
2445
2446         /*
2447          * If there is not an already pending config_rom update,
2448          * push our new allocation into the ohci->next_config_rom
2449          * and then mark the local variable as null so that we
2450          * won't deallocate the new buffer.
2451          *
2452          * OTOH, if there is a pending config_rom update, just
2453          * use that buffer with the new config_rom data, and
2454          * let this routine free the unused DMA allocation.
2455          */
2456
2457         if (ohci->next_config_rom == NULL) {
2458                 ohci->next_config_rom = next_config_rom;
2459                 ohci->next_config_rom_bus = next_config_rom_bus;
2460                 next_config_rom = NULL;
2461         }
2462
2463         copy_config_rom(ohci->next_config_rom, config_rom, length);
2464
2465         ohci->next_header = config_rom[0];
2466         ohci->next_config_rom[0] = 0;
2467
2468         reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
2469
2470         spin_unlock_irqrestore(&ohci->lock, flags);
2471
2472         /* If we didn't use the DMA allocation, delete it. */
2473         if (next_config_rom != NULL)
2474                 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
2475                                   next_config_rom, next_config_rom_bus);
2476
2477         /*
2478          * Now initiate a bus reset to have the changes take
2479          * effect. We clean up the old config rom memory and DMA
2480          * mappings in the bus reset tasklet, since the OHCI
2481          * controller could need to access it before the bus reset
2482          * takes effect.
2483          */
2484
2485         fw_schedule_bus_reset(&ohci->card, true, true);
2486
2487         return 0;
2488 }
2489
2490 static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
2491 {
2492         struct fw_ohci *ohci = fw_ohci(card);
2493
2494         at_context_transmit(&ohci->at_request_ctx, packet);
2495 }
2496
2497 static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
2498 {
2499         struct fw_ohci *ohci = fw_ohci(card);
2500
2501         at_context_transmit(&ohci->at_response_ctx, packet);
2502 }
2503
2504 static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
2505 {
2506         struct fw_ohci *ohci = fw_ohci(card);
2507         struct context *ctx = &ohci->at_request_ctx;
2508         struct driver_data *driver_data = packet->driver_data;
2509         int ret = -ENOENT;
2510
2511         tasklet_disable(&ctx->tasklet);
2512
2513         if (packet->ack != 0)
2514                 goto out;
2515
2516         if (packet->payload_mapped)
2517                 dma_unmap_single(ohci->card.device, packet->payload_bus,
2518                                  packet->payload_length, DMA_TO_DEVICE);
2519
2520         log_ar_at_event(ohci, 'T', packet->speed, packet->header, 0x20);
2521         driver_data->packet = NULL;
2522         packet->ack = RCODE_CANCELLED;
2523         packet->callback(packet, &ohci->card, packet->ack);
2524         ret = 0;
2525  out:
2526         tasklet_enable(&ctx->tasklet);
2527
2528         return ret;
2529 }
2530
2531 static int ohci_enable_phys_dma(struct fw_card *card,
2532                                 int node_id, int generation)
2533 {
2534 #ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
2535         return 0;
2536 #else
2537         struct fw_ohci *ohci = fw_ohci(card);
2538         unsigned long flags;
2539         int n, ret = 0;
2540
2541         /*
2542          * FIXME:  Make sure this bitmask is cleared when we clear the busReset
2543          * interrupt bit.  Clear physReqResourceAllBuses on bus reset.
2544          */
2545
2546         spin_lock_irqsave(&ohci->lock, flags);
2547
2548         if (ohci->generation != generation) {
2549                 ret = -ESTALE;
2550                 goto out;
2551         }
2552
2553         /*
2554          * Note, if the node ID contains a non-local bus ID, physical DMA is
2555          * enabled for _all_ nodes on remote buses.
2556          */
2557
2558         n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
2559         if (n < 32)
2560                 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
2561         else
2562                 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));
2563
2564         flush_writes(ohci);
2565  out:
2566         spin_unlock_irqrestore(&ohci->lock, flags);
2567
2568         return ret;
2569 #endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
2570 }
2571
2572 static u32 ohci_read_csr(struct fw_card *card, int csr_offset)
2573 {
2574         struct fw_ohci *ohci = fw_ohci(card);
2575         unsigned long flags;
2576         u32 value;
2577
2578         switch (csr_offset) {
2579         case CSR_STATE_CLEAR:
2580         case CSR_STATE_SET:
2581                 if (ohci->is_root &&
2582                     (reg_read(ohci, OHCI1394_LinkControlSet) &
2583                      OHCI1394_LinkControl_cycleMaster))
2584                         value = CSR_STATE_BIT_CMSTR;
2585                 else
2586                         value = 0;
2587                 if (ohci->csr_state_setclear_abdicate)
2588                         value |= CSR_STATE_BIT_ABDICATE;
2589
2590                 return value;
2591
2592         case CSR_NODE_IDS:
2593                 return reg_read(ohci, OHCI1394_NodeID) << 16;
2594
2595         case CSR_CYCLE_TIME:
2596                 return get_cycle_time(ohci);
2597
2598         case CSR_BUS_TIME:
2599                 /*
2600                  * We might be called just after the cycle timer has wrapped
2601                  * around but just before the cycle64Seconds handler, so we
2602                  * better check here, too, if the bus time needs to be updated.
2603                  */
2604                 spin_lock_irqsave(&ohci->lock, flags);
2605                 value = update_bus_time(ohci);
2606                 spin_unlock_irqrestore(&ohci->lock, flags);
2607                 return value;
2608
2609         case CSR_BUSY_TIMEOUT:
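                /*
                 * Undo the layout used in ohci_write_csr() below: move
                 * one of the replicated retry fields back down into
                 * bits 3-0 and the cycle limit down from bits 16 and up.
                 */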
2610                 value = reg_read(ohci, OHCI1394_ATRetries);
2611                 return (value >> 4) & 0x0ffff00f;
2612
2613         case CSR_PRIORITY_BUDGET:
2614                 return (reg_read(ohci, OHCI1394_FairnessControl) & 0x3f) |
2615                         (ohci->pri_req_max << 8);
2616
2617         default:
2618                 WARN_ON(1);
2619                 return 0;
2620         }
2621 }
2622
2623 static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value)
2624 {
2625         struct fw_ohci *ohci = fw_ohci(card);
2626         unsigned long flags;
2627
2628         switch (csr_offset) {
2629         case CSR_STATE_CLEAR:
2630                 if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) {
2631                         reg_write(ohci, OHCI1394_LinkControlClear,
2632                                   OHCI1394_LinkControl_cycleMaster);
2633                         flush_writes(ohci);
2634                 }
2635                 if (value & CSR_STATE_BIT_ABDICATE)
2636                         ohci->csr_state_setclear_abdicate = false;
2637                 break;
2638
2639         case CSR_STATE_SET:
2640                 if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) {
2641                         reg_write(ohci, OHCI1394_LinkControlSet,
2642                                   OHCI1394_LinkControl_cycleMaster);
2643                         flush_writes(ohci);
2644                 }
2645                 if (value & CSR_STATE_BIT_ABDICATE)
2646                         ohci->csr_state_setclear_abdicate = true;
2647                 break;
2648
2649         case CSR_NODE_IDS:
2650                 reg_write(ohci, OHCI1394_NodeID, value >> 16);
2651                 flush_writes(ohci);
2652                 break;
2653
2654         case CSR_CYCLE_TIME:
2655                 reg_write(ohci, OHCI1394_IsochronousCycleTimer, value);
2656                 reg_write(ohci, OHCI1394_IntEventSet,
2657                           OHCI1394_cycleInconsistent);
2658                 flush_writes(ohci);
2659                 break;
2660
2661         case CSR_BUS_TIME:
2662                 spin_lock_irqsave(&ohci->lock, flags);
2663                 ohci->bus_time = (ohci->bus_time & 0x7f) | (value & ~0x7f);
2664                 spin_unlock_irqrestore(&ohci->lock, flags);
2665                 break;
2666
2667         case CSR_BUSY_TIMEOUT:
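                /*
                 * Replicate the 4-bit retry limit into all three
                 * ATRetries retry fields (AT request, AT response,
                 * physical response) and shift the cycle limit up to
                 * bits 16 and up.
                 */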
2668                 value = (value & 0xf) | ((value & 0xf) << 4) |
2669                         ((value & 0xf) << 8) | ((value & 0x0ffff000) << 4);
2670                 reg_write(ohci, OHCI1394_ATRetries, value);
2671                 flush_writes(ohci);
2672                 break;
2673
2674         case CSR_PRIORITY_BUDGET:
2675                 reg_write(ohci, OHCI1394_FairnessControl, value & 0x3f);
2676                 flush_writes(ohci);
2677                 break;
2678
2679         default:
2680                 WARN_ON(1);
2681                 break;
2682         }
2683 }
2684
2685 static void flush_iso_completions(struct iso_context *ctx)
2686 {
2687         ctx->base.callback.sc(&ctx->base, ctx->last_timestamp,
2688                               ctx->header_length, ctx->header,
2689                               ctx->base.callback_data);
2690         ctx->header_length = 0;
2691 }
2692
2693 static void copy_iso_headers(struct iso_context *ctx, const u32 *dma_hdr)
2694 {
2695         u32 *ctx_hdr;
2696
2697         if (ctx->header_length + ctx->base.header_size > PAGE_SIZE)
2698                 flush_iso_completions(ctx);
2699
2700         ctx_hdr = ctx->header + ctx->header_length;
2701         ctx->last_timestamp = (u16)le32_to_cpu((__force __le32)dma_hdr[0]);
2702
2703         /*
2704          * The two iso header quadlets are byteswapped to little
2705          * endian by the controller, but we want to present them
2706          * as big endian for consistency with the bus endianness.
2707          */
2708         if (ctx->base.header_size > 0)
2709                 ctx_hdr[0] = swab32(dma_hdr[1]); /* iso packet header */
2710         if (ctx->base.header_size > 4)
2711                 ctx_hdr[1] = swab32(dma_hdr[0]); /* timestamp */
2712         if (ctx->base.header_size > 8)
2713                 memcpy(&ctx_hdr[2], &dma_hdr[2], ctx->base.header_size - 8);
2714         ctx->header_length += ctx->base.header_size;
2715 }
2716
2717 static int handle_ir_packet_per_buffer(struct context *context,
2718                                        struct descriptor *d,
2719                                        struct descriptor *last)
2720 {
2721         struct iso_context *ctx =
2722                 container_of(context, struct iso_context, context);
2723         struct descriptor *pd;
2724         u32 buffer_dma;
2725
2726         for (pd = d; pd <= last; pd++)
2727                 if (pd->transfer_status)
2728                         break;
2729         if (pd > last)
2730                 /* Descriptor(s) not done yet, stop iteration */
2731                 return 0;
2732
2733         while (!(d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))) {
2734                 d++;
2735                 buffer_dma = le32_to_cpu(d->data_address);
2736                 dma_sync_single_range_for_cpu(context->ohci->card.device,
2737                                               buffer_dma & PAGE_MASK,
2738                                               buffer_dma & ~PAGE_MASK,
2739                                               le16_to_cpu(d->req_count),
2740                                               DMA_FROM_DEVICE);
2741         }
2742
2743         copy_iso_headers(ctx, (u32 *) (last + 1));
2744
2745         if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS))
2746                 flush_iso_completions(ctx);
2747
2748         return 1;
2749 }
2750
2751 /* d == last because each descriptor block is only a single descriptor. */
2752 static int handle_ir_buffer_fill(struct context *context,
2753                                  struct descriptor *d,
2754                                  struct descriptor *last)
2755 {
2756         struct iso_context *ctx =
2757                 container_of(context, struct iso_context, context);
2758         unsigned int req_count, res_count, completed;
2759         u32 buffer_dma;
2760
2761         req_count = le16_to_cpu(last->req_count);
2762         res_count = le16_to_cpu(ACCESS_ONCE(last->res_count));
2763         completed = req_count - res_count;
2764         buffer_dma = le32_to_cpu(last->data_address);
2765
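        /*
         * Remember how far the controller has filled this buffer so
         * that flush_ir_buffer_fill() can report the partial progress
         * if the context is flushed before the descriptor completes.
         */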
2766         if (completed > 0) {
2767                 ctx->mc_buffer_bus = buffer_dma;
2768                 ctx->mc_completed = completed;
2769         }
2770
2771         if (res_count != 0)
2772                 /* Descriptor(s) not done yet, stop iteration */
2773                 return 0;
2774
2775         dma_sync_single_range_for_cpu(context->ohci->card.device,
2776                                       buffer_dma & PAGE_MASK,
2777                                       buffer_dma & ~PAGE_MASK,
2778                                       completed, DMA_FROM_DEVICE);
2779
2780         if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) {
2781                 ctx->base.callback.mc(&ctx->base,
2782                                       buffer_dma + completed,
2783                                       ctx->base.callback_data);
2784                 ctx->mc_completed = 0;
2785         }
2786
2787         return 1;
2788 }
2789
2790 static void flush_ir_buffer_fill(struct iso_context *ctx)
2791 {
2792         dma_sync_single_range_for_cpu(ctx->context.ohci->card.device,
2793                                       ctx->mc_buffer_bus & PAGE_MASK,
2794                                       ctx->mc_buffer_bus & ~PAGE_MASK,
2795                                       ctx->mc_completed, DMA_FROM_DEVICE);
2796
2797         ctx->base.callback.mc(&ctx->base,
2798                               ctx->mc_buffer_bus + ctx->mc_completed,
2799                               ctx->base.callback_data);
2800         ctx->mc_completed = 0;
2801 }
2802
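/*
 * Syncs an IT packet's payload buffers back to the CPU after
 * transmission.  A rough sketch of the descriptor layout being walked,
 * as built by queue_iso_transmit() below:
 *
 *   pd[0..1]  OUTPUT_MORE_IMMEDIATE plus the 8-byte iso packet header
 *   pd[2]     OUTPUT_MORE for an optional user header (lives in the
 *             context program's coherent page, never synced)
 *   pd[3..]   OUTPUT_MORE/OUTPUT_LAST payload descriptors to sync
 *
 * A skip-only packet (a single OUTPUT_LAST descriptor with
 * BRANCH_ALWAYS set) has no data buffer and is filtered out up front.
 */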
2803 static inline void sync_it_packet_for_cpu(struct context *context,
2804                                           struct descriptor *pd)
2805 {
2806         __le16 control;
2807         u32 buffer_dma;
2808
2809         /* only packets beginning with OUTPUT_MORE* have data buffers */
2810         if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
2811                 return;
2812
2813         /* skip OUTPUT_MORE_IMMEDIATE and its 16-byte immediate-data slot */
2814         pd += 2;
2815
2816         /*
2817          * If the packet has a header, the first OUTPUT_MORE/LAST descriptor's
2818          * data buffer is in the context program's coherent page and must not
2819          * be synced.
2820          */
2821         if ((le32_to_cpu(pd->data_address) & PAGE_MASK) ==
2822             (context->current_bus          & PAGE_MASK)) {
2823                 if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
2824                         return;
2825                 pd++;
2826         }
2827
2828         do {
2829                 buffer_dma = le32_to_cpu(pd->data_address);
2830                 dma_sync_single_range_for_cpu(context->ohci->card.device,
2831                                               buffer_dma & PAGE_MASK,
2832                                               buffer_dma & ~PAGE_MASK,
2833                                               le16_to_cpu(pd->req_count),
2834                                               DMA_TO_DEVICE);
2835                 control = pd->control;
2836                 pd++;
2837         } while (!(control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS)));
2838 }
2839
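/*
 * Completion handler for IT contexts.  Only a four-byte trailer per
 * packet, transfer status in the upper half and the cycle timestamp in
 * the lower, is accumulated in the context's header page; it is stored
 * big-endian for consistency with the receive path.
 */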
2840 static int handle_it_packet(struct context *context,
2841                             struct descriptor *d,
2842                             struct descriptor *last)
2843 {
2844         struct iso_context *ctx =
2845                 container_of(context, struct iso_context, context);
2846         struct descriptor *pd;
2847         __be32 *ctx_hdr;
2848
2849         for (pd = d; pd <= last; pd++)
2850                 if (pd->transfer_status)
2851                         break;
2852         if (pd > last)
2853                 /* Descriptor(s) not done yet, stop iteration */
2854                 return 0;
2855
2856         sync_it_packet_for_cpu(context, d);
2857
2858         if (ctx->header_length + 4 > PAGE_SIZE)
2859                 flush_iso_completions(ctx);
2860
2861         ctx_hdr = ctx->header + ctx->header_length;
2862         ctx->last_timestamp = le16_to_cpu(last->res_count);
2863         /* Present this value as big-endian to match the receive code */
2864         *ctx_hdr = cpu_to_be32((le16_to_cpu(pd->transfer_status) << 16) |
2865                                le16_to_cpu(pd->res_count));
2866         ctx->header_length += 4;
2867
2868         if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS))
2869                 flush_iso_completions(ctx);
2870
2871         return 1;
2872 }
2873
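/*
 * Programs the 64-bit multichannel receive mask.  Bit n of @channels
 * enables reception on iso channel n; channels 0-31 map to the Lo
 * register pair, 32-63 to Hi.  Clearing the complement before setting
 * the requested bits makes the hardware match @channels regardless of
 * its previous state.  For example, (1ULL << 5) | (1ULL << 40) ends up
 * as bit 5 of MaskLo and bit 8 of MaskHi.
 */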
2874 static void set_multichannel_mask(struct fw_ohci *ohci, u64 channels)
2875 {
2876         u32 hi = channels >> 32, lo = channels;
2877
2878         reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, ~hi);
2879         reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, ~lo);
2880         reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet, hi);
2881         reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet, lo);
2882         mmiowb();
2883         ohci->mc_channels = channels;
2884 }
2885
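/*
 * Allocates one of the controller's IR or IT DMA contexts.  Free
 * contexts are tracked as bits in it_context_mask/ir_context_mask; a
 * single-channel IR context also claims its channel, and at most one
 * multichannel context can exist per controller.
 */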
2886 static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
2887                                 int type, int channel, size_t header_size)
2888 {
2889         struct fw_ohci *ohci = fw_ohci(card);
2890         struct iso_context *uninitialized_var(ctx);
2891         descriptor_callback_t uninitialized_var(callback);
2892         u64 *uninitialized_var(channels);
2893         u32 *uninitialized_var(mask), uninitialized_var(regs);
2894         unsigned long flags;
2895         int index, ret = -EBUSY;
2896
2897         spin_lock_irqsave(&ohci->lock, flags);
2898
2899         switch (type) {
2900         case FW_ISO_CONTEXT_TRANSMIT:
2901                 mask     = &ohci->it_context_mask;
2902                 callback = handle_it_packet;
2903                 index    = ffs(*mask) - 1;
2904                 if (index >= 0) {
2905                         *mask &= ~(1 << index);
2906                         regs = OHCI1394_IsoXmitContextBase(index);
2907                         ctx  = &ohci->it_context_list[index];
2908                 }
2909                 break;
2910
2911         case FW_ISO_CONTEXT_RECEIVE:
2912                 channels = &ohci->ir_context_channels;
2913                 mask     = &ohci->ir_context_mask;
2914                 callback = handle_ir_packet_per_buffer;
2915                 index    = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
2916                 if (index >= 0) {
2917                         *channels &= ~(1ULL << channel);
2918                         *mask     &= ~(1 << index);
2919                         regs = OHCI1394_IsoRcvContextBase(index);
2920                         ctx  = &ohci->ir_context_list[index];
2921                 }
2922                 break;
2923
2924         case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
2925                 mask     = &ohci->ir_context_mask;
2926                 callback = handle_ir_buffer_fill;
2927                 index    = !ohci->mc_allocated ? ffs(*mask) - 1 : -1;
2928                 if (index >= 0) {
2929                         ohci->mc_allocated = true;
2930                         *mask &= ~(1 << index);
2931                         regs = OHCI1394_IsoRcvContextBase(index);
2932                         ctx  = &ohci->ir_context_list[index];
2933                 }
2934                 break;
2935
2936         default:
2937                 index = -1;
2938                 ret = -ENOSYS;
2939         }
2940
2941         spin_unlock_irqrestore(&ohci->lock, flags);
2942
2943         if (index < 0)
2944                 return ERR_PTR(ret);
2945
2946         memset(ctx, 0, sizeof(*ctx));
2947         ctx->header_length = 0;
2948         ctx->header = (void *) __get_free_page(GFP_KERNEL);
2949         if (ctx->header == NULL) {
2950                 ret = -ENOMEM;
2951                 goto out;
2952         }
2953         ret = context_init(&ctx->context, ohci, regs, callback);
2954         if (ret < 0)
2955                 goto out_with_header;
2956
2957         if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) {
2958                 set_multichannel_mask(ohci, 0);
2959                 ctx->mc_completed = 0;
2960         }
2961
2962         return &ctx->base;
2963
2964  out_with_header:
2965         free_page((unsigned long)ctx->header);
2966  out:
2967         spin_lock_irqsave(&ohci->lock, flags);
2968
2969         switch (type) {
2970         case FW_ISO_CONTEXT_RECEIVE:
2971                 *channels |= 1ULL << channel;
2972                 break;
2973
2974         case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
2975                 ohci->mc_allocated = false;
2976                 break;
2977         }
2978         *mask |= 1 << index;
2979
2980         spin_unlock_irqrestore(&ohci->lock, flags);
2981
2982         return ERR_PTR(ret);
2983 }
2984
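/*
 * Starts a context, optionally gated on a 15-bit cycle match.  For IT
 * contexts the match lands in bits 30-16 of the value handed to
 * context_run(); for IR contexts it lands in bits 26-12 of the
 * ContextMatch register, next to the tag (bits 31-28), sync (11-8) and
 * channel fields.
 */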
2985 static int ohci_start_iso(struct fw_iso_context *base,
2986                           s32 cycle, u32 sync, u32 tags)
2987 {
2988         struct iso_context *ctx = container_of(base, struct iso_context, base);
2989         struct fw_ohci *ohci = ctx->context.ohci;
2990         u32 control = IR_CONTEXT_ISOCH_HEADER, match;
2991         int index;
2992
2993         /* the controller cannot start without any queued packets */
2994         if (ctx->context.last->branch_address == 0)
2995                 return -ENODATA;
2996
2997         switch (ctx->base.type) {
2998         case FW_ISO_CONTEXT_TRANSMIT:
2999                 index = ctx - ohci->it_context_list;
3000                 match = 0;
3001                 if (cycle >= 0)
3002                         match = IT_CONTEXT_CYCLE_MATCH_ENABLE |
3003                                 (cycle & 0x7fff) << 16;
3004
3005                 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
3006                 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
3007                 context_run(&ctx->context, match);
3008                 break;
3009
3010         case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
3011                 control |= IR_CONTEXT_BUFFER_FILL|IR_CONTEXT_MULTI_CHANNEL_MODE;
3012                 /* fall through */
3013         case FW_ISO_CONTEXT_RECEIVE:
3014                 index = ctx - ohci->ir_context_list;
3015                 match = (tags << 28) | (sync << 8) | ctx->base.channel;
3016                 if (cycle >= 0) {
3017                         match |= (cycle & 0x07fff) << 12;
3018                         control |= IR_CONTEXT_CYCLE_MATCH_ENABLE;
3019                 }
3020
3021                 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
3022                 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
3023                 reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
3024                 context_run(&ctx->context, control);
3025
3026                 ctx->sync = sync;
3027                 ctx->tags = tags;
3028
3029                 break;
3030         }
3031
3032         return 0;
3033 }
3034
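/*
 * Stops a context: masks its interrupt, stops the DMA program, and
 * kills the tasklet, so that no completion callback can still be
 * running once this returns.
 */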
3035 static int ohci_stop_iso(struct fw_iso_context *base)
3036 {
3037         struct fw_ohci *ohci = fw_ohci(base->card);
3038         struct iso_context *ctx = container_of(base, struct iso_context, base);
3039         int index;
3040
3041         switch (ctx->base.type) {
3042         case FW_ISO_CONTEXT_TRANSMIT:
3043                 index = ctx - ohci->it_context_list;
3044                 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
3045                 break;
3046
3047         case FW_ISO_CONTEXT_RECEIVE:
3048         case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
3049                 index = ctx - ohci->ir_context_list;
3050                 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
3051                 break;
3052         }
3053         flush_writes(ohci);
3054         context_stop(&ctx->context);
3055         tasklet_kill(&ctx->context.tasklet);
3056
3057         return 0;
3058 }
3059
3060 static void ohci_free_iso_context(struct fw_iso_context *base)
3061 {
3062         struct fw_ohci *ohci = fw_ohci(base->card);
3063         struct iso_context *ctx = container_of(base, struct iso_context, base);
3064         unsigned long flags;
3065         int index;
3066
3067         ohci_stop_iso(base);
3068         context_release(&ctx->context);
3069         free_page((unsigned long)ctx->header);
3070
3071         spin_lock_irqsave(&ohci->lock, flags);
3072
3073         switch (base->type) {
3074         case FW_ISO_CONTEXT_TRANSMIT:
3075                 index = ctx - ohci->it_context_list;
3076                 ohci->it_context_mask |= 1 << index;
3077                 break;
3078
3079         case FW_ISO_CONTEXT_RECEIVE:
3080                 index = ctx - ohci->ir_context_list;
3081                 ohci->ir_context_mask |= 1 << index;
3082                 ohci->ir_context_channels |= 1ULL << base->channel;
3083                 break;
3084
3085         case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
3086                 index = ctx - ohci->ir_context_list;
3087                 ohci->ir_context_mask |= 1 << index;
3088                 ohci->ir_context_channels |= ohci->mc_channels;
3089                 ohci->mc_channels = 0;
3090                 ohci->mc_allocated = false;
3091                 break;
3092         }
3093
3094         spin_unlock_irqrestore(&ohci->lock, flags);
3095 }
3096
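/*
 * For the check below: a requested channel is acceptable if it is
 * either still unallocated (its bit is set in ir_context_channels) or
 * already part of this context's mask (set in mc_channels); -EBUSY is
 * returned if any requested channel is neither.
 */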
3097 static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels)
3098 {
3099         struct fw_ohci *ohci = fw_ohci(base->card);
3100         unsigned long flags;
3101         int ret;
3102
3103         switch (base->type) {
3104         case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
3105
3106                 spin_lock_irqsave(&ohci->lock, flags);
3107
3108                 /* Don't allow multichannel to grab other contexts' channels. */
3109                 if (~ohci->ir_context_channels & ~ohci->mc_channels & *channels) {
3110                         *channels = ohci->ir_context_channels;
3111                         ret = -EBUSY;
3112                 } else {
3113                         set_multichannel_mask(ohci, *channels);
3114                         ret = 0;
3115                 }
3116
3117                 spin_unlock_irqrestore(&ohci->lock, flags);
3118
3119                 break;
3120         default:
3121                 ret = -EINVAL;
3122         }
3123
3124         return ret;
3125 }
3126
3127 #ifdef CONFIG_PM
3128 static void ohci_resume_iso_dma(struct fw_ohci *ohci)
3129 {
3130         int i;
3131         struct iso_context *ctx;
3132
3133         for (i = 0; i < ohci->n_ir; i++) {
3134                 ctx = &ohci->ir_context_list[i];
3135                 if (ctx->context.running)
3136                         ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
3137         }
3138
3139         for (i = 0; i < ohci->n_it; i++) {
3140                 ctx = &ohci->it_context_list[i];
3141                 if (ctx->context.running)
3142                         ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
3143         }
3144 }
3145 #endif
3146
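/*
 * Builds the descriptor block for one IT packet.  z counts the
 * descriptors the hardware steps through: two for the immediate iso
 * header (omitted for skip packets), one for the user header if
 * present, and one per payload page; header_z further descriptors
 * behind those hold the copied user header.  A sketch for a packet
 * with a header and a one-page payload (z = 4):
 *
 *   d[0..1]  OUTPUT_MORE_IMMEDIATE plus iso header quadlets
 *   d[2]     header descriptor, pointing at the copy in d[4]
 *   d[3]     OUTPUT_LAST for the payload page
 *   d[4..]   user header bytes
 */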
3147 static int queue_iso_transmit(struct iso_context *ctx,
3148                               struct fw_iso_packet *packet,
3149                               struct fw_iso_buffer *buffer,
3150                               unsigned long payload)
3151 {
3152         struct descriptor *d, *last, *pd;
3153         struct fw_iso_packet *p;
3154         __le32 *header;
3155         dma_addr_t d_bus, page_bus;
3156         u32 z, header_z, payload_z, irq;
3157         u32 payload_index, payload_end_index, next_page_index;
3158         int page, end_page, i, length, offset;
3159
3160         p = packet;
3161         payload_index = payload;
3162
3163         if (p->skip)
3164                 z = 1;
3165         else
3166                 z = 2;
3167         if (p->header_length > 0)
3168                 z++;
3169
3170         /* Determine the index of the first page past the end of the payload. */
3171         end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
3172         if (p->payload_length > 0)
3173                 payload_z = end_page - (payload_index >> PAGE_SHIFT);
3174         else
3175                 payload_z = 0;
3176
3177         z += payload_z;
3178
3179         /* Get header size in number of descriptors. */
3180         header_z = DIV_ROUND_UP(p->header_length, sizeof(*d));
3181
3182         d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
3183         if (d == NULL)
3184                 return -ENOMEM;
3185
3186         if (!p->skip) {
3187                 d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
3188                 d[0].req_count = cpu_to_le16(8);
3189                 /*
3190                  * Link the skip address to this descriptor itself.  This causes
3191                  * a context to skip a cycle whenever lost cycles or FIFO
3192                  * overruns occur, without dropping the data.  The application
3193                  * should then decide whether this is an error condition or not.
3194                  * FIXME:  Make the context's cycle-lost behaviour configurable?
3195                  */
3196                 d[0].branch_address = cpu_to_le32(d_bus | z);
3197
3198                 header = (__le32 *) &d[1];
3199                 header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
3200                                         IT_HEADER_TAG(p->tag) |
3201                                         IT_HEADER_TCODE(TCODE_STREAM_DATA) |
3202                                         IT_HEADER_CHANNEL(ctx->base.channel) |
3203                                         IT_HEADER_SPEED(ctx->base.speed));
3204                 header[1] =
3205                         cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
3206                                                           p->payload_length));
3207         }
3208
3209         if (p->header_length > 0) {
3210                 d[2].req_count    = cpu_to_le16(p->header_length);
3211                 d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d));
3212                 memcpy(&d[z], p->header, p->header_length);
3213         }
3214
3215         pd = d + z - payload_z;
3216         payload_end_index = payload_index + p->payload_length;
3217         for (i = 0; i < payload_z; i++) {
3218                 page               = payload_index >> PAGE_SHIFT;
3219                 offset             = payload_index & ~PAGE_MASK;
3220                 next_page_index    = (page + 1) << PAGE_SHIFT;
3221                 length             =
3222                         min(next_page_index, payload_end_index) - payload_index;
3223                 pd[i].req_count    = cpu_to_le16(length);
3224
3225                 page_bus = page_private(buffer->pages[page]);
3226                 pd[i].data_address = cpu_to_le32(page_bus + offset);
3227
3228                 dma_sync_single_range_for_device(ctx->context.ohci->card.device,
3229                                                  page_bus, offset, length,
3230                                                  DMA_TO_DEVICE);
3231
3232                 payload_index += length;
3233         }
3234
3235         if (p->interrupt)
3236                 irq = DESCRIPTOR_IRQ_ALWAYS;
3237         else
3238                 irq = DESCRIPTOR_NO_IRQ;
3239
3240         last = z == 2 ? d : d + z - 1;
3241         last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
3242                                      DESCRIPTOR_STATUS |
3243                                      DESCRIPTOR_BRANCH_ALWAYS |
3244                                      irq);
3245
3246         context_append(&ctx->context, d, z, header_z);
3247
3248         return 0;
3249 }
3250
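/*
 * Builds one descriptor block per packet for a single-channel IR
 * context: an INPUT_MORE descriptor that receives the iso header (and
 * trailer) into the block's own trailing descriptors, followed by
 * INPUT_MORE descriptors for the payload pages, the last of which is
 * rewritten to INPUT_LAST with BRANCH_ALWAYS.  packet->header_length
 * divided by the context's header size gives the number of packets
 * queued in one call.
 */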
3251 static int queue_iso_packet_per_buffer(struct iso_context *ctx,
3252                                        struct fw_iso_packet *packet,
3253                                        struct fw_iso_buffer *buffer,
3254                                        unsigned long payload)
3255 {
3256         struct device *device = ctx->context.ohci->card.device;
3257         struct descriptor *d, *pd;
3258         dma_addr_t d_bus, page_bus;
3259         u32 z, header_z, rest;
3260         int i, j, length;
3261         int page, offset, packet_count, header_size, payload_per_buffer;
3262
3263         /*
3264          * The OHCI controller puts the isochronous header and trailer in the
3265          * buffer, so we need at least 8 bytes.
3266          */
3267         packet_count = packet->header_length / ctx->base.header_size;
3268         header_size  = max(ctx->base.header_size, (size_t)8);
3269
3270         /* Get header size in number of descriptors. */
3271         header_z = DIV_ROUND_UP(header_size, sizeof(*d));
3272         page     = payload >> PAGE_SHIFT;
3273         offset   = payload & ~PAGE_MASK;
3274         payload_per_buffer = packet->payload_length / packet_count;
3275
3276         for (i = 0; i < packet_count; i++) {
3277                 /* d points to the header descriptor */
3278                 z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1;
3279                 d = context_get_descriptors(&ctx->context,
3280                                 z + header_z, &d_bus);
3281                 if (d == NULL)
3282                         return -ENOMEM;
3283
3284                 d->control      = cpu_to_le16(DESCRIPTOR_STATUS |
3285                                               DESCRIPTOR_INPUT_MORE);
3286                 if (packet->skip && i == 0)
3287                         d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
3288                 d->req_count    = cpu_to_le16(header_size);
3289                 d->res_count    = d->req_count;
3290                 d->transfer_status = 0;
3291                 d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d)));
3292
3293                 rest = payload_per_buffer;
3294                 pd = d;
3295                 for (j = 1; j < z; j++) {
3296                         pd++;
3297                         pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
3298                                                   DESCRIPTOR_INPUT_MORE);
3299
3300                         if (offset + rest < PAGE_SIZE)
3301                                 length = rest;
3302                         else
3303                                 length = PAGE_SIZE - offset;
3304                         pd->req_count = cpu_to_le16(length);
3305                         pd->res_count = pd->req_count;
3306                         pd->transfer_status = 0;
3307
3308                         page_bus = page_private(buffer->pages[page]);
3309                         pd->data_address = cpu_to_le32(page_bus + offset);
3310
3311                         dma_sync_single_range_for_device(device, page_bus,
3312                                                          offset, length,
3313                                                          DMA_FROM_DEVICE);
3314
3315                         offset = (offset + length) & ~PAGE_MASK;
3316                         rest -= length;
3317                         if (offset == 0)
3318                                 page++;
3319                 }
3320                 pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
3321                                           DESCRIPTOR_INPUT_LAST |
3322                                           DESCRIPTOR_BRANCH_ALWAYS);
3323                 if (packet->interrupt && i == packet_count - 1)
3324                         pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
3325
3326                 context_append(&ctx->context, d, z, header_z);
3327         }
3328
3329         return 0;
3330 }
3331
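/*
 * Queues buffer space for a multichannel IR context.  Buffer-fill mode
 * has no per-packet descriptor blocks; each page simply becomes one
 * INPUT_MORE descriptor appended as its own block.  Offset and length
 * must stay quadlet-aligned because the controller packs packets back
 * to back into the buffer.
 */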
3332 static int queue_iso_buffer_fill(struct iso_context *ctx,
3333                                  struct fw_iso_packet *packet,
3334                                  struct fw_iso_buffer *buffer,
3335                                  unsigned long payload)
3336 {
3337         struct descriptor *d;
3338         dma_addr_t d_bus, page_bus;
3339         int page, offset, rest, z, i, length;
3340
3341         page   = payload >> PAGE_SHIFT;
3342         offset = payload & ~PAGE_MASK;
3343         rest   = packet->payload_length;
3344
3345         /* We need one descriptor for each page the payload touches. */
3346         z = DIV_ROUND_UP(offset + rest, PAGE_SIZE);
3347
3348         if (WARN_ON(offset & 3 || rest & 3 || page + z > buffer->page_count))
3349                 return -EFAULT;
3350
3351         for (i = 0; i < z; i++) {
3352                 d = context_get_descriptors(&ctx->context, 1, &d_bus);
3353                 if (d == NULL)
3354                         return -ENOMEM;
3355
3356                 d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
3357                                          DESCRIPTOR_BRANCH_ALWAYS);
3358                 if (packet->skip && i == 0)
3359                         d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
3360                 if (packet->interrupt && i == z - 1)
3361                         d->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
3362
3363                 if (offset + rest < PAGE_SIZE)
3364                         length = rest;
3365                 else
3366                         length = PAGE_SIZE - offset;
3367                 d->req_count = cpu_to_le16(length);
3368                 d->res_count = d->req_count;
3369                 d->transfer_status = 0;
3370
3371                 page_bus = page_private(buffer->pages[page]);
3372                 d->data_address = cpu_to_le32(page_bus + offset);
3373
3374                 dma_sync_single_range_for_device(ctx->context.ohci->card.device,
3375                                                  page_bus, offset, length,
3376                                                  DMA_FROM_DEVICE);
3377
3378                 rest -= length;
3379                 offset = 0;
3380                 page++;
3381
3382                 context_append(&ctx->context, d, 1, 0);
3383         }
3384
3385         return 0;
3386 }
3387
3388 static int ohci_queue_iso(struct fw_iso_context *base,
3389                           struct fw_iso_packet *packet,
3390                           struct fw_iso_buffer *buffer,
3391                           unsigned long payload)
3392 {
3393         struct iso_context *ctx = container_of(base, struct iso_context, base);
3394         unsigned long flags;
3395         int ret = -ENOSYS;
3396
3397         spin_lock_irqsave(&ctx->context.ohci->lock, flags);
3398         switch (base->type) {
3399         case FW_ISO_CONTEXT_TRANSMIT:
3400                 ret = queue_iso_transmit(ctx, packet, buffer, payload);
3401                 break;
3402         case FW_ISO_CONTEXT_RECEIVE:
3403                 ret = queue_iso_packet_per_buffer(ctx, packet, buffer, payload);
3404                 break;
3405         case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
3406                 ret = queue_iso_buffer_fill(ctx, packet, buffer, payload);
3407                 break;
3408         }
3409         spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);
3410
3411         return ret;
3412 }
3413
3414 static void ohci_flush_queue_iso(struct fw_iso_context *base)
3415 {
3416         struct context *ctx =
3417                         &container_of(base, struct iso_context, base)->context;
3418
3419         reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
3420 }
3421
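/*
 * Delivers pending completions synchronously by running the context
 * tasklet by hand.  tasklet_disable() keeps the softirq instance from
 * running at the same time, and the flushing_completions bit turns
 * overlapping calls into no-ops rather than reporting the same
 * completions twice.
 */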
3422 static int ohci_flush_iso_completions(struct fw_iso_context *base)
3423 {
3424         struct iso_context *ctx = container_of(base, struct iso_context, base);
3425         int ret = 0;
3426
3427         tasklet_disable(&ctx->context.tasklet);
3428
3429         if (!test_and_set_bit_lock(0, &ctx->flushing_completions)) {
3430                 context_tasklet((unsigned long)&ctx->context);
3431
3432                 switch (base->type) {
3433                 case FW_ISO_CONTEXT_TRANSMIT:
3434                 case FW_ISO_CONTEXT_RECEIVE:
3435                         if (ctx->header_length != 0)
3436                                 flush_iso_completions(ctx);
3437                         break;
3438                 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
3439                         if (ctx->mc_completed != 0)
3440                                 flush_ir_buffer_fill(ctx);
3441                         break;
3442                 default:
3443                         ret = -ENOSYS;
3444                 }
3445
3446                 clear_bit_unlock(0, &ctx->flushing_completions);
3447                 smp_mb__after_clear_bit();
3448         }
3449
3450         tasklet_enable(&ctx->context.tasklet);
3451
3452         return ret;
3453 }
3454
3455 static const struct fw_card_driver ohci_driver = {
3456         .enable                 = ohci_enable,
3457         .read_phy_reg           = ohci_read_phy_reg,
3458         .update_phy_reg         = ohci_update_phy_reg,
3459         .set_config_rom         = ohci_set_config_rom,
3460         .send_request           = ohci_send_request,
3461         .send_response          = ohci_send_response,
3462         .cancel_packet          = ohci_cancel_packet,
3463         .enable_phys_dma        = ohci_enable_phys_dma,
3464         .read_csr               = ohci_read_csr,
3465         .write_csr              = ohci_write_csr,
3466
3467         .allocate_iso_context   = ohci_allocate_iso_context,
3468         .free_iso_context       = ohci_free_iso_context,
3469         .set_iso_channels       = ohci_set_iso_channels,
3470         .queue_iso              = ohci_queue_iso,
3471         .flush_queue_iso        = ohci_flush_queue_iso,
3472         .flush_iso_completions  = ohci_flush_iso_completions,
3473         .start_iso              = ohci_start_iso,
3474         .stop_iso               = ohci_stop_iso,
3475 };
3476
3477 #ifdef CONFIG_PPC_PMAC
3478 static void pmac_ohci_on(struct pci_dev *dev)
3479 {
3480         if (machine_is(powermac)) {
3481                 struct device_node *ofn = pci_device_to_OF_node(dev);
3482
3483                 if (ofn) {
3484                         pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
3485                         pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
3486                 }
3487         }
3488 }
3489
3490 static void pmac_ohci_off(struct pci_dev *dev)
3491 {
3492         if (machine_is(powermac)) {
3493                 struct device_node *ofn = pci_device_to_OF_node(dev);
3494
3495                 if (ofn) {
3496                         pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
3497                         pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
3498                 }
3499         }
3500 }
3501 #else
3502 static inline void pmac_ohci_on(struct pci_dev *dev) {}
3503 static inline void pmac_ohci_off(struct pci_dev *dev) {}
3504 #endif /* CONFIG_PPC_PMAC */
3505
3506 static int __devinit pci_probe(struct pci_dev *dev,
3507                                const struct pci_device_id *ent)
3508 {
3509         struct fw_ohci *ohci;
3510         u32 bus_options, max_receive, link_speed, version;
3511         u64 guid;
3512         int i, err;
3513         size_t size;
3514
3515         if (dev->vendor == PCI_VENDOR_ID_PINNACLE_SYSTEMS) {
3516                 dev_err(&dev->dev, "Pinnacle MovieBoard is not yet supported\n");
3517                 return -ENOSYS;
3518         }
3519
3520         ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
3521         if (ohci == NULL) {
3522                 err = -ENOMEM;
3523                 goto fail;
3524         }
3525
3526         fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);
3527
3528         pmac_ohci_on(dev);
3529
3530         err = pci_enable_device(dev);
3531         if (err) {
3532                 dev_err(&dev->dev, "failed to enable OHCI hardware\n");
3533                 goto fail_free;
3534         }
3535
3536         pci_set_master(dev);
3537         pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
3538         pci_set_drvdata(dev, ohci);
3539
3540         spin_lock_init(&ohci->lock);
3541         mutex_init(&ohci->phy_reg_mutex);
3542
3543         INIT_WORK(&ohci->bus_reset_work, bus_reset_work);
3544
3545         err = pci_request_region(dev, 0, ohci_driver_name);
3546         if (err) {
3547                 dev_err(&dev->dev, "MMIO resource unavailable\n");
3548                 goto fail_disable;
3549         }
3550
3551         ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE);
3552         if (ohci->registers == NULL) {
3553                 dev_err(&dev->dev, "failed to remap registers\n");
3554                 err = -ENXIO;
3555                 goto fail_iomem;
3556         }
3557
3558         for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++)
3559                 if ((ohci_quirks[i].vendor == dev->vendor) &&
3560                     (ohci_quirks[i].device == (unsigned short)PCI_ANY_ID ||
3561                      ohci_quirks[i].device == dev->device) &&
3562                     (ohci_quirks[i].revision == (unsigned short)PCI_ANY_ID ||
3563                      ohci_quirks[i].revision >= dev->revision)) {
3564                         ohci->quirks = ohci_quirks[i].flags;
3565                         break;
3566                 }
3567         if (param_quirks)
3568                 ohci->quirks = param_quirks;
3569
3570         /*
3571          * Because dma_alloc_coherent() allocates at least one page,
3572          * we save space by using a common buffer for the AR request/
3573          * response descriptors and the self-ID buffer.
3574          */
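        /*
         * Rough layout of that shared page, matching the offsets used
         * below and in the self-ID setup further down:
         *   [0, PAGE_SIZE/4)            AR request context descriptors
         *   [PAGE_SIZE/4, PAGE_SIZE/2)  AR response context descriptors
         *   [PAGE_SIZE/2, PAGE_SIZE)    self-ID receive buffer
         */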
3575         BUILD_BUG_ON(AR_BUFFERS * sizeof(struct descriptor) > PAGE_SIZE/4);
3576         BUILD_BUG_ON(SELF_ID_BUF_SIZE > PAGE_SIZE/2);
3577         ohci->misc_buffer = dma_alloc_coherent(ohci->card.device,
3578                                                PAGE_SIZE,
3579                                                &ohci->misc_buffer_bus,
3580                                                GFP_KERNEL);
3581         if (!ohci->misc_buffer) {
3582                 err = -ENOMEM;
3583                 goto fail_iounmap;
3584         }
3585
3586         err = ar_context_init(&ohci->ar_request_ctx, ohci, 0,
3587                               OHCI1394_AsReqRcvContextControlSet);
3588         if (err < 0)
3589                 goto fail_misc_buf;
3590
3591         err = ar_context_init(&ohci->ar_response_ctx, ohci, PAGE_SIZE/4,
3592                               OHCI1394_AsRspRcvContextControlSet);
3593         if (err < 0)
3594                 goto fail_arreq_ctx;
3595
3596         err = context_init(&ohci->at_request_ctx, ohci,
3597                            OHCI1394_AsReqTrContextControlSet, handle_at_packet);
3598         if (err < 0)
3599                 goto fail_arrsp_ctx;
3600
3601         err = context_init(&ohci->at_response_ctx, ohci,
3602                            OHCI1394_AsRspTrContextControlSet, handle_at_packet);
3603         if (err < 0)
3604                 goto fail_atreq_ctx;
3605
3606         reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
3607         ohci->ir_context_channels = ~0ULL;
3608         ohci->ir_context_support = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
3609         reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
3610         ohci->ir_context_mask = ohci->ir_context_support;
3611         ohci->n_ir = hweight32(ohci->ir_context_mask);
3612         size = sizeof(struct iso_context) * ohci->n_ir;
3613         ohci->ir_context_list = kzalloc(size, GFP_KERNEL);
3614
3615         reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
3616         ohci->it_context_support = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
3617         reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
3618         ohci->it_context_mask = ohci->it_context_support;
3619         ohci->n_it = hweight32(ohci->it_context_mask);
3620         size = sizeof(struct iso_context) * ohci->n_it;
3621         ohci->it_context_list = kzalloc(size, GFP_KERNEL);
3622
3623         if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
3624                 err = -ENOMEM;
3625                 goto fail_contexts;
3626         }
3627
3628         ohci->self_id_cpu = ohci->misc_buffer     + PAGE_SIZE/2;
3629         ohci->self_id_bus = ohci->misc_buffer_bus + PAGE_SIZE/2;
3630
3631         bus_options = reg_read(ohci, OHCI1394_BusOptions);
3632         max_receive = (bus_options >> 12) & 0xf;
3633         link_speed = bus_options & 0x7;
3634         guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
3635                 reg_read(ohci, OHCI1394_GUIDLo);
3636
3637         err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
3638         if (err)
3639                 goto fail_contexts;
3640
3641         version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
3642         dev_notice(&dev->dev,
3643                   "added OHCI v%x.%x device as card %d, "
3644                   "%d IR + %d IT contexts, quirks 0x%x\n",
3645                   version >> 16, version & 0xff, ohci->card.index,
3646                   ohci->n_ir, ohci->n_it, ohci->quirks);
3647
3648         return 0;
3649
3650  fail_contexts:
3651         kfree(ohci->ir_context_list);
3652         kfree(ohci->it_context_list);
3653         context_release(&ohci->at_response_ctx);
3654  fail_atreq_ctx:
3655         context_release(&ohci->at_request_ctx);
3656  fail_arrsp_ctx:
3657         ar_context_release(&ohci->ar_response_ctx);
3658  fail_arreq_ctx:
3659         ar_context_release(&ohci->ar_request_ctx);
3660  fail_misc_buf:
3661         dma_free_coherent(ohci->card.device, PAGE_SIZE,
3662                           ohci->misc_buffer, ohci->misc_buffer_bus);
3663  fail_iounmap:
3664         pci_iounmap(dev, ohci->registers);
3665  fail_iomem:
3666         pci_release_region(dev, 0);
3667  fail_disable:
3668         pci_disable_device(dev);
3669  fail_free:
3670         kfree(ohci);
3671         pmac_ohci_off(dev);
3672  fail:
3673         if (err == -ENOMEM)
3674                 dev_err(&dev->dev, "out of memory\n");
3675
3676         return err;
3677 }
3678
3679 static void pci_remove(struct pci_dev *dev)
3680 {
3681         struct fw_ohci *ohci;
3682
3683         ohci = pci_get_drvdata(dev);
3684         reg_write(ohci, OHCI1394_IntMaskClear, ~0);
3685         flush_writes(ohci);
3686         cancel_work_sync(&ohci->bus_reset_work);
3687         fw_core_remove_card(&ohci->card);
3688
3689         /*
3690          * FIXME: Fail all pending packets here, now that the upper
3691          * layers can't queue any more.
3692          */
3693
3694         software_reset(ohci);
3695         free_irq(dev->irq, ohci);
3696
3697         if (ohci->next_config_rom && ohci->next_config_rom != ohci->config_rom)
3698                 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
3699                                   ohci->next_config_rom, ohci->next_config_rom_bus);
3700         if (ohci->config_rom)
3701                 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
3702                                   ohci->config_rom, ohci->config_rom_bus);
3703         ar_context_release(&ohci->ar_request_ctx);
3704         ar_context_release(&ohci->ar_response_ctx);
3705         dma_free_coherent(ohci->card.device, PAGE_SIZE,
3706                           ohci->misc_buffer, ohci->misc_buffer_bus);
3707         context_release(&ohci->at_request_ctx);
3708         context_release(&ohci->at_response_ctx);
3709         kfree(ohci->it_context_list);
3710         kfree(ohci->ir_context_list);
3711         pci_disable_msi(dev);
3712         pci_iounmap(dev, ohci->registers);
3713         pci_release_region(dev, 0);
3714         pci_disable_device(dev);
3715         kfree(ohci);
3716         pmac_ohci_off(dev);
3717
3718         dev_notice(&dev->dev, "removed fw-ohci device\n");
3719 }
3720
3721 #ifdef CONFIG_PM
3722 static int pci_suspend(struct pci_dev *dev, pm_message_t state)
3723 {
3724         struct fw_ohci *ohci = pci_get_drvdata(dev);
3725         int err;
3726
3727         software_reset(ohci);
3728         free_irq(dev->irq, ohci);
3729         pci_disable_msi(dev);
3730         err = pci_save_state(dev);
3731         if (err) {
3732                 dev_err(&dev->dev, "pci_save_state failed\n");
3733                 return err;
3734         }
3735         err = pci_set_power_state(dev, pci_choose_state(dev, state));
3736         if (err)
3737                 dev_err(&dev->dev, "pci_set_power_state failed with %d\n", err);
3738         pmac_ohci_off(dev);
3739
3740         return 0;
3741 }
3742
3743 static int pci_resume(struct pci_dev *dev)
3744 {
3745         struct fw_ohci *ohci = pci_get_drvdata(dev);
3746         int err;
3747
3748         pmac_ohci_on(dev);
3749         pci_set_power_state(dev, PCI_D0);
3750         pci_restore_state(dev);
3751         err = pci_enable_device(dev);
3752         if (err) {
3753                 dev_err(&dev->dev, "pci_enable_device failed\n");
3754                 return err;
3755         }
3756
3757         /* Some systems don't set up the GUID register on resume from RAM */
3758         if (!reg_read(ohci, OHCI1394_GUIDLo) &&
3759                                         !reg_read(ohci, OHCI1394_GUIDHi)) {
3760                 reg_write(ohci, OHCI1394_GUIDLo, (u32)ohci->card.guid);
3761                 reg_write(ohci, OHCI1394_GUIDHi, (u32)(ohci->card.guid >> 32));
3762         }
3763
3764         err = ohci_enable(&ohci->card, NULL, 0);
3765         if (err)
3766                 return err;
3767
3768         ohci_resume_iso_dma(ohci);
3769
3770         return 0;
3771 }
3772 #endif
3773
3774 static const struct pci_device_id pci_table[] = {
3775         { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
3776         { }
3777 };
3778
3779 MODULE_DEVICE_TABLE(pci, pci_table);
3780
3781 static struct pci_driver fw_ohci_pci_driver = {
3782         .name           = ohci_driver_name,
3783         .id_table       = pci_table,
3784         .probe          = pci_probe,
3785         .remove         = pci_remove,
3786 #ifdef CONFIG_PM
3787         .resume         = pci_resume,
3788         .suspend        = pci_suspend,
3789 #endif
3790 };
3791
3792 MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
3793 MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
3794 MODULE_LICENSE("GPL");
3795
3796 /* Provide a module alias so root-on-sbp2 initrds don't break. */
3797 #ifndef CONFIG_IEEE1394_OHCI1394_MODULE
3798 MODULE_ALIAS("ohci1394");
3799 #endif
3800
3801 static int __init fw_ohci_init(void)
3802 {
3803         return pci_register_driver(&fw_ohci_pci_driver);
3804 }
3805
3806 static void __exit fw_ohci_cleanup(void)
3807 {
3808         pci_unregister_driver(&fw_ohci_pci_driver);
3809 }
3810
3811 module_init(fw_ohci_init);
3812 module_exit(fw_ohci_cleanup);