rtime.felk.cvut.cz Git - linux-imx.git/commitdiff
Merge branch 'master' into for-next
author Jiri Kosina <jkosina@suse.cz>
Tue, 29 Jan 2013 09:48:30 +0000 (10:48 +0100)
committer Jiri Kosina <jkosina@suse.cz>
Tue, 29 Jan 2013 09:48:30 +0000 (10:48 +0100)
Conflicts:
drivers/devfreq/exynos4_bus.c

Sync with Linus' tree to be able to apply patches that are
against newer code (mvneta).

30 files changed:
arch/powerpc/platforms/85xx/p1022_ds.c
arch/powerpc/platforms/86xx/mpc8610_hpcd.c
arch/xtensa/Kconfig
drivers/bluetooth/ath3k.c
drivers/devfreq/exynos4_bus.c
drivers/firewire/ohci.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/hid/Kconfig
drivers/isdn/mISDN/l1oip_core.c
drivers/media/platform/soc_camera/mx2_camera.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/mellanox/mlx4/en_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/pcmcia/rsrc_nonstatic.c
drivers/rpmsg/virtio_rpmsg_bus.c
drivers/scsi/Kconfig
drivers/staging/octeon/ethernet.c
drivers/target/sbp/sbp_target.c
fs/btrfs/extent-tree.c
fs/btrfs/relocation.c
fs/btrfs/transaction.c
fs/hpfs/inode.c
include/linux/dma-buf.h
net/bluetooth/hci_core.c
net/sctp/sm_make_chunk.c
sound/soc/codecs/ab8500-codec.c
sound/soc/codecs/wm8974.c
sound/soc/codecs/wm8978.c
sound/soc/codecs/wm8983.c
sound/soc/codecs/wm8985.c

index 8b0e05f4f60cc7ec1065baefbf5fc19983bb20dc,7328b8d741294bba73a486fb5818acc5596af967..e346edf7f157dfd180128c84bf7522499246fe15
@@@ -215,13 -215,13 +215,13 @@@ static void p1022ds_set_monitor_port(en
        /* Map the global utilities registers. */
        guts_node = of_find_compatible_node(NULL, NULL, "fsl,p1022-guts");
        if (!guts_node) {
 -              pr_err("p1022ds: missing global utilties device node\n");
 +              pr_err("p1022ds: missing global utilities device node\n");
                return;
        }
  
        guts = of_iomap(guts_node, 0);
        if (!guts) {
 -              pr_err("p1022ds: could not map global utilties device\n");
 +              pr_err("p1022ds: could not map global utilities device\n");
                goto exit;
        }
  
                goto exit;
        }
  
-       iprop = of_get_property(law_node, "fsl,num-laws", 0);
+       iprop = of_get_property(law_node, "fsl,num-laws", NULL);
        if (!iprop) {
                pr_err("p1022ds: LAW node is missing fsl,num-laws property\n");
                goto exit;
@@@ -416,14 -416,14 +416,14 @@@ void p1022ds_set_pixel_clock(unsigned i
        /* Map the global utilities registers. */
        guts_np = of_find_compatible_node(NULL, NULL, "fsl,p1022-guts");
        if (!guts_np) {
 -              pr_err("p1022ds: missing global utilties device node\n");
 +              pr_err("p1022ds: missing global utilities device node\n");
                return;
        }
  
        guts = of_iomap(guts_np, 0);
        of_node_put(guts_np);
        if (!guts) {
 -              pr_err("p1022ds: could not map global utilties device\n");
 +              pr_err("p1022ds: could not map global utilities device\n");
                return;
        }
  
@@@ -539,7 -539,7 +539,7 @@@ static void __init p1022_ds_setup_arch(
                                };
  
                                /*
-                                * prom_update_property() is called before
+                                * of_update_property() is called before
                                 * kmalloc() is available, so the 'new' object
                                 * should be allocated in the global area.
                                 * The easiest way is to do that is to
                                 */
                                pr_info("p1022ds: disabling %s node",
                                        np2->full_name);
-                               prom_update_property(np2, &nor_status);
+                               of_update_property(np2, &nor_status);
                                of_node_put(np2);
                        }
  
  
                                pr_info("p1022ds: disabling %s node",
                                        np2->full_name);
-                               prom_update_property(np2, &nand_status);
+                               of_update_property(np2, &nand_status);
                                of_node_put(np2);
                        }
  
index 7076006f972e764e12bc4f91e5c24a47441547ad,04d9d317f741b4f30d27bba8aeb52fc7afb72e0e..d479d68fbb2bc5017b00b161975533be0c7181cf
@@@ -236,14 -236,14 +236,14 @@@ void mpc8610hpcd_set_pixel_clock(unsign
        /* Map the global utilities registers. */
        guts_np = of_find_compatible_node(NULL, NULL, "fsl,mpc8610-guts");
        if (!guts_np) {
 -              pr_err("mpc8610hpcd: missing global utilties device node\n");
 +              pr_err("mpc8610hpcd: missing global utilities device node\n");
                return;
        }
  
        guts = of_iomap(guts_np, 0);
        of_node_put(guts_np);
        if (!guts) {
 -              pr_err("mpc8610hpcd: could not map global utilties device\n");
 +              pr_err("mpc8610hpcd: could not map global utilities device\n");
                return;
        }
  
@@@ -353,5 -353,7 +353,7 @@@ define_machine(mpc86xx_hpcd) 
        .time_init              = mpc86xx_time_init,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = udbg_progress,
+ #ifdef CONFIG_PCI
        .pcibios_fixup_bus      = fsl_pcibios_fixup_bus,
+ #endif
  };
diff --combined arch/xtensa/Kconfig
index 530f18018d025a5fdd2df964e7b33e8103de044c,5aab1acabf1cb4440c0b5656ecff733bc1df7b07..68f172948a314eb6055323b9e80b33c0da2dff14
@@@ -14,13 -14,15 +14,15 @@@ config XTENS
        select MODULES_USE_ELF_RELA
        select GENERIC_PCI_IOMAP
        select ARCH_WANT_OPTIONAL_GPIOLIB
+       select CLONE_BACKWARDS
+       select IRQ_DOMAIN
        help
          Xtensa processors are 32-bit RISC machines designed by Tensilica
          primarily for embedded systems.  These processors are both
          configurable and extensible.  The Linux port to the Xtensa
          architecture supports all processor configurations and extensions,
          with reasonable minimum requirements.  The Xtensa Linux project has
 -        a home page at <http://xtensa.sourceforge.net/>.
 +        a home page at <http://www.linux-xtensa.org/>.
  
  config RWSEM_XCHGADD_ALGORITHM
        def_bool y
@@@ -147,6 -149,15 +149,15 @@@ config XTENSA_PLATFORM_S610
        select SERIAL_CONSOLE
        select NO_IOPORT
  
+ config XTENSA_PLATFORM_XTFPGA
+       bool "XTFPGA"
+       select SERIAL_CONSOLE
+       select ETHOC
+       select XTENSA_CALIBRATE_CCOUNT
+       help
+         XTFPGA is the name of Tensilica board family (LX60, LX110, LX200, ML605).
+         This hardware is capable of running a full Linux distribution.
  endchoice
  
  
@@@ -174,6 -185,17 +185,17 @@@ config CMDLIN
          time by entering them here. As a minimum, you should specify the
          memory size and the root device (e.g., mem=64M root=/dev/nfs).
  
+ config USE_OF
+       bool "Flattened Device Tree support"
+       select OF
+       select OF_EARLY_FLATTREE
+       help
+         Include support for flattened device tree machine descriptions.
+ config BUILTIN_DTB
+       string "DTB to build into the kernel image"
+       depends on OF
  source "mm/Kconfig"
  
  source "drivers/pcmcia/Kconfig"
index 9cd3cb8f888adff67935e6a12c3df641e9207785,33c9a44a967899ac5b46e9b3e9951b2e80ff164d..a8a41e07a221695684fd78770980749b25ff2b5a
@@@ -67,6 -67,7 +67,7 @@@ static struct usb_device_id ath3k_table
        { USB_DEVICE(0x13d3, 0x3304) },
        { USB_DEVICE(0x0930, 0x0215) },
        { USB_DEVICE(0x0489, 0xE03D) },
+       { USB_DEVICE(0x0489, 0xE027) },
  
        /* Atheros AR9285 Malbec with sflash firmware */
        { USB_DEVICE(0x03F0, 0x311D) },
        { USB_DEVICE(0x0CF3, 0x311D) },
        { USB_DEVICE(0x13d3, 0x3375) },
        { USB_DEVICE(0x04CA, 0x3005) },
+       { USB_DEVICE(0x04CA, 0x3006) },
+       { USB_DEVICE(0x04CA, 0x3008) },
        { USB_DEVICE(0x13d3, 0x3362) },
        { USB_DEVICE(0x0CF3, 0xE004) },
        { USB_DEVICE(0x0930, 0x0219) },
        { USB_DEVICE(0x0489, 0xe057) },
+       { USB_DEVICE(0x13d3, 0x3393) },
+       { USB_DEVICE(0x0489, 0xe04e) },
+       { USB_DEVICE(0x0489, 0xe056) },
  
        /* Atheros AR5BBU12 with sflash firmware */
        { USB_DEVICE(0x0489, 0xE02C) },
@@@ -103,10 -109,15 +109,15 @@@ static struct usb_device_id ath3k_blist
        { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
  
        /* Atheros AR5BBU22 with sflash firmware */
        { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
@@@ -338,7 -349,7 +349,7 @@@ static int ath3k_load_syscfg(struct usb
  
        ret = ath3k_get_state(udev, &fw_state);
        if (ret < 0) {
 -              BT_ERR("Can't get state to change to load configration err");
 +              BT_ERR("Can't get state to change to load configuration err");
                return -EBUSY;
        }
  
index e1ac076c2917d7cb7151308610ef5cf15299582a,46d94e9e95b53ae033af012ef745822e0b7f5ccd..3f37f3b3f2689bd76438d6ac5e079ce26b433642
@@@ -73,6 -73,16 +73,16 @@@ enum busclk_level_idx 
  #define EX4210_LV_NUM (LV_2 + 1)
  #define EX4x12_LV_NUM (LV_4 + 1)
  
+ /**
+  * struct busfreq_opp_info - opp information for bus
+  * @rate:     Frequency in hertz
+  * @volt:     Voltage in microvolts corresponding to this OPP
+  */
+ struct busfreq_opp_info {
+       unsigned long rate;
+       unsigned long volt;
+ };
  struct busfreq_data {
        enum exynos4_busf_type type;
        struct device *dev;
@@@ -80,7 -90,7 +90,7 @@@
        bool disabled;
        struct regulator *vdd_int;
        struct regulator *vdd_mif; /* Exynos4412/4212 only */
-       struct opp *curr_opp;
+       struct busfreq_opp_info curr_oppinfo;
        struct exynos4_ppmu dmc[2];
  
        struct notifier_block pm_notifier;
@@@ -296,13 -306,14 +306,14 @@@ static unsigned int exynos4x12_clkdiv_s
  };
  
  
- static int exynos4210_set_busclk(struct busfreq_data *data, struct opp *opp)
+ static int exynos4210_set_busclk(struct busfreq_data *data,
+                                struct busfreq_opp_info *oppi)
  {
        unsigned int index;
        unsigned int tmp;
  
        for (index = LV_0; index < EX4210_LV_NUM; index++)
-               if (opp_get_freq(opp) == exynos4210_busclk_table[index].clk)
+               if (oppi->rate == exynos4210_busclk_table[index].clk)
                        break;
  
        if (index == EX4210_LV_NUM)
        return 0;
  }
  
- static int exynos4x12_set_busclk(struct busfreq_data *data, struct opp *opp)
+ static int exynos4x12_set_busclk(struct busfreq_data *data,
+                                struct busfreq_opp_info *oppi)
  {
        unsigned int index;
        unsigned int tmp;
  
        for (index = LV_0; index < EX4x12_LV_NUM; index++)
-               if (opp_get_freq(opp) == exynos4x12_mifclk_table[index].clk)
+               if (oppi->rate == exynos4x12_mifclk_table[index].clk)
                        break;
  
        if (index == EX4x12_LV_NUM)
@@@ -576,11 -588,12 +588,12 @@@ static int exynos4x12_get_intspec(unsig
        return -EINVAL;
  }
  
- static int exynos4_bus_setvolt(struct busfreq_data *data, struct opp *opp,
-                              struct opp *oldopp)
+ static int exynos4_bus_setvolt(struct busfreq_data *data,
+                              struct busfreq_opp_info *oppi,
+                              struct busfreq_opp_info *oldoppi)
  {
        int err = 0, tmp;
-       unsigned long volt = opp_get_voltage(opp);
+       unsigned long volt = oppi->volt;
  
        switch (data->type) {
        case TYPE_BUSF_EXYNOS4210:
                if (err)
                        break;
  
-               tmp = exynos4x12_get_intspec(opp_get_freq(opp));
+               tmp = exynos4x12_get_intspec(oppi->rate);
                if (tmp < 0) {
                        err = tmp;
                        regulator_set_voltage(data->vdd_mif,
-                                             opp_get_voltage(oldopp),
+                                             oldoppi->volt,
                                              MAX_SAFEVOLT);
                        break;
                }
                /*  Try to recover */
                if (err)
                        regulator_set_voltage(data->vdd_mif,
-                                             opp_get_voltage(oldopp),
+                                             oldoppi->volt,
                                              MAX_SAFEVOLT);
                break;
        default:
@@@ -626,17 -639,26 +639,26 @@@ static int exynos4_bus_target(struct de
        struct platform_device *pdev = container_of(dev, struct platform_device,
                                                    dev);
        struct busfreq_data *data = platform_get_drvdata(pdev);
-       struct opp *opp = devfreq_recommended_opp(dev, _freq, flags);
-       unsigned long freq = opp_get_freq(opp);
-       unsigned long old_freq = opp_get_freq(data->curr_opp);
+       struct opp *opp;
+       unsigned long freq;
+       unsigned long old_freq = data->curr_oppinfo.rate;
+       struct busfreq_opp_info new_oppinfo;
  
-       if (IS_ERR(opp))
+       rcu_read_lock();
+       opp = devfreq_recommended_opp(dev, _freq, flags);
+       if (IS_ERR(opp)) {
+               rcu_read_unlock();
                return PTR_ERR(opp);
+       }
+       new_oppinfo.rate = opp_get_freq(opp);
+       new_oppinfo.volt = opp_get_voltage(opp);
+       rcu_read_unlock();
+       freq = new_oppinfo.rate;
  
        if (old_freq == freq)
                return 0;
  
-       dev_dbg(dev, "targeting %lukHz %luuV\n", freq, opp_get_voltage(opp));
 -      dev_dbg(dev, "targetting %lukHz %luuV\n", freq, new_oppinfo.volt);
++      dev_dbg(dev, "targeting %lukHz %luuV\n", freq, new_oppinfo.volt);
  
        mutex_lock(&data->lock);
  
                goto out;
  
        if (old_freq < freq)
-               err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+               err = exynos4_bus_setvolt(data, &new_oppinfo,
+                                         &data->curr_oppinfo);
        if (err)
                goto out;
  
        if (old_freq != freq) {
                switch (data->type) {
                case TYPE_BUSF_EXYNOS4210:
-                       err = exynos4210_set_busclk(data, opp);
+                       err = exynos4210_set_busclk(data, &new_oppinfo);
                        break;
                case TYPE_BUSF_EXYNOS4x12:
-                       err = exynos4x12_set_busclk(data, opp);
+                       err = exynos4x12_set_busclk(data, &new_oppinfo);
                        break;
                default:
                        err = -EINVAL;
                goto out;
  
        if (old_freq > freq)
-               err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+               err = exynos4_bus_setvolt(data, &new_oppinfo,
+                                         &data->curr_oppinfo);
        if (err)
                goto out;
  
-       data->curr_opp = opp;
+       data->curr_oppinfo = new_oppinfo;
  out:
        mutex_unlock(&data->lock);
        return err;
@@@ -702,7 -726,7 +726,7 @@@ static int exynos4_bus_get_dev_status(s
  
        exynos4_read_ppmu(data);
        busier_dmc = exynos4_get_busier_dmc(data);
-       stat->current_frequency = opp_get_freq(data->curr_opp);
+       stat->current_frequency = data->curr_oppinfo.rate;
  
        if (busier_dmc)
                addr = S5P_VA_DMC1;
@@@ -933,6 -957,7 +957,7 @@@ static int exynos4_busfreq_pm_notifier_
        struct busfreq_data *data = container_of(this, struct busfreq_data,
                                                 pm_notifier);
        struct opp *opp;
+       struct busfreq_opp_info new_oppinfo;
        unsigned long maxfreq = ULONG_MAX;
        int err = 0;
  
  
                data->disabled = true;
  
+               rcu_read_lock();
                opp = opp_find_freq_floor(data->dev, &maxfreq);
+               if (IS_ERR(opp)) {
+                       rcu_read_unlock();
+                       dev_err(data->dev, "%s: unable to find a min freq\n",
+                               __func__);
+                       return PTR_ERR(opp);
+               }
+               new_oppinfo.rate = opp_get_freq(opp);
+               new_oppinfo.volt = opp_get_voltage(opp);
+               rcu_read_unlock();
  
-               err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+               err = exynos4_bus_setvolt(data, &new_oppinfo,
+                                         &data->curr_oppinfo);
                if (err)
                        goto unlock;
  
                switch (data->type) {
                case TYPE_BUSF_EXYNOS4210:
-                       err = exynos4210_set_busclk(data, opp);
+                       err = exynos4210_set_busclk(data, &new_oppinfo);
                        break;
                case TYPE_BUSF_EXYNOS4x12:
-                       err = exynos4x12_set_busclk(data, opp);
+                       err = exynos4x12_set_busclk(data, &new_oppinfo);
                        break;
                default:
                        err = -EINVAL;
                if (err)
                        goto unlock;
  
-               data->curr_opp = opp;
+               data->curr_oppinfo = new_oppinfo;
  unlock:
                mutex_unlock(&data->lock);
                if (err)
        return NOTIFY_DONE;
  }
  
- static __devinit int exynos4_busfreq_probe(struct platform_device *pdev)
+ static int exynos4_busfreq_probe(struct platform_device *pdev)
  {
        struct busfreq_data *data;
        struct opp *opp;
        struct device *dev = &pdev->dev;
        int err = 0;
  
-       data = kzalloc(sizeof(struct busfreq_data), GFP_KERNEL);
+       data = devm_kzalloc(&pdev->dev, sizeof(struct busfreq_data), GFP_KERNEL);
        if (data == NULL) {
                dev_err(dev, "Cannot allocate memory.\n");
                return -ENOMEM;
                err = -EINVAL;
        }
        if (err)
-               goto err_regulator;
+               return err;
  
-       data->vdd_int = regulator_get(dev, "vdd_int");
+       data->vdd_int = devm_regulator_get(dev, "vdd_int");
        if (IS_ERR(data->vdd_int)) {
                dev_err(dev, "Cannot get the regulator \"vdd_int\"\n");
-               err = PTR_ERR(data->vdd_int);
-               goto err_regulator;
+               return PTR_ERR(data->vdd_int);
        }
        if (data->type == TYPE_BUSF_EXYNOS4x12) {
-               data->vdd_mif = regulator_get(dev, "vdd_mif");
+               data->vdd_mif = devm_regulator_get(dev, "vdd_mif");
                if (IS_ERR(data->vdd_mif)) {
                        dev_err(dev, "Cannot get the regulator \"vdd_mif\"\n");
-                       err = PTR_ERR(data->vdd_mif);
-                       regulator_put(data->vdd_int);
-                       goto err_regulator;
+                       return PTR_ERR(data->vdd_mif);
                }
        }
  
+       rcu_read_lock();
        opp = opp_find_freq_floor(dev, &exynos4_devfreq_profile.initial_freq);
        if (IS_ERR(opp)) {
+               rcu_read_unlock();
                dev_err(dev, "Invalid initial frequency %lu kHz.\n",
-                      exynos4_devfreq_profile.initial_freq);
-               err = PTR_ERR(opp);
-               goto err_opp_add;
+                       exynos4_devfreq_profile.initial_freq);
+               return PTR_ERR(opp);
        }
-       data->curr_opp = opp;
+       data->curr_oppinfo.rate = opp_get_freq(opp);
+       data->curr_oppinfo.volt = opp_get_voltage(opp);
+       rcu_read_unlock();
  
        platform_set_drvdata(pdev, data);
  
        busfreq_mon_reset(data);
  
        data->devfreq = devfreq_add_device(dev, &exynos4_devfreq_profile,
-                                          &devfreq_simple_ondemand, NULL);
-       if (IS_ERR(data->devfreq)) {
-               err = PTR_ERR(data->devfreq);
-               goto err_opp_add;
-       }
+                                          "simple_ondemand", NULL);
+       if (IS_ERR(data->devfreq))
+               return PTR_ERR(data->devfreq);
  
        devfreq_register_opp_notifier(dev, data->devfreq);
  
        err = register_pm_notifier(&data->pm_notifier);
        if (err) {
                dev_err(dev, "Failed to setup pm notifier\n");
-               goto err_devfreq_add;
+               devfreq_remove_device(data->devfreq);
+               return err;
        }
  
        return 0;
- err_devfreq_add:
-       devfreq_remove_device(data->devfreq);
- err_opp_add:
-       if (data->vdd_mif)
-               regulator_put(data->vdd_mif);
-       regulator_put(data->vdd_int);
- err_regulator:
-       kfree(data);
-       return err;
  }
  
- static __devexit int exynos4_busfreq_remove(struct platform_device *pdev)
+ static int exynos4_busfreq_remove(struct platform_device *pdev)
  {
        struct busfreq_data *data = platform_get_drvdata(pdev);
  
        unregister_pm_notifier(&data->pm_notifier);
        devfreq_remove_device(data->devfreq);
-       regulator_put(data->vdd_int);
-       if (data->vdd_mif)
-               regulator_put(data->vdd_mif);
-       kfree(data);
  
        return 0;
  }
@@@ -1106,7 -1127,7 +1127,7 @@@ static const struct platform_device_id 
  
  static struct platform_driver exynos4_busfreq_driver = {
        .probe  = exynos4_busfreq_probe,
-       .remove = __devexit_p(exynos4_busfreq_remove),
+       .remove = exynos4_busfreq_remove,
        .id_table = exynos4_busfreq_id,
        .driver = {
                .name   = "exynos4-busfreq",
diff --combined drivers/firewire/ohci.c
index 638eea51376b9f8ad13d2ac09363d2225d4e1a8c,6ce6e07c38c1b3ba038ee816e26225e8b5f9ee4a..45912e6e0ac2e59b9b6a1b9001d984b9d2760ec0
@@@ -329,7 -329,7 +329,7 @@@ module_param_named(quirks, param_quirks
  MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
        ", nonatomic cycle timer = "    __stringify(QUIRK_CYCLE_TIMER)
        ", reset packet generation = "  __stringify(QUIRK_RESET_PACKET)
 -      ", AR/selfID endianess = "      __stringify(QUIRK_BE_HEADERS)
 +      ", AR/selfID endianness = "     __stringify(QUIRK_BE_HEADERS)
        ", no 1394a enhancements = "    __stringify(QUIRK_NO_1394A)
        ", disable MSI = "              __stringify(QUIRK_NO_MSI)
        ", TI SLLZ059 erratum = "       __stringify(QUIRK_TI_SLLZ059)
@@@ -3537,7 -3537,7 +3537,7 @@@ static inline void pmac_ohci_on(struct 
  static inline void pmac_ohci_off(struct pci_dev *dev) {}
  #endif /* CONFIG_PPC_PMAC */
  
- static int __devinit pci_probe(struct pci_dev *dev,
+ static int pci_probe(struct pci_dev *dev,
                               const struct pci_device_id *ent)
  {
        struct fw_ohci *ohci;
index 6b39e486113357639b422df8b5a4f197b31abed8,7944d301518ac80f2ef1b72e1b5e29b5213bc33f..6fa0fc057d9f4a76bc5572f897dbf467f8c33364
@@@ -317,7 -317,7 +317,7 @@@ static int i915_gem_pageflip_info(struc
                        seq_printf(m, "No flip due on pipe %c (plane %c)\n",
                                   pipe, plane);
                } else {
-                       if (!work->pending) {
+                       if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
                                seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
                                           pipe, plane);
                        } else {
                                seq_printf(m, "Stall check enabled, ");
                        else
                                seq_printf(m, "Stall check waiting for page flip ioctl, ");
-                       seq_printf(m, "%d prepares\n", work->pending);
+                       seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
  
                        if (work->old_fb_obj) {
                                struct drm_i915_gem_object *obj = work->old_fb_obj;
@@@ -641,6 -641,7 +641,7 @@@ static void i915_ring_error_state(struc
        seq_printf(m, "%s command stream:\n", ring_str(ring));
        seq_printf(m, "  HEAD: 0x%08x\n", error->head[ring]);
        seq_printf(m, "  TAIL: 0x%08x\n", error->tail[ring]);
+       seq_printf(m, "  CTL: 0x%08x\n", error->ctl[ring]);
        seq_printf(m, "  ACTHD: 0x%08x\n", error->acthd[ring]);
        seq_printf(m, "  IPEIR: 0x%08x\n", error->ipeir[ring]);
        seq_printf(m, "  IPEHR: 0x%08x\n", error->ipehr[ring]);
        if (INTEL_INFO(dev)->gen >= 6) {
                seq_printf(m, "  RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
                seq_printf(m, "  FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
-               seq_printf(m, "  SYNC_0: 0x%08x\n",
-                          error->semaphore_mboxes[ring][0]);
-               seq_printf(m, "  SYNC_1: 0x%08x\n",
-                          error->semaphore_mboxes[ring][1]);
+               seq_printf(m, "  SYNC_0: 0x%08x [last synced 0x%08x]\n",
+                          error->semaphore_mboxes[ring][0],
+                          error->semaphore_seqno[ring][0]);
+               seq_printf(m, "  SYNC_1: 0x%08x [last synced 0x%08x]\n",
+                          error->semaphore_mboxes[ring][1],
+                          error->semaphore_seqno[ring][1]);
        }
        seq_printf(m, "  seqno: 0x%08x\n", error->seqno[ring]);
        seq_printf(m, "  waiting: %s\n", yesno(error->waiting[ring]));
@@@ -691,6 -694,8 +694,8 @@@ static int i915_error_state(struct seq_
        seq_printf(m, "EIR: 0x%08x\n", error->eir);
        seq_printf(m, "IER: 0x%08x\n", error->ier);
        seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
+       seq_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
+       seq_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
        seq_printf(m, "CCID: 0x%08x\n", error->ccid);
  
        for (i = 0; i < dev_priv->num_fence_regs; i++)
@@@ -1068,7 -1073,7 +1073,7 @@@ static int gen6_drpc_info(struct seq_fi
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 rpmodectl1, gt_core_status, rcctl1;
+       u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
        unsigned forcewake_count;
        int count=0, ret;
  
        rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
        rcctl1 = I915_READ(GEN6_RC_CONTROL);
        mutex_unlock(&dev->struct_mutex);
+       mutex_lock(&dev_priv->rps.hw_lock);
+       sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
+       mutex_unlock(&dev_priv->rps.hw_lock);
  
        seq_printf(m, "Video Turbo Mode: %s\n",
                   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
        seq_printf(m, "RC6++ residency since boot: %u\n",
                   I915_READ(GEN6_GT_GFX_RC6pp));
  
+       seq_printf(m, "RC6   voltage: %dmV\n",
+                  GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
+       seq_printf(m, "RC6+  voltage: %dmV\n",
+                  GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
+       seq_printf(m, "RC6++ voltage: %dmV\n",
+                  GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
        return 0;
  }
  
@@@ -1273,7 -1287,7 +1287,7 @@@ static int i915_ring_freq_table(struct 
                return 0;
        }
  
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
        if (ret)
                return ret;
  
        for (gpu_freq = dev_priv->rps.min_delay;
             gpu_freq <= dev_priv->rps.max_delay;
             gpu_freq++) {
-               I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
-               I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
-                          GEN6_PCODE_READ_MIN_FREQ_TABLE);
-               if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
-                             GEN6_PCODE_READY) == 0, 10)) {
-                       DRM_ERROR("pcode read of freq table timed out\n");
-                       continue;
-               }
-               ia_freq = I915_READ(GEN6_PCODE_DATA);
+               ia_freq = gpu_freq;
+               sandybridge_pcode_read(dev_priv,
+                                      GEN6_PCODE_READ_MIN_FREQ_TABLE,
+                                      &ia_freq);
                seq_printf(m, "%d\t\t%d\n", gpu_freq * GT_FREQUENCY_MULTIPLIER, ia_freq * 100);
        }
  
-       mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&dev_priv->rps.hw_lock);
  
        return 0;
  }
@@@ -1398,15 -1407,15 +1407,15 @@@ static int i915_context_status(struct s
        if (ret)
                return ret;
  
-       if (dev_priv->pwrctx) {
+       if (dev_priv->ips.pwrctx) {
                seq_printf(m, "power context ");
-               describe_obj(m, dev_priv->pwrctx);
+               describe_obj(m, dev_priv->ips.pwrctx);
                seq_printf(m, "\n");
        }
  
-       if (dev_priv->renderctx) {
+       if (dev_priv->ips.renderctx) {
                seq_printf(m, "render context ");
-               describe_obj(m, dev_priv->renderctx);
+               describe_obj(m, dev_priv->ips.renderctx);
                seq_printf(m, "\n");
        }
  
@@@ -1449,7 -1458,7 +1458,7 @@@ static const char *swizzle_string(unsig
        case I915_BIT_6_SWIZZLE_9_10_17:
                return "bit9/bit10/bit17";
        case I915_BIT_6_SWIZZLE_UNKNOWN:
 -              return "unkown";
 +              return "unknown";
        }
  
        return "bug";
@@@ -1711,13 -1720,13 +1720,13 @@@ i915_max_freq_read(struct file *filp
        if (!(IS_GEN6(dev) || IS_GEN7(dev)))
                return -ENODEV;
  
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
        if (ret)
                return ret;
  
        len = snprintf(buf, sizeof(buf),
                       "max freq: %d\n", dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER);
-       mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&dev_priv->rps.hw_lock);
  
        if (len > sizeof(buf))
                len = sizeof(buf);
@@@ -1752,7 -1761,7 +1761,7 @@@ i915_max_freq_write(struct file *filp
  
        DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);
  
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
        if (ret)
                return ret;
  
        dev_priv->rps.max_delay = val / GT_FREQUENCY_MULTIPLIER;
  
        gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
-       mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&dev_priv->rps.hw_lock);
  
        return cnt;
  }
@@@ -1787,13 -1796,13 +1796,13 @@@ i915_min_freq_read(struct file *filp, c
        if (!(IS_GEN6(dev) || IS_GEN7(dev)))
                return -ENODEV;
  
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
        if (ret)
                return ret;
  
        len = snprintf(buf, sizeof(buf),
                       "min freq: %d\n", dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER);
-       mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&dev_priv->rps.hw_lock);
  
        if (len > sizeof(buf))
                len = sizeof(buf);
@@@ -1826,7 -1835,7 +1835,7 @@@ i915_min_freq_write(struct file *filp, 
  
        DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val);
  
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
        if (ret)
                return ret;
  
        dev_priv->rps.min_delay = val / GT_FREQUENCY_MULTIPLIER;
  
        gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
-       mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&dev_priv->rps.hw_lock);
  
        return cnt;
  }
diff --combined drivers/hid/Kconfig
index eb325baa738aae5a96319c4917e0aac6fb1cbf2b,e7d6a13ec6a623ab1d4123d80a4aae37bb76aaa9..eae0c7e468cf33fdce47b081b747d33de6a2057e
@@@ -265,6 -265,15 +265,15 @@@ config HID_GYRATIO
        ---help---
        Support for Gyration remote control.
  
+ config HID_ICADE
+       tristate "ION iCade arcade controller"
+       depends on BT_HIDP
+       ---help---
+       Support for the ION iCade arcade controller to work as a joystick.
+       To compile this driver as a module, choose M here: the
+       module will be called hid-icade.
  config HID_TWINHAN
        tristate "Twinhan IR remote control"
        depends on USB_HID
@@@ -311,7 -320,7 +320,7 @@@ config HID_LOGITECH_D
        Say Y if you want support for Logitech Unifying receivers and devices.
        Unifying receivers are capable of pairing up to 6 Logitech compliant
        devices to the same receiver. Without this driver it will be handled by
 -      generic USB_HID driver and all incomming events will be multiplexed
 +      generic USB_HID driver and all incoming events will be multiplexed
        into a single mouse and a single keyboard device.
  
  config LOGITECH_FF
@@@ -728,4 -737,6 +737,6 @@@ endif # HI
  
  source "drivers/hid/usbhid/Kconfig"
  
+ source "drivers/hid/i2c-hid/Kconfig"
  endmenu
index d67126dbbb04bf056f09d3ccabfe48fb996ad599,f8e405c383a0774948c464f415ac94f77d7a5679..2c0d2c2bf94648e273b7614a65e1bc09976663e8
@@@ -277,7 -277,6 +277,6 @@@ l1oip_socket_send(struct l1oip *hc, u8 
                  u16 timebase, u8 *buf, int len)
  {
        u8 *p;
-       int multi = 0;
        u8 frame[len + 32];
        struct socket *socket = NULL;
  
                *p++ = hc->id >> 8;
                *p++ = hc->id;
        }
-       *p++ = (multi == 1) ? 0x80 : 0x00 + channel; /* m-flag, channel */
-       if (multi == 1)
-               *p++ = len; /* length */
+       *p++ =  0x00 + channel; /* m-flag, channel */
        *p++ = timebase >> 8; /* time base */
        *p++ = timebase;
  
@@@ -692,7 -689,7 +689,7 @@@ l1oip_socket_thread(void *data
        hc->sin_remote.sin_addr.s_addr = htonl(hc->remoteip);
        hc->sin_remote.sin_port = htons((unsigned short)hc->remoteport);
  
 -      /* bind to incomming port */
 +      /* bind to incoming port */
        if (socket->ops->bind(socket, (struct sockaddr *)&hc->sin_local,
                              sizeof(hc->sin_local))) {
                printk(KERN_ERR "%s: Failed to bind socket to port %d.\n",
index 4544a0734df3742706f39cbd146d7f060f7cf44e,8bda2c908aba3b17aa53dc44f96474aca76a284f..5cc55f333f84c75caf862aa494210589ba72a26c
@@@ -41,7 -41,6 +41,6 @@@
  #include <linux/videodev2.h>
  
  #include <linux/platform_data/camera-mx2.h>
- #include <mach/hardware.h>
  
  #include <asm/dma.h>
  
  
  #define CSICR1                        0x00
  #define CSICR2                        0x04
- #define CSISR                 (cpu_is_mx27() ? 0x08 : 0x18)
+ #define CSISR_IMX25           0x18
+ #define CSISR_IMX27           0x08
  #define CSISTATFIFO           0x0c
  #define CSIRFIFO              0x10
  #define CSIRXCNT              0x14
- #define CSICR3                        (cpu_is_mx27() ? 0x1C : 0x08)
+ #define CSICR3_IMX25          0x08
+ #define CSICR3_IMX27          0x1c
  #define CSIDMASA_STATFIFO     0x20
  #define CSIDMATA_STATFIFO     0x24
  #define CSIDMASA_FB1          0x28
@@@ -268,11 -269,17 +269,17 @@@ struct mx2_buffer 
        struct mx2_buf_internal         internal;
  };
  
+ enum mx2_camera_type {
+       IMX25_CAMERA,
+       IMX27_CAMERA,
+ };
  struct mx2_camera_dev {
        struct device           *dev;
        struct soc_camera_host  soc_host;
        struct soc_camera_device *icd;
-       struct clk              *clk_csi, *clk_emma_ahb, *clk_emma_ipg;
+       struct clk              *clk_emma_ahb, *clk_emma_ipg;
+       struct clk              *clk_csi_ahb, *clk_csi_per;
  
        void __iomem            *base_csi, *base_emma;
  
        struct mx2_buffer       *fb2_active;
  
        u32                     csicr1;
+       u32                     reg_csisr;
+       u32                     reg_csicr3;
+       enum mx2_camera_type    devtype;
  
        struct mx2_buf_internal buf_discard[2];
        void                    *discard_buffer;
        struct vb2_alloc_ctx    *alloc_ctx;
  };
  
+ static struct platform_device_id mx2_camera_devtype[] = {
+       {
+               .name = "imx25-camera",
+               .driver_data = IMX25_CAMERA,
+       }, {
+               .name = "imx27-camera",
+               .driver_data = IMX27_CAMERA,
+       }, {
+               /* sentinel */
+       }
+ };
+ MODULE_DEVICE_TABLE(platform, mx2_camera_devtype);
+ static inline int is_imx25_camera(struct mx2_camera_dev *pcdev)
+ {
+       return pcdev->devtype == IMX25_CAMERA;
+ }
+ static inline int is_imx27_camera(struct mx2_camera_dev *pcdev)
+ {
+       return pcdev->devtype == IMX27_CAMERA;
+ }
  static struct mx2_buffer *mx2_ibuf_to_buf(struct mx2_buf_internal *int_buf)
  {
        return container_of(int_buf, struct mx2_buffer, internal);
@@@ -312,7 -345,7 +345,7 @@@ static struct mx2_fmt_cfg mx27_emma_prp
        /*
         * This is a generic configuration which is valid for most
         * prp input-output format combinations.
 -       * We set the incomming and outgoing pixelformat to a
 +       * We set the incoming and outgoing pixelformat to a
         * 16 Bit wide format and adjust the bytesperline
         * accordingly. With this configuration the inputdata
         * will not be changed by the emma and could be any type
@@@ -432,11 -465,12 +465,12 @@@ static void mx2_camera_deactivate(struc
  {
        unsigned long flags;
  
-       clk_disable_unprepare(pcdev->clk_csi);
+       clk_disable_unprepare(pcdev->clk_csi_ahb);
+       clk_disable_unprepare(pcdev->clk_csi_per);
        writel(0, pcdev->base_csi + CSICR1);
-       if (cpu_is_mx27()) {
+       if (is_imx27_camera(pcdev)) {
                writel(0, pcdev->base_emma + PRP_CNTL);
-       } else if (cpu_is_mx25()) {
+       } else if (is_imx25_camera(pcdev)) {
                spin_lock_irqsave(&pcdev->lock, flags);
                pcdev->fb1_active = NULL;
                pcdev->fb2_active = NULL;
@@@ -460,13 -494,17 +494,17 @@@ static int mx2_camera_add_device(struc
        if (pcdev->icd)
                return -EBUSY;
  
-       ret = clk_prepare_enable(pcdev->clk_csi);
+       ret = clk_prepare_enable(pcdev->clk_csi_ahb);
        if (ret < 0)
                return ret;
  
+       ret = clk_prepare_enable(pcdev->clk_csi_per);
+       if (ret < 0)
+               goto exit_csi_ahb;
        csicr1 = CSICR1_MCLKEN;
  
-       if (cpu_is_mx27())
+       if (is_imx27_camera(pcdev))
                csicr1 |= CSICR1_PRP_IF_EN | CSICR1_FCC |
                        CSICR1_RXFF_LEVEL(0);
  
                 icd->devnum);
  
        return 0;
+ exit_csi_ahb:
+       clk_disable_unprepare(pcdev->clk_csi_ahb);
+       return ret;
  }
  
  static void mx2_camera_remove_device(struct soc_camera_device *icd)
@@@ -542,7 -585,7 +585,7 @@@ out
  static irqreturn_t mx25_camera_irq(int irq_csi, void *data)
  {
        struct mx2_camera_dev *pcdev = data;
-       u32 status = readl(pcdev->base_csi + CSISR);
+       u32 status = readl(pcdev->base_csi + pcdev->reg_csisr);
  
        if (status & CSISR_DMA_TSF_FB1_INT)
                mx25_camera_frame_done(pcdev, 1, MX2_STATE_DONE);
  
        /* FIXME: handle CSISR_RFF_OR_INT */
  
-       writel(status, pcdev->base_csi + CSISR);
+       writel(status, pcdev->base_csi + pcdev->reg_csisr);
  
        return IRQ_HANDLED;
  }
@@@ -636,7 -679,7 +679,7 @@@ static void mx2_videobuf_queue(struct v
        buf->state = MX2_STATE_QUEUED;
        list_add_tail(&buf->internal.queue, &pcdev->capture);
  
-       if (cpu_is_mx25()) {
+       if (is_imx25_camera(pcdev)) {
                u32 csicr3, dma_inten = 0;
  
                if (pcdev->fb1_active == NULL) {
                        list_del(&buf->internal.queue);
                        buf->state = MX2_STATE_ACTIVE;
  
-                       csicr3 = readl(pcdev->base_csi + CSICR3);
+                       csicr3 = readl(pcdev->base_csi + pcdev->reg_csicr3);
  
                        /* Reflash DMA */
                        writel(csicr3 | CSICR3_DMA_REFLASH_RFF,
-                                       pcdev->base_csi + CSICR3);
+                                       pcdev->base_csi + pcdev->reg_csicr3);
  
                        /* clear & enable interrupts */
-                       writel(dma_inten, pcdev->base_csi + CSISR);
+                       writel(dma_inten, pcdev->base_csi + pcdev->reg_csisr);
                        pcdev->csicr1 |= dma_inten;
                        writel(pcdev->csicr1, pcdev->base_csi + CSICR1);
  
                        /* enable DMA */
                        csicr3 |= CSICR3_DMA_REQ_EN_RFF | CSICR3_RXFF_LEVEL(1);
-                       writel(csicr3, pcdev->base_csi + CSICR3);
+                       writel(csicr3, pcdev->base_csi + pcdev->reg_csicr3);
                }
        }
  
@@@ -712,7 -755,7 +755,7 @@@ static void mx2_videobuf_release(struc
         */
  
        spin_lock_irqsave(&pcdev->lock, flags);
-       if (cpu_is_mx25() && buf->state == MX2_STATE_ACTIVE) {
+       if (is_imx25_camera(pcdev) && buf->state == MX2_STATE_ACTIVE) {
                if (pcdev->fb1_active == buf) {
                        pcdev->csicr1 &= ~CSICR1_FB1_DMA_INTEN;
                        writel(0, pcdev->base_csi + CSIDMASA_FB1);
@@@ -835,7 -878,7 +878,7 @@@ static int mx2_start_streaming(struct v
        unsigned long phys;
        int bytesperline;
  
-       if (cpu_is_mx27()) {
+       if (is_imx27_camera(pcdev)) {
                unsigned long flags;
                if (count < 2)
                        return -EINVAL;
  
                bytesperline = soc_mbus_bytes_per_line(icd->user_width,
                                icd->current_fmt->host_fmt);
-               if (bytesperline < 0)
+               if (bytesperline < 0) {
+                       spin_unlock_irqrestore(&pcdev->lock, flags);
                        return bytesperline;
+               }
  
                /*
                 * I didn't manage to properly enable/disable the prp
                pcdev->discard_buffer = dma_alloc_coherent(ici->v4l2_dev.dev,
                                pcdev->discard_size, &pcdev->discard_buffer_dma,
                                GFP_KERNEL);
-               if (!pcdev->discard_buffer)
+               if (!pcdev->discard_buffer) {
+                       spin_unlock_irqrestore(&pcdev->lock, flags);
                        return -ENOMEM;
+               }
  
                pcdev->buf_discard[0].discard = true;
                list_add_tail(&pcdev->buf_discard[0].queue,
@@@ -930,7 -977,7 +977,7 @@@ static int mx2_stop_streaming(struct vb
        void *b;
        u32 cntl;
  
-       if (cpu_is_mx27()) {
+       if (is_imx27_camera(pcdev)) {
                spin_lock_irqsave(&pcdev->lock, flags);
  
                cntl = readl(pcdev->base_emma + PRP_CNTL);
@@@ -1082,11 -1129,11 +1129,11 @@@ static int mx2_camera_set_bus_param(str
        if (bytesperline < 0)
                return bytesperline;
  
-       if (cpu_is_mx27()) {
+       if (is_imx27_camera(pcdev)) {
                ret = mx27_camera_emma_prp_reset(pcdev);
                if (ret)
                        return ret;
-       } else if (cpu_is_mx25()) {
+       } else if (is_imx25_camera(pcdev)) {
                writel((bytesperline * icd->user_height) >> 2,
                                pcdev->base_csi + CSIRXCNT);
                writel((bytesperline << 16) | icd->user_height,
  }
  
  static int mx2_camera_set_crop(struct soc_camera_device *icd,
-                               struct v4l2_crop *a)
+                               const struct v4l2_crop *a)
  {
-       struct v4l2_rect *rect = &a->c;
+       struct v4l2_crop a_writable = *a;
+       struct v4l2_rect *rect = &a_writable.c;
        struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
        struct v4l2_mbus_framefmt mf;
        int ret;
@@@ -1392,7 -1440,7 +1440,7 @@@ static int mx2_camera_try_fmt(struct so
        /* FIXME: implement MX27 limits */
  
        /* limit to MX25 hardware capabilities */
-       if (cpu_is_mx25()) {
+       if (is_imx25_camera(pcdev)) {
                if (xlate->host_fmt->bits_per_sample <= 8)
                        width_limit = 0xffff * 4;
                else
@@@ -1644,7 -1692,7 +1692,7 @@@ static irqreturn_t mx27_camera_emma_irq
        return IRQ_HANDLED;
  }
  
- static int __devinit mx27_camera_emma_init(struct platform_device *pdev)
+ static int mx27_camera_emma_init(struct platform_device *pdev)
  {
        struct mx2_camera_dev *pcdev = platform_get_drvdata(pdev);
        struct resource *res_emma;
@@@ -1702,7 -1750,7 +1750,7 @@@ out
        return err;
  }
  
- static int __devinit mx2_camera_probe(struct platform_device *pdev)
+ static int mx2_camera_probe(struct platform_device *pdev)
  {
        struct mx2_camera_dev *pcdev;
        struct resource *res_csi;
                goto exit;
        }
  
-       pcdev->clk_csi = devm_clk_get(&pdev->dev, "ahb");
-       if (IS_ERR(pcdev->clk_csi)) {
-               dev_err(&pdev->dev, "Could not get csi clock\n");
-               err = PTR_ERR(pcdev->clk_csi);
+       pcdev->devtype = pdev->id_entry->driver_data;
+       switch (pcdev->devtype) {
+       case IMX25_CAMERA:
+               pcdev->reg_csisr = CSISR_IMX25;
+               pcdev->reg_csicr3 = CSICR3_IMX25;
+               break;
+       case IMX27_CAMERA:
+               pcdev->reg_csisr = CSISR_IMX27;
+               pcdev->reg_csicr3 = CSICR3_IMX27;
+               break;
+       default:
+               break;
+       }
+       pcdev->clk_csi_ahb = devm_clk_get(&pdev->dev, "ahb");
+       if (IS_ERR(pcdev->clk_csi_ahb)) {
+               dev_err(&pdev->dev, "Could not get csi ahb clock\n");
+               err = PTR_ERR(pcdev->clk_csi_ahb);
+               goto exit;
+       }
+       pcdev->clk_csi_per = devm_clk_get(&pdev->dev, "per");
+       if (IS_ERR(pcdev->clk_csi_per)) {
+               dev_err(&pdev->dev, "Could not get csi per clock\n");
+               err = PTR_ERR(pcdev->clk_csi_per);
                goto exit;
        }
  
  
                pcdev->platform_flags = pcdev->pdata->flags;
  
-               rate = clk_round_rate(pcdev->clk_csi, pcdev->pdata->clk * 2);
+               rate = clk_round_rate(pcdev->clk_csi_per,
+                                               pcdev->pdata->clk * 2);
                if (rate <= 0) {
                        err = -ENODEV;
                        goto exit;
                }
-               err = clk_set_rate(pcdev->clk_csi, rate);
+               err = clk_set_rate(pcdev->clk_csi_per, rate);
                if (err < 0)
                        goto exit;
        }
        pcdev->dev = &pdev->dev;
        platform_set_drvdata(pdev, pcdev);
  
-       if (cpu_is_mx25()) {
+       if (is_imx25_camera(pcdev)) {
                err = devm_request_irq(&pdev->dev, irq_csi, mx25_camera_irq, 0,
                                       MX2_CAM_DRV_NAME, pcdev);
                if (err) {
                }
        }
  
-       if (cpu_is_mx27()) {
+       if (is_imx27_camera(pcdev)) {
                err = mx27_camera_emma_init(pdev);
                if (err)
                        goto exit;
        pcdev->soc_host.priv            = pcdev;
        pcdev->soc_host.v4l2_dev.dev    = &pdev->dev;
        pcdev->soc_host.nr              = pdev->id;
-       if (cpu_is_mx25())
+       if (is_imx25_camera(pcdev))
                pcdev->soc_host.capabilities = SOCAM_HOST_CAP_STRIDE;
  
        pcdev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
                goto exit_free_emma;
  
        dev_info(&pdev->dev, "MX2 Camera (CSI) driver probed, clock frequency: %ld\n",
-                       clk_get_rate(pcdev->clk_csi));
+                       clk_get_rate(pcdev->clk_csi_per));
  
        return 0;
  
  exit_free_emma:
        vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx);
  eallocctx:
-       if (cpu_is_mx27()) {
+       if (is_imx27_camera(pcdev)) {
                clk_disable_unprepare(pcdev->clk_emma_ipg);
                clk_disable_unprepare(pcdev->clk_emma_ahb);
        }
@@@ -1817,7 -1887,7 +1887,7 @@@ exit
        return err;
  }
  
- static int __devexit mx2_camera_remove(struct platform_device *pdev)
+ static int mx2_camera_remove(struct platform_device *pdev)
  {
        struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
        struct mx2_camera_dev *pcdev = container_of(soc_host,
  
        vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx);
  
-       if (cpu_is_mx27()) {
+       if (is_imx27_camera(pcdev)) {
                clk_disable_unprepare(pcdev->clk_emma_ipg);
                clk_disable_unprepare(pcdev->clk_emma_ahb);
        }
@@@ -1841,7 -1911,8 +1911,8 @@@ static struct platform_driver mx2_camer
        .driver         = {
                .name   = MX2_CAM_DRV_NAME,
        },
-       .remove         = __devexit_p(mx2_camera_remove),
+       .id_table       = mx2_camera_devtype,
+       .remove         = mx2_camera_remove,
  };
  
  
index 2bf4c08efad9a3486a826414ec130eb9accdfaf6,5523da3afcdccd23f4b90c6191f722d02063c1d3..c9ec54730d1d3fbc1bbbbe602ae30d4704ac4707
@@@ -79,7 -79,7 +79,7 @@@
  /* Time in jiffies before concluding the transmitter is hung */
  #define TX_TIMEOUT            (5*HZ)
  
- static char version[] __devinitdata =
+ static char version[] =
        "Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
  
@@@ -127,6 -127,17 +127,17 @@@ MODULE_PARM_DESC(debug, " Default debu
  
  struct workqueue_struct *bnx2x_wq;
  
+ struct bnx2x_mac_vals {
+       u32 xmac_addr;
+       u32 xmac_val;
+       u32 emac_addr;
+       u32 emac_val;
+       u32 umac_addr;
+       u32 umac_val;
+       u32 bmac_addr;
+       u32 bmac_val[2];
+ };
  enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711,
  /* indexed by board_type, above */
  static struct {
        char *name;
- } board_info[] __devinitdata = {
+ } board_info[] = {
        { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
        { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
        { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
@@@ -791,10 -802,9 +802,9 @@@ void bnx2x_panic_dump(struct bnx2x *bp
  
                /* host sb data */
  
- #ifdef BCM_CNIC
                if (IS_FCOE_FP(fp))
                        continue;
- #endif
                BNX2X_ERR("     run indexes (");
                for (j = 0; j < HC_SB_MAX_SM; j++)
                        pr_cont("0x%x%s",
  #ifdef BNX2X_STOP_ON_ERROR
        /* Rings */
        /* Rx */
-       for_each_rx_queue(bp, i) {
+       for_each_valid_rx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
  
                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
        }
  
        /* Tx */
-       for_each_tx_queue(bp, i) {
+       for_each_valid_tx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                for_each_cos_in_tx_queue(fp, cos) {
                        struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
@@@ -1483,7 -1493,7 +1493,7 @@@ static void bnx2x_igu_int_disable(struc
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
  }
  
- void bnx2x_int_disable(struct bnx2x *bp)
static void bnx2x_int_disable(struct bnx2x *bp)
  {
        if (bp->common.int_block == INT_BLOCK_HC)
                bnx2x_hc_int_disable(bp);
@@@ -1504,9 -1514,8 +1514,8 @@@ void bnx2x_int_disable_sync(struct bnx2
        if (msix) {
                synchronize_irq(bp->msix_table[0].vector);
                offset = 1;
- #ifdef BCM_CNIC
-               offset++;
- #endif
+               if (CNIC_SUPPORT(bp))
+                       offset++;
                for_each_eth_queue(bp, i)
                        synchronize_irq(bp->msix_table[offset++].vector);
        } else
@@@ -1588,9 -1597,8 +1597,8 @@@ static bool bnx2x_trylock_leader_lock(s
        return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
  }
  
- #ifdef BCM_CNIC
  static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
- #endif
  
  void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
  {
@@@ -1720,7 -1728,7 +1728,7 @@@ irqreturn_t bnx2x_interrupt(int irq, vo
        for_each_eth_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
  
-               mask = 0x2 << (fp->index + CNIC_PRESENT);
+               mask = 0x2 << (fp->index + CNIC_SUPPORT(bp));
                if (status & mask) {
                        /* Handle Rx or Tx according to SB id */
                        prefetch(fp->rx_cons_sb);
                }
        }
  
- #ifdef BCM_CNIC
-       mask = 0x2;
-       if (status & (mask | 0x1)) {
-               struct cnic_ops *c_ops = NULL;
+       if (CNIC_SUPPORT(bp)) {
+               mask = 0x2;
+               if (status & (mask | 0x1)) {
+                       struct cnic_ops *c_ops = NULL;
  
-               if (likely(bp->state == BNX2X_STATE_OPEN)) {
-                       rcu_read_lock();
-                       c_ops = rcu_dereference(bp->cnic_ops);
-                       if (c_ops)
-                               c_ops->cnic_handler(bp->cnic_data, NULL);
-                       rcu_read_unlock();
-               }
+                       if (likely(bp->state == BNX2X_STATE_OPEN)) {
+                               rcu_read_lock();
+                               c_ops = rcu_dereference(bp->cnic_ops);
+                               if (c_ops)
+                                       c_ops->cnic_handler(bp->cnic_data,
+                                                           NULL);
+                               rcu_read_unlock();
+                       }
  
-               status &= ~mask;
+                       status &= ~mask;
+               }
        }
- #endif
  
        if (unlikely(status & 0x1)) {
                queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
@@@ -2034,40 -2043,39 +2043,39 @@@ int bnx2x_set_gpio_int(struct bnx2x *bp
        return 0;
  }
  
- static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
+ static int bnx2x_set_spio(struct bnx2x *bp, int spio, u32 mode)
  {
-       u32 spio_mask = (1 << spio_num);
        u32 spio_reg;
  
-       if ((spio_num < MISC_REGISTERS_SPIO_4) ||
-           (spio_num > MISC_REGISTERS_SPIO_7)) {
-               BNX2X_ERR("Invalid SPIO %d\n", spio_num);
+       /* Only 2 SPIOs are configurable */
+       if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
+               BNX2X_ERR("Invalid SPIO 0x%x\n", spio);
                return -EINVAL;
        }
  
        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
        /* read SPIO and mask except the float bits */
-       spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
+       spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
  
        switch (mode) {
-       case MISC_REGISTERS_SPIO_OUTPUT_LOW:
-               DP(NETIF_MSG_HW, "Set SPIO %d -> output low\n", spio_num);
+       case MISC_SPIO_OUTPUT_LOW:
+               DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output low\n", spio);
                /* clear FLOAT and set CLR */
-               spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
-               spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
+               spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
+               spio_reg |=  (spio << MISC_SPIO_CLR_POS);
                break;
  
-       case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
-               DP(NETIF_MSG_HW, "Set SPIO %d -> output high\n", spio_num);
+       case MISC_SPIO_OUTPUT_HIGH:
+               DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output high\n", spio);
                /* clear FLOAT and set SET */
-               spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
-               spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
+               spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
+               spio_reg |=  (spio << MISC_SPIO_SET_POS);
                break;
  
-       case MISC_REGISTERS_SPIO_INPUT_HI_Z:
-               DP(NETIF_MSG_HW, "Set SPIO %d -> input\n", spio_num);
+       case MISC_SPIO_INPUT_HI_Z:
+               DP(NETIF_MSG_HW, "Set SPIO 0x%x -> input\n", spio);
                /* set FLOAT */
-               spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
+               spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
                break;
  
        default:
@@@ -2106,22 -2114,25 +2114,25 @@@ void bnx2x_calc_fc_adv(struct bnx2x *bp
        }
  }
  
- u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
+ static void bnx2x_set_requested_fc(struct bnx2x *bp)
  {
-       if (!BP_NOMCP(bp)) {
-               u8 rc;
-               int cfx_idx = bnx2x_get_link_cfg_idx(bp);
-               u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
-               /*
-                * Initialize link parameters structure variables
-                * It is recommended to turn off RX FC for jumbo frames
-                * for better performance
-                */
-               if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
-                       bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
-               else
-                       bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
+       /* Initialize link parameters structure variables
+        * It is recommended to turn off RX FC for jumbo frames
+        *  for better performance
+        */
+       if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
+               bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
+       else
+               bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
+ }
+ int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
+ {
+       int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp);
+       u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
  
+       if (!BP_NOMCP(bp)) {
+               bnx2x_set_requested_fc(bp);
                bnx2x_acquire_phy_lock(bp);
  
                if (load_mode == LOAD_DIAG) {
  
                bnx2x_calc_fc_adv(bp);
  
-               if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
+               if (bp->link_vars.link_up) {
                        bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
                        bnx2x_link_report(bp);
-               } else
-                       queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
+               }
+               queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
                bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
                return rc;
        }
@@@ -3075,11 -3086,13 +3086,13 @@@ static void bnx2x_drv_info_ether_stat(s
  
  static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
  {
- #ifdef BCM_CNIC
        struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
        struct fcoe_stats_info *fcoe_stat =
                &bp->slowpath->drv_info_to_mcp.fcoe_stat;
  
+       if (!CNIC_LOADED(bp))
+               return;
        memcpy(fcoe_stat->mac_local + MAC_LEADING_ZERO_CNT,
               bp->fip_mac, ETH_ALEN);
  
  
        /* ask L5 driver to add data to the struct */
        bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
- #endif
  }
  
  static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
  {
- #ifdef BCM_CNIC
        struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
        struct iscsi_stats_info *iscsi_stat =
                &bp->slowpath->drv_info_to_mcp.iscsi_stat;
  
+       if (!CNIC_LOADED(bp))
+               return;
        memcpy(iscsi_stat->mac_local + MAC_LEADING_ZERO_CNT,
               bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);
  
  
        /* ask L5 driver to add data to the struct */
        bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
- #endif
  }
  
  /* called due to MCP event (on pmf):
@@@ -3589,6 -3602,21 +3602,21 @@@ static void bnx2x_attn_int_asserted(str
  
        /* now set back the mask */
        if (asserted & ATTN_NIG_FOR_FUNC) {
+               /* Verify that IGU ack through BAR was written before restoring
+                * NIG mask. This loop should exit after 2-3 iterations max.
+                */
+               if (bp->common.int_block != INT_BLOCK_HC) {
+                       u32 cnt = 0, igu_acked;
+                       do {
+                               igu_acked = REG_RD(bp,
+                                                  IGU_REG_ATTENTION_ACK_BITS);
+                       } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
+                                (++cnt < MAX_IGU_ATTN_ACK_TO));
+                       if (!igu_acked)
+                               DP(NETIF_MSG_HW,
+                                  "Failed to verify IGU ack on time\n");
+                       barrier();
+               }
                REG_WR(bp, nig_int_mask_addr, nig_mask);
                bnx2x_release_phy_lock(bp);
        }
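A minimal sketch of the bounded-poll pattern used for the IGU ack above; my_read_status(), MY_ACK_BIT and MY_MAX_TRIES are hypothetical placeholders, not driver registers:

#include <linux/types.h>

extern u32 my_read_status(void);        /* hypothetical register read */
#define MY_ACK_BIT      0x1
#define MY_MAX_TRIES    16

/* Re-read a status register until the expected bit shows up or the retry
 * budget runs out, then report whether the ack was actually seen.
 */
static bool my_wait_for_ack(void)
{
        u32 val, cnt = 0;

        do {
                val = my_read_status();
        } while (!(val & MY_ACK_BIT) && ++cnt < MY_MAX_TRIES);

        return !!(val & MY_ACK_BIT);
}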
@@@ -4572,7 -4600,6 +4600,6 @@@ static void bnx2x_update_eq_prod(struc
        mmiowb(); /* keep prod updates ordered */
  }
  
- #ifdef BCM_CNIC
  static int  bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
                                      union event_ring_elem *elem)
  {
        bnx2x_cnic_cfc_comp(bp, cid, err);
        return 0;
  }
- #endif
  
  static void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
  {
@@@ -4635,11 -4661,9 +4661,9 @@@ static void bnx2x_handle_classification
        switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
        case BNX2X_FILTER_MAC_PENDING:
                DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
- #ifdef BCM_CNIC
-               if (cid == BNX2X_ISCSI_ETH_CID(bp))
+               if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
                        vlan_mac_obj = &bp->iscsi_l2_mac_obj;
                else
- #endif
                        vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
  
                break;
  
  }
  
- #ifdef BCM_CNIC
  static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
- #endif
  
  static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
  {
        /* Send rx_mode command again if was requested */
        if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
                bnx2x_set_storm_rx_mode(bp);
- #ifdef BCM_CNIC
        else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
                                    &bp->sp_state))
                bnx2x_set_iscsi_eth_rx_mode(bp, true);
        else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
                                    &bp->sp_state))
                bnx2x_set_iscsi_eth_rx_mode(bp, false);
- #endif
  
        netif_addr_unlock_bh(bp->dev);
  }
@@@ -4747,7 -4767,6 +4767,6 @@@ static void bnx2x_after_function_update
                                  q);
        }
  
- #ifdef BCM_CNIC
        if (!NO_FCOE(bp)) {
                fp = &bp->fp[FCOE_IDX(bp)];
                queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
                bnx2x_link_report(bp);
                bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
        }
- #else
-       /* If no FCoE ring - ACK MCP now */
-       bnx2x_link_report(bp);
-       bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
- #endif /* BCM_CNIC */
  }
  
  static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
        struct bnx2x *bp, u32 cid)
  {
        DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
- #ifdef BCM_CNIC
-       if (cid == BNX2X_FCOE_ETH_CID(bp))
+       if (CNIC_LOADED(bp) && (cid == BNX2X_FCOE_ETH_CID(bp)))
                return &bnx2x_fcoe_sp_obj(bp, q_obj);
        else
- #endif
                return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
  }
  
@@@ -4793,6 -4806,7 +4806,7 @@@ static void bnx2x_eq_int(struct bnx2x *
  {
        u16 hw_cons, sw_cons, sw_prod;
        union event_ring_elem *elem;
+       u8 echo;
        u32 cid;
        u8 opcode;
        int spqe_cnt = 0;
                         */
                        DP(BNX2X_MSG_SP,
                           "got delete ramrod for MULTI[%d]\n", cid);
- #ifdef BCM_CNIC
-                       if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
+                       if (CNIC_LOADED(bp) &&
+                           !bnx2x_cnic_handle_cfc_del(bp, cid, elem))
                                goto next_spqe;
- #endif
                        q_obj = bnx2x_cid_to_q_obj(bp, cid);
  
                        if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
                                break;
                        bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
                        goto next_spqe;
                case EVENT_RING_OPCODE_FUNCTION_UPDATE:
-                       DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
-                          "AFEX: ramrod completed FUNCTION_UPDATE\n");
-                       f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_AFEX_UPDATE);
+                       echo = elem->message.data.function_update_event.echo;
+                       if (echo == SWITCH_UPDATE) {
+                               DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
+                                  "got FUNC_SWITCH_UPDATE ramrod\n");
+                               if (f_obj->complete_cmd(
+                                       bp, f_obj, BNX2X_F_CMD_SWITCH_UPDATE))
+                                       break;
  
-                       /* We will perform the Queues update from sp_rtnl task
-                        * as all Queue SP operations should run under
-                        * rtnl_lock.
-                        */
-                       smp_mb__before_clear_bit();
-                       set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE,
-                               &bp->sp_rtnl_state);
-                       smp_mb__after_clear_bit();
+                       } else {
+                               DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
+                                  "AFEX: ramrod completed FUNCTION_UPDATE\n");
+                               f_obj->complete_cmd(bp, f_obj,
+                                                   BNX2X_F_CMD_AFEX_UPDATE);
+                               /* We will perform the Queues update from
+                                * sp_rtnl task as all Queue SP operations
+                                * should run under rtnl_lock.
+                                */
+                               smp_mb__before_clear_bit();
+                               set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE,
+                                       &bp->sp_rtnl_state);
+                               smp_mb__after_clear_bit();
+                               schedule_delayed_work(&bp->sp_rtnl_task, 0);
+                       }
  
-                       schedule_delayed_work(&bp->sp_rtnl_task, 0);
                        goto next_spqe;
  
                case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
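A rough sketch of the defer-to-rtnl pattern the AFEX branch above relies on (flag bit plus delayed work); struct my_ctx, MY_QUEUE_UPDATE_FLAG and my_task_fn are hypothetical placeholders:

#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <linux/rtnetlink.h>

#define MY_QUEUE_UPDATE_FLAG    0

struct my_ctx {
        unsigned long state;
        struct delayed_work task;       /* set up with INIT_DELAYED_WORK(&ctx->task, my_task_fn) */
};

/* Event-handler side: flag the work and kick the worker; the queue update
 * itself must run under rtnl_lock, so it is not done here.
 */
static void my_flag_queue_update(struct my_ctx *ctx)
{
        set_bit(MY_QUEUE_UPDATE_FLAG, &ctx->state);
        schedule_delayed_work(&ctx->task, 0);
}

/* Worker side: take rtnl_lock, then consume the flag. */
static void my_task_fn(struct work_struct *work)
{
        struct my_ctx *ctx = container_of(work, struct my_ctx, task.work);

        rtnl_lock();
        if (test_and_clear_bit(MY_QUEUE_UPDATE_FLAG, &ctx->state))
                ;       /* perform the queue update here */
        rtnl_unlock();
}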
@@@ -4999,11 -5027,10 +5027,10 @@@ static void bnx2x_sp_task(struct work_s
  
        /* SP events: STAT_QUERY and others */
        if (status & BNX2X_DEF_SB_IDX) {
- #ifdef BCM_CNIC
                struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
  
-               if ((!NO_FCOE(bp)) &&
-                       (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+               if (FCOE_INIT(bp) &&
+                   (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
                        /*
                         * Prevent local bottom-halves from running as
                         * we are going to change the local NAPI list.
                        napi_schedule(&bnx2x_fcoe(bp, napi));
                        local_bh_enable();
                }
- #endif
                /* Handle EQ completions */
                bnx2x_eq_int(bp);
  
@@@ -5050,8 -5077,7 +5077,7 @@@ irqreturn_t bnx2x_msix_sp_int(int irq, 
                return IRQ_HANDLED;
  #endif
  
- #ifdef BCM_CNIC
-       {
+       if (CNIC_LOADED(bp)) {
                struct cnic_ops *c_ops;
  
                rcu_read_lock();
                        c_ops->cnic_handler(bp->cnic_data, NULL);
                rcu_read_unlock();
        }
- #endif
        queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
  
        return IRQ_HANDLED;
@@@ -5498,12 -5524,10 +5524,10 @@@ void bnx2x_set_storm_rx_mode(struct bnx
        unsigned long rx_mode_flags = 0, ramrod_flags = 0;
        unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
  
- #ifdef BCM_CNIC
        if (!NO_FCOE(bp))
  
                /* Configure rx_mode of FCoE Queue */
                __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
- #endif
  
        switch (bp->rx_mode) {
        case BNX2X_RX_MODE_NONE:
@@@ -5624,12 -5648,12 +5648,12 @@@ static void bnx2x_init_internal(struct 
  
  static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
  {
-       return fp->bp->igu_base_sb + fp->index + CNIC_PRESENT;
+       return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp);
  }
  
  static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
  {
-       return fp->bp->base_fw_ndsb + fp->index + CNIC_PRESENT;
+       return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp);
  }
  
  static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
@@@ -5720,23 -5744,25 +5744,25 @@@ static void bnx2x_init_tx_ring_one(stru
        txdata->tx_pkt = 0;
  }
  
+ static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp)
+ {
+       int i;
+       for_each_tx_queue_cnic(bp, i)
+               bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]);
+ }
  static void bnx2x_init_tx_rings(struct bnx2x *bp)
  {
        int i;
        u8 cos;
  
-       for_each_tx_queue(bp, i)
+       for_each_eth_queue(bp, i)
                for_each_cos_in_tx_queue(&bp->fp[i], cos)
                        bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
  }
  
- void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
+ void bnx2x_nic_init_cnic(struct bnx2x *bp)
  {
-       int i;
-       for_each_eth_queue(bp, i)
-               bnx2x_init_eth_fp(bp, i);
- #ifdef BCM_CNIC
        if (!NO_FCOE(bp))
                bnx2x_init_fcoe_fp(bp);
  
                      BNX2X_VF_ID_INVALID, false,
                      bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));
  
- #endif
+       /* ensure status block indices were read */
+       rmb();
+       bnx2x_init_rx_rings_cnic(bp);
+       bnx2x_init_tx_rings_cnic(bp);
+       /* flush all */
+       mb();
+       mmiowb();
+ }
  
+ void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
+ {
+       int i;
+       for_each_eth_queue(bp, i)
+               bnx2x_init_eth_fp(bp, i);
        /* Initialize MOD_ABS interrupts */
        bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
                               bp->common.shmem_base, bp->common.shmem2_base,
@@@ -6031,10 -6071,9 +6071,9 @@@ static int bnx2x_int_mem_test(struct bn
        msleep(50);
        bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
        bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
- #ifndef BCM_CNIC
-       /* set NIC mode */
-       REG_WR(bp, PRS_REG_NIC_MODE, 1);
- #endif
+       if (!CNIC_SUPPORT(bp))
+               /* set NIC mode */
+               REG_WR(bp, PRS_REG_NIC_MODE, 1);
  
        /* Enable inputs of parser neighbor blocks */
        REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
  
  static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
  {
+       u32 val;
        REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
        if (!CHIP_IS_E1x(bp))
                REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
  /*    REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
  /*    REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
  
-       if (CHIP_REV_IS_FPGA(bp))
-               REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
-       else if (!CHIP_IS_E1x(bp))
-               REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
-                          (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
-                               | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
-                               | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
-                               | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
-                               | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
-       else
-               REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
+       val = PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT  |
+               PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
+               PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN;
+       if (!CHIP_IS_E1x(bp))
+               val |= PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
+                       PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED;
+       REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, val);
        REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
        REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
        REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
@@@ -6185,18 -6223,16 +6223,16 @@@ static void bnx2x_setup_fan_failure_det
                return;
  
        /* Fan failure is indicated by SPIO 5 */
-       bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
-                      MISC_REGISTERS_SPIO_INPUT_HI_Z);
+       bnx2x_set_spio(bp, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
  
        /* set to active low mode */
        val = REG_RD(bp, MISC_REG_SPIO_INT);
-       val |= ((1 << MISC_REGISTERS_SPIO_5) <<
-                                       MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
+       val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
        REG_WR(bp, MISC_REG_SPIO_INT, val);
  
        /* enable interrupt to signal the IGU */
        val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
-       val |= (1 << MISC_REGISTERS_SPIO_5);
+       val |= MISC_SPIO_SPIO5;
        REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
  }
  
@@@ -6256,6 -6292,10 +6292,10 @@@ void bnx2x_pf_disable(struct bnx2x *bp
  static void bnx2x__common_init_phy(struct bnx2x *bp)
  {
        u32 shmem_base[2], shmem2_base[2];
+       /* Avoid common init in case MFW supports LFA */
+       if (SHMEM2_RD(bp, size) >
+           (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
+               return;
        shmem_base[0] =  bp->common.shmem_base;
        shmem2_base[0] = bp->common.shmem2_base;
        if (!CHIP_IS_E1x(bp)) {
@@@ -6522,9 -6562,8 +6562,8 @@@ static int bnx2x_init_hw_common(struct 
        REG_WR(bp, QM_REG_SOFT_RESET, 1);
        REG_WR(bp, QM_REG_SOFT_RESET, 0);
  
- #ifdef BCM_CNIC
-       bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
- #endif
+       if (CNIC_SUPPORT(bp))
+               bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
  
        bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
        REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
  
        bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON);
  
- #ifdef BCM_CNIC
-       REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
-       REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
-       REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
-       REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
-       REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
-       REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
-       REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
-       REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
-       REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
-       REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
- #endif
+       if (CNIC_SUPPORT(bp)) {
+               REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
+               REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
+               REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
+               REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
+               REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
+               REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
+               REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
+               REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
+               REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
+               REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
+       }
        REG_WR(bp, SRC_REG_SOFT_RST, 0);
  
        if (sizeof(union cdu_context) != 1024)
@@@ -6786,16 -6825,17 +6825,17 @@@ static int bnx2x_init_hw_port(struct bn
        /* QM cid (connection) count */
        bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
  
- #ifdef BCM_CNIC
-       bnx2x_init_block(bp, BLOCK_TM, init_phase);
-       REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
-       REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
- #endif
+       if (CNIC_SUPPORT(bp)) {
+               bnx2x_init_block(bp, BLOCK_TM, init_phase);
+               REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
+               REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
+       }
  
        bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
  
+       bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
        if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
-               bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
  
                if (IS_MF(bp))
                        low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
                REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
        }
  
- #ifdef BCM_CNIC
-       bnx2x_init_block(bp, BLOCK_SRC, init_phase);
- #endif
+       if (CNIC_SUPPORT(bp))
+               bnx2x_init_block(bp, BLOCK_SRC, init_phase);
        bnx2x_init_block(bp, BLOCK_CDU, init_phase);
        bnx2x_init_block(bp, BLOCK_CFC, init_phase);
  
  
        /* If SPIO5 is set to generate interrupts, enable it for this port */
        val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
-       if (val & (1 << MISC_REGISTERS_SPIO_5)) {
+       if (val & MISC_SPIO_SPIO5) {
                u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
                                       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
                val = REG_RD(bp, reg_addr);
@@@ -7039,6 -7079,130 +7079,130 @@@ static void bnx2x_clear_func_ilt(struc
                bnx2x_ilt_wr(bp, i, 0);
  }
  
+ static void bnx2x_init_searcher(struct bnx2x *bp)
+ {
+       int port = BP_PORT(bp);
+       bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
+       /* T1 hash bits value determines the T1 number of entries */
+       REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
+ }
+ static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend)
+ {
+       int rc;
+       struct bnx2x_func_state_params func_params = {NULL};
+       struct bnx2x_func_switch_update_params *switch_update_params =
+               &func_params.params.switch_update;
+       /* Prepare parameters for function state transitions */
+       __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+       __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+       func_params.f_obj = &bp->func_obj;
+       func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
+       /* Function parameters */
+       switch_update_params->suspend = suspend;
+       rc = bnx2x_func_state_change(bp, &func_params);
+       return rc;
+ }
+ static int bnx2x_reset_nic_mode(struct bnx2x *bp)
+ {
+       int rc, i, port = BP_PORT(bp);
+       int vlan_en = 0, mac_en[NUM_MACS];
+       /* Close input from network */
+       if (bp->mf_mode == SINGLE_FUNCTION) {
+               bnx2x_set_rx_filter(&bp->link_params, 0);
+       } else {
+               vlan_en = REG_RD(bp, port ? NIG_REG_LLH1_FUNC_EN :
+                                  NIG_REG_LLH0_FUNC_EN);
+               REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
+                         NIG_REG_LLH0_FUNC_EN, 0);
+               for (i = 0; i < NUM_MACS; i++) {
+                       mac_en[i] = REG_RD(bp, port ?
+                                            (NIG_REG_LLH1_FUNC_MEM_ENABLE +
+                                             4 * i) :
+                                            (NIG_REG_LLH0_FUNC_MEM_ENABLE +
+                                             4 * i));
+                       REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
+                                             4 * i) :
+                                 (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i), 0);
+               }
+       }
+       /* Close BMC to host */
+       REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
+              NIG_REG_P1_TX_MNG_HOST_ENABLE, 0);
+       /* Suspend Tx switching to the PF. Completion of this ramrod
+        * further guarantees that all the packets of that PF / child
+        * VFs in BRB were processed by the Parser, so it is safe to
+        * change the NIC_MODE register.
+        */
+       rc = bnx2x_func_switch_update(bp, 1);
+       if (rc) {
+               BNX2X_ERR("Can't suspend tx-switching!\n");
+               return rc;
+       }
+       /* Change NIC_MODE register */
+       REG_WR(bp, PRS_REG_NIC_MODE, 0);
+       /* Open input from network */
+       if (bp->mf_mode == SINGLE_FUNCTION) {
+               bnx2x_set_rx_filter(&bp->link_params, 1);
+       } else {
+               REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
+                         NIG_REG_LLH0_FUNC_EN, vlan_en);
+               for (i = 0; i < NUM_MACS; i++) {
+                       REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
+                                             4 * i) :
+                                 (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i),
+                                 mac_en[i]);
+               }
+       }
+       /* Enable BMC to host */
+       REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
+              NIG_REG_P1_TX_MNG_HOST_ENABLE, 1);
+       /* Resume Tx switching to the PF */
+       rc = bnx2x_func_switch_update(bp, 0);
+       if (rc) {
+               BNX2X_ERR("Can't resume tx-switching!\n");
+               return rc;
+       }
+       DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
+       return 0;
+ }
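A simplified sketch of the save/close/flip/restore sequence that bnx2x_reset_nic_mode() above follows; the my_* register helpers and addresses are hypothetical placeholders, not driver registers:

#include <linux/types.h>

extern u32 my_reg_rd(u32 addr);
extern void my_reg_wr(u32 addr, u32 val);
extern int my_quiesce_tx(void);         /* wait for in-flight traffic to drain */
extern int my_resume_tx(void);
#define MY_FUNC_EN_REG  0x1000
#define MY_MODE_REG     0x1004

static int my_switch_mode(void)
{
        u32 saved_en = my_reg_rd(MY_FUNC_EN_REG);
        int rc;

        my_reg_wr(MY_FUNC_EN_REG, 0);           /* close input from network */
        rc = my_quiesce_tx();
        if (rc)
                return rc;
        my_reg_wr(MY_MODE_REG, 0);              /* pipeline drained, safe to flip mode */
        my_reg_wr(MY_FUNC_EN_REG, saved_en);    /* reopen input */
        return my_resume_tx();
}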
+ int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
+ {
+       int rc;
+       bnx2x_ilt_init_op_cnic(bp, INITOP_SET);
+       if (CONFIGURE_NIC_MODE(bp)) {
+               /* Configure searcher as part of function hw init */
+               bnx2x_init_searcher(bp);
+               /* Reset NIC mode */
+               rc = bnx2x_reset_nic_mode(bp);
+               if (rc)
+                       BNX2X_ERR("Can't change NIC mode!\n");
+               return rc;
+       }
+       return 0;
+ }
  static int bnx2x_init_hw_func(struct bnx2x *bp)
  {
        int port = BP_PORT(bp);
        }
        bnx2x_ilt_init_op(bp, INITOP_SET);
  
- #ifdef BCM_CNIC
-       bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
-       /* T1 hash bits value determines the T1 number of entries */
-       REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
- #endif
+       if (!CONFIGURE_NIC_MODE(bp)) {
+               bnx2x_init_searcher(bp);
+               REG_WR(bp, PRS_REG_NIC_MODE, 0);
+               DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
+       } else {
+               /* Set NIC mode */
+               REG_WR(bp, PRS_REG_NIC_MODE, 1);
+               DP(NETIF_MSG_IFUP, "NIC MODE configured\n");
  
- #ifndef BCM_CNIC
-       /* set NIC mode */
-       REG_WR(bp, PRS_REG_NIC_MODE, 1);
- #endif  /* BCM_CNIC */
+       }
  
        if (!CHIP_IS_E1x(bp)) {
                u32 pf_conf = IGU_PF_CONF_FUNC_EN;
  }
  
  
+ void bnx2x_free_mem_cnic(struct bnx2x *bp)
+ {
+       bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE);
+       if (!CHIP_IS_E1x(bp))
+               BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
+                              sizeof(struct host_hc_status_block_e2));
+       else
+               BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
+                              sizeof(struct host_hc_status_block_e1x));
+       BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
+ }
  void bnx2x_free_mem(struct bnx2x *bp)
  {
        int i;
  
        BNX2X_FREE(bp->ilt->lines);
  
- #ifdef BCM_CNIC
-       if (!CHIP_IS_E1x(bp))
-               BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
-                              sizeof(struct host_hc_status_block_e2));
-       else
-               BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
-                              sizeof(struct host_hc_status_block_e1x));
-       BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
- #endif
        BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
  
        BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
@@@ -7444,24 -7610,44 +7610,44 @@@ alloc_mem_err
        return -ENOMEM;
  }
  
- int bnx2x_alloc_mem(struct bnx2x *bp)
+ int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
  {
-       int i, allocated, context_size;
- #ifdef BCM_CNIC
        if (!CHIP_IS_E1x(bp))
                /* size = the status block + ramrod buffers */
                BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
                                sizeof(struct host_hc_status_block_e2));
        else
-               BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
-                               sizeof(struct host_hc_status_block_e1x));
+               BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb,
+                               &bp->cnic_sb_mapping,
+                               sizeof(struct
+                                      host_hc_status_block_e1x));
  
-       /* allocate searcher T2 table */
-       BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
- #endif
+       if (CONFIGURE_NIC_MODE(bp))
+               /* allocate searcher T2 table, as it wasn't allocated before */
+               BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
+       /* write address to which L5 should insert its values */
+       bp->cnic_eth_dev.addr_drv_info_to_mcp =
+               &bp->slowpath->drv_info_to_mcp;
+       if (bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_ALLOC))
+               goto alloc_mem_err;
+       return 0;
+ alloc_mem_err:
+       bnx2x_free_mem_cnic(bp);
+       BNX2X_ERR("Can't allocate memory\n");
+       return -ENOMEM;
+ }
+ int bnx2x_alloc_mem(struct bnx2x *bp)
+ {
+       int i, allocated, context_size;
  
+       if (!CONFIGURE_NIC_MODE(bp))
+               /* allocate searcher T2 table */
+               BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
  
        BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
                        sizeof(struct host_sp_status_block));
        BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
                        sizeof(struct bnx2x_slowpath));
  
- #ifdef BCM_CNIC
-       /* write address to which L5 should insert its values */
-       bp->cnic_eth_dev.addr_drv_info_to_mcp = &bp->slowpath->drv_info_to_mcp;
- #endif
        /* Allocated memory for FW statistics  */
        if (bnx2x_alloc_fw_stats_mem(bp))
                goto alloc_mem_err;
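A small sketch of the allocate-or-unwind idiom behind the alloc_mem_err label above, using hypothetical my_* names rather than the driver's BNX2X_PCI_ALLOC helpers:

#include <linux/errno.h>
#include <linux/slab.h>

struct my_mem {
        void *a;
        void *b;
};

static void my_free_all(struct my_mem *m)
{
        kfree(m->a);
        kfree(m->b);
        m->a = NULL;
        m->b = NULL;
}

/* Every allocation failure funnels into one error label that frees whatever
 * was already set up and returns -ENOMEM.
 */
static int my_alloc_all(struct my_mem *m)
{
        m->a = kzalloc(64, GFP_KERNEL);
        if (!m->a)
                goto alloc_err;
        m->b = kzalloc(64, GFP_KERNEL);
        if (!m->b)
                goto alloc_err;
        return 0;

alloc_err:
        my_free_all(m);         /* kfree(NULL) is a no-op, so this is safe */
        return -ENOMEM;
}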
@@@ -7595,14 -7776,12 +7776,12 @@@ int bnx2x_set_eth_mac(struct bnx2x *bp
  {
        unsigned long ramrod_flags = 0;
  
- #ifdef BCM_CNIC
        if (is_zero_ether_addr(bp->dev->dev_addr) &&
            (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
                DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN,
                   "Ignoring Zero MAC for STORAGE SD mode\n");
                return 0;
        }
- #endif
  
        DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
  
@@@ -7631,7 -7810,8 +7810,8 @@@ void bnx2x_set_int_mode(struct bnx2x *b
                bnx2x_enable_msi(bp);
                /* falling through... */
        case INT_MODE_INTx:
-               bp->num_queues = 1 + NON_ETH_CONTEXT_USE;
+               bp->num_ethernet_queues = 1;
+               bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
                BNX2X_DEV_INFO("set number of queues to 1\n");
                break;
        default:
                    bp->flags & USING_SINGLE_MSIX_FLAG) {
                        /* failed to enable multiple MSI-X */
                        BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
-                                      bp->num_queues, 1 + NON_ETH_CONTEXT_USE);
+                                      bp->num_queues,
+                                      1 + bp->num_cnic_queues);
  
-                       bp->num_queues = 1 + NON_ETH_CONTEXT_USE;
+                       bp->num_queues = 1 + bp->num_cnic_queues;
  
                        /* Try to enable MSI */
                        if (!(bp->flags & USING_SINGLE_MSIX_FLAG) &&
@@@ -7678,9 -7859,9 +7859,9 @@@ void bnx2x_ilt_set_info(struct bnx2x *b
        ilt_client->flags = ILT_CLIENT_SKIP_MEM;
        ilt_client->start = line;
        line += bnx2x_cid_ilt_lines(bp);
- #ifdef BCM_CNIC
-       line += CNIC_ILT_LINES;
- #endif
+       if (CNIC_SUPPORT(bp))
+               line += CNIC_ILT_LINES;
        ilt_client->end = line - 1;
  
        DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
                   ilog2(ilt_client->page_size >> 12));
  
        }
-       /* SRC */
-       ilt_client = &ilt->clients[ILT_CLIENT_SRC];
- #ifdef BCM_CNIC
-       ilt_client->client_num = ILT_CLIENT_SRC;
-       ilt_client->page_size = SRC_ILT_PAGE_SZ;
-       ilt_client->flags = 0;
-       ilt_client->start = line;
-       line += SRC_ILT_LINES;
-       ilt_client->end = line - 1;
  
-       DP(NETIF_MSG_IFUP,
-          "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
-          ilt_client->start,
-          ilt_client->end,
-          ilt_client->page_size,
-          ilt_client->flags,
-          ilog2(ilt_client->page_size >> 12));
+       if (CNIC_SUPPORT(bp)) {
+               /* SRC */
+               ilt_client = &ilt->clients[ILT_CLIENT_SRC];
+               ilt_client->client_num = ILT_CLIENT_SRC;
+               ilt_client->page_size = SRC_ILT_PAGE_SZ;
+               ilt_client->flags = 0;
+               ilt_client->start = line;
+               line += SRC_ILT_LINES;
+               ilt_client->end = line - 1;
  
- #else
-       ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
- #endif
+               DP(NETIF_MSG_IFUP,
+                  "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
+                  ilt_client->start,
+                  ilt_client->end,
+                  ilt_client->page_size,
+                  ilt_client->flags,
+                  ilog2(ilt_client->page_size >> 12));
  
-       /* TM */
-       ilt_client = &ilt->clients[ILT_CLIENT_TM];
- #ifdef BCM_CNIC
-       ilt_client->client_num = ILT_CLIENT_TM;
-       ilt_client->page_size = TM_ILT_PAGE_SZ;
-       ilt_client->flags = 0;
-       ilt_client->start = line;
-       line += TM_ILT_LINES;
-       ilt_client->end = line - 1;
+               /* TM */
+               ilt_client = &ilt->clients[ILT_CLIENT_TM];
+               ilt_client->client_num = ILT_CLIENT_TM;
+               ilt_client->page_size = TM_ILT_PAGE_SZ;
+               ilt_client->flags = 0;
+               ilt_client->start = line;
+               line += TM_ILT_LINES;
+               ilt_client->end = line - 1;
  
-       DP(NETIF_MSG_IFUP,
-          "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
-          ilt_client->start,
-          ilt_client->end,
-          ilt_client->page_size,
-          ilt_client->flags,
-          ilog2(ilt_client->page_size >> 12));
+               DP(NETIF_MSG_IFUP,
+                  "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
+                  ilt_client->start,
+                  ilt_client->end,
+                  ilt_client->page_size,
+                  ilt_client->flags,
+                  ilog2(ilt_client->page_size >> 12));
+       }
  
- #else
-       ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
- #endif
        BUG_ON(line > ILT_MAX_LINES);
  }
  
@@@ -7822,7 -7997,7 +7997,7 @@@ static void bnx2x_pf_q_prep_init(struc
        }
  }
  
- int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ static int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                        struct bnx2x_queue_state_params *q_params,
                        struct bnx2x_queue_setup_tx_only_params *tx_only_params,
                        int tx_index, bool leading)
@@@ -7923,6 -8098,9 +8098,9 @@@ int bnx2x_setup_queue(struct bnx2x *bp
        /* Set the command */
        q_params.cmd = BNX2X_Q_CMD_SETUP;
  
+       if (IS_FCOE_FP(fp))
+               bp->fcoe_init = true;
        /* Change the state to SETUP */
        rc = bnx2x_queue_state_change(bp, &q_params);
        if (rc) {
@@@ -8036,12 -8214,12 +8214,12 @@@ static void bnx2x_reset_func(struct bnx
                           SB_DISABLED);
        }
  
- #ifdef BCM_CNIC
-       /* CNIC SB */
-       REG_WR8(bp, BAR_CSTRORM_INTMEM +
-               CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(bnx2x_cnic_fw_sb_id(bp)),
-               SB_DISABLED);
- #endif
+       if (CNIC_LOADED(bp))
+               /* CNIC SB */
+               REG_WR8(bp, BAR_CSTRORM_INTMEM +
+                       CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET
+                       (bnx2x_cnic_fw_sb_id(bp)), SB_DISABLED);
        /* SP SB */
        REG_WR8(bp, BAR_CSTRORM_INTMEM +
                   CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
                REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
        }
  
- #ifdef BCM_CNIC
-       /* Disable Timer scan */
-       REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
-       /*
-        * Wait for at least 10ms and up to 2 second for the timers scan to
-        * complete
-        */
-       for (i = 0; i < 200; i++) {
-               msleep(10);
-               if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
-                       break;
+       if (CNIC_LOADED(bp)) {
+               /* Disable Timer scan */
+               REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
+               /*
+                * Wait for at least 10ms and up to 2 seconds for the timers
+                * scan to complete
+                */
+               for (i = 0; i < 200; i++) {
+                       msleep(10);
+                       if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
+                               break;
+               }
        }
- #endif
        /* Clear ILT */
        bnx2x_clear_func_ilt(bp, func);
  
@@@ -8408,13 -8586,24 +8586,24 @@@ void bnx2x_chip_cleanup(struct bnx2x *b
        /* Close multi and leading connections
         * Completions for ramrods are collected in a synchronous way
         */
-       for_each_queue(bp, i)
+       for_each_eth_queue(bp, i)
                if (bnx2x_stop_queue(bp, i))
  #ifdef BNX2X_STOP_ON_ERROR
                        return;
  #else
                        goto unload_error;
  #endif
+       if (CNIC_LOADED(bp)) {
+               for_each_cnic_queue(bp, i)
+                       if (bnx2x_stop_queue(bp, i))
+ #ifdef BNX2X_STOP_ON_ERROR
+                               return;
+ #else
+                               goto unload_error;
+ #endif
+       }
        /* If SP settings didn't get completed so far - something
         * very wrong has happen.
         */
@@@ -8436,6 -8625,8 +8625,8 @@@ unload_error
        bnx2x_netif_stop(bp, 1);
        /* Delete all NAPI objects */
        bnx2x_del_all_napi(bp);
+       if (CNIC_LOADED(bp))
+               bnx2x_del_all_napi_cnic(bp);
  
        /* Release IRQs */
        bnx2x_free_irq(bp);
@@@ -8498,7 -8689,7 +8689,7 @@@ static void bnx2x_set_234_gates(struct 
                       (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
                       (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
        } else {
 -              /* Prevent incomming interrupts in IGU */
 +              /* Prevent incoming interrupts in IGU */
                val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
  
                REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION,
@@@ -8557,7 -8748,8 +8748,8 @@@ static void bnx2x_reset_mcp_prep(struc
  
        /* Get shmem offset */
        shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
-       validity_offset = offsetof(struct shmem_region, validity_map[0]);
+       validity_offset =
+               offsetof(struct shmem_region, validity_map[BP_PORT(bp)]);
  
        /* Clear validity map flags */
        if (shmem > 0)
@@@ -8650,7 -8842,11 +8842,11 @@@ static void bnx2x_process_kill_chip_res
                MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
                MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
  
-       /* Don't reset the following blocks */
+       /* Don't reset the following blocks.
+        * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
+        *            reset, as in a 4-port device they might still be owned
+        *            by the MCP (there is only one leader per path).
+        */
        not_reset_mask1 =
                MISC_REGISTERS_RESET_REG_1_RST_HC |
                MISC_REGISTERS_RESET_REG_1_RST_PXPV |
                MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
                MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
                MISC_REGISTERS_RESET_REG_2_RST_ATC |
-               MISC_REGISTERS_RESET_REG_2_PGLC;
+               MISC_REGISTERS_RESET_REG_2_PGLC |
+               MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
+               MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
+               MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
+               MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
+               MISC_REGISTERS_RESET_REG_2_UMAC0 |
+               MISC_REGISTERS_RESET_REG_2_UMAC1;
  
        /*
         * Keep the following blocks in reset:
         *  - all xxMACs are handled by the bnx2x_link code.
         */
        stay_reset2 =
-               MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
-               MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
-               MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
-               MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
-               MISC_REGISTERS_RESET_REG_2_UMAC0 |
-               MISC_REGISTERS_RESET_REG_2_UMAC1 |
                MISC_REGISTERS_RESET_REG_2_XMAC |
                MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
  
@@@ -8768,6 -8964,7 +8964,7 @@@ static int bnx2x_process_kill(struct bn
        int cnt = 1000;
        u32 val = 0;
        u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
+               u32 tags_63_32 = 0;
  
  
        /* Empty the Tetris buffer, wait for 1s */
                port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
                port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
                pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
+               if (CHIP_IS_E3(bp))
+                       tags_63_32 = REG_RD(bp, PGLUE_B_REG_TAGS_63_32);
                if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
                    ((port_is_idle_0 & 0x1) == 0x1) &&
                    ((port_is_idle_1 & 0x1) == 0x1) &&
-                   (pgl_exp_rom2 == 0xffffffff))
+                   (pgl_exp_rom2 == 0xffffffff) &&
+                   (!CHIP_IS_E3(bp) || (tags_63_32 == 0xffffffff)))
                        break;
                usleep_range(1000, 1000);
        } while (cnt-- > 0);
  
        /* TBD: Add resetting the NO_MCP mode DB here */
  
-       /* PXP */
-       bnx2x_pxp_prep(bp);
        /* Open the gates #2, #3 and #4 */
        bnx2x_set_234_gates(bp, false);
  
        return 0;
  }
  
- int bnx2x_leader_reset(struct bnx2x *bp)
+ static int bnx2x_leader_reset(struct bnx2x *bp)
  {
        int rc = 0;
        bool global = bnx2x_reset_is_global(bp);
@@@ -9233,12 -9431,19 +9431,19 @@@ static inline void bnx2x_undi_int_disab
                bnx2x_undi_int_disable_e1h(bp);
  }
  
- static void __devinit bnx2x_prev_unload_close_mac(struct bnx2x *bp)
+ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
+                                       struct bnx2x_mac_vals *vals)
  {
        u32 val, base_addr, offset, mask, reset_reg;
        bool mac_stopped = false;
        u8 port = BP_PORT(bp);
  
+       /* reset addresses as they also mark which values were changed */
+       vals->bmac_addr = 0;
+       vals->umac_addr = 0;
+       vals->xmac_addr = 0;
+       vals->emac_addr = 0;
        reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);
  
        if (!CHIP_IS_E3(bp)) {
                         */
                        wb_data[0] = REG_RD(bp, base_addr + offset);
                        wb_data[1] = REG_RD(bp, base_addr + offset + 0x4);
+                       vals->bmac_addr = base_addr + offset;
+                       vals->bmac_val[0] = wb_data[0];
+                       vals->bmac_val[1] = wb_data[1];
                        wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
-                       REG_WR(bp, base_addr + offset, wb_data[0]);
-                       REG_WR(bp, base_addr + offset + 0x4, wb_data[1]);
+                       REG_WR(bp, vals->bmac_addr, wb_data[0]);
+                       REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]);
  
                }
                BNX2X_DEV_INFO("Disable emac Rx\n");
-               REG_WR(bp, NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4, 0);
+               vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4;
+               vals->emac_val = REG_RD(bp, vals->emac_addr);
+               REG_WR(bp, vals->emac_addr, 0);
                mac_stopped = true;
        } else {
                if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
                               val & ~(1 << 1));
                        REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
                               val | (1 << 1));
-                       REG_WR(bp, base_addr + XMAC_REG_CTRL, 0);
+                       vals->xmac_addr = base_addr + XMAC_REG_CTRL;
+                       vals->xmac_val = REG_RD(bp, vals->xmac_addr);
+                       REG_WR(bp, vals->xmac_addr, 0);
                        mac_stopped = true;
                }
                mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
                if (mask & reset_reg) {
                        BNX2X_DEV_INFO("Disable umac Rx\n");
                        base_addr = BP_PORT(bp) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
-                       REG_WR(bp, base_addr + UMAC_REG_COMMAND_CONFIG, 0);
+                       vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
+                       vals->umac_val = REG_RD(bp, vals->umac_addr);
+                       REG_WR(bp, vals->umac_addr, 0);
                        mac_stopped = true;
                }
        }
  #define BNX2X_PREV_UNDI_BD(val)               ((val) >> 16 & 0xffff)
  #define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
  
- static void __devinit bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port,
-                                                u8 inc)
+ static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port, u8 inc)
  {
        u16 rcq, bd;
        u32 tmp_reg = REG_RD(bp, BNX2X_PREV_UNDI_PROD_ADDR(port));
                       port, bd, rcq);
  }
  
- static int __devinit bnx2x_prev_mcp_done(struct bnx2x *bp)
+ static int bnx2x_prev_mcp_done(struct bnx2x *bp)
  {
        u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE,
                                  DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
        return 0;
  }
  
- static bool __devinit bnx2x_prev_is_path_marked(struct bnx2x *bp)
+ static struct bnx2x_prev_path_list *
+               bnx2x_prev_path_get_entry(struct bnx2x *bp)
+ {
+       struct bnx2x_prev_path_list *tmp_list;
+       list_for_each_entry(tmp_list, &bnx2x_prev_list, list)
+               if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
+                   bp->pdev->bus->number == tmp_list->bus &&
+                   BP_PATH(bp) == tmp_list->path)
+                       return tmp_list;
+       return NULL;
+ }
+ static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
  {
        struct bnx2x_prev_path_list *tmp_list;
        int rc = false;
        return rc;
  }
  
- static int __devinit bnx2x_prev_mark_path(struct bnx2x *bp)
+ static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
  {
        struct bnx2x_prev_path_list *tmp_list;
        int rc;
        tmp_list->bus = bp->pdev->bus->number;
        tmp_list->slot = PCI_SLOT(bp->pdev->devfn);
        tmp_list->path = BP_PATH(bp);
+       tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0;
  
        rc = down_interruptible(&bnx2x_prev_sem);
        if (rc) {
        return rc;
  }
  
- static int __devinit bnx2x_do_flr(struct bnx2x *bp)
+ static int bnx2x_do_flr(struct bnx2x *bp)
  {
        int i;
        u16 status;
@@@ -9421,7 -9648,7 +9648,7 @@@ clear
        return 0;
  }
  
- static int __devinit bnx2x_prev_unload_uncommon(struct bnx2x *bp)
+ static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
  {
        int rc;
  
        return rc;
  }
  
- static int __devinit bnx2x_prev_unload_common(struct bnx2x *bp)
+ static int bnx2x_prev_unload_common(struct bnx2x *bp)
  {
        u32 reset_reg, tmp_reg = 0, rc;
+       bool prev_undi = false;
+       struct bnx2x_mac_vals mac_vals;
        /* It is possible a previous function received 'common' answer,
         * but hasn't loaded yet, therefore creating a scenario of
         * multiple functions receiving 'common' on the same path.
         */
        BNX2X_DEV_INFO("Common unload Flow\n");
  
+       memset(&mac_vals, 0, sizeof(mac_vals));
        if (bnx2x_prev_is_path_marked(bp))
                return bnx2x_prev_mcp_done(bp);
  
        /* Reset should be performed after BRB is emptied */
        if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
                u32 timer_count = 1000;
-               bool prev_undi = false;
  
                /* Close the MAC Rx to prevent BRB from filling up */
-               bnx2x_prev_unload_close_mac(bp);
+               bnx2x_prev_unload_close_mac(bp, &mac_vals);
+               /* close LLH filters towards the BRB */
+               bnx2x_set_rx_filter(&bp->link_params, 0);
  
                /* Check if the UNDI driver was previously loaded
                 * UNDI driver initializes CID offset for normal bell to 0x7
        /* No packets are in the pipeline, path is ready for reset */
        bnx2x_reset_common(bp);
  
-       rc = bnx2x_prev_mark_path(bp);
+       if (mac_vals.xmac_addr)
+               REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val);
+       if (mac_vals.umac_addr)
+               REG_WR(bp, mac_vals.umac_addr, mac_vals.umac_val);
+       if (mac_vals.emac_addr)
+               REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val);
+       if (mac_vals.bmac_addr) {
+               REG_WR(bp, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
+               REG_WR(bp, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
+       }
+       rc = bnx2x_prev_mark_path(bp, prev_undi);
        if (rc) {
                bnx2x_prev_mcp_done(bp);
                return rc;
   * to clear the interrupt which detected this from the pglueb and the was done
   * bit
   */
- static void __devinit bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
+ static void bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
  {
-       u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
-       if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
-               BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing");
-               REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << BP_FUNC(bp));
+       if (!CHIP_IS_E1x(bp)) {
+               u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
+               if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
+                       BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing");
+                       REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
+                              1 << BP_FUNC(bp));
+               }
        }
  }
  
- static int __devinit bnx2x_prev_unload(struct bnx2x *bp)
+ static int bnx2x_prev_unload(struct bnx2x *bp)
  {
        int time_counter = 10;
        u32 rc, fw, hw_lock_reg, hw_lock_val;
+       struct bnx2x_prev_path_list *prev_list;
        BNX2X_DEV_INFO("Entering Previous Unload Flow\n");
  
        /* clear hw from errors which may have resulted from an interrupted
                rc = -EBUSY;
        }
  
+       /* Mark function if its port was used to boot from SAN */
+       prev_list = bnx2x_prev_path_get_entry(bp);
+       if (prev_list && (prev_list->undi & (1 << BP_PORT(bp))))
+               bp->link_params.feature_config_flags |=
+                       FEATURE_CONFIG_BOOT_FROM_SAN;
        BNX2X_DEV_INFO("Finished Previous Unload Flow [%d]\n", rc);
  
        return rc;
  }
  
- static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
+ static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
  {
        u32 val, val2, val3, val4, id, boot_mode;
        u16 pmc;
  
        bp->link_params.shmem_base = bp->common.shmem_base;
        bp->link_params.shmem2_base = bp->common.shmem2_base;
+       if (SHMEM2_RD(bp, size) >
+           (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
+               bp->link_params.lfa_base =
+               REG_RD(bp, bp->common.shmem2_base +
+                      (u32)offsetof(struct shmem2_region,
+                                    lfa_host_addr[BP_PORT(bp)]));
+       else
+               bp->link_params.lfa_base = 0;
        BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
                       bp->common.shmem_base, bp->common.shmem2_base);
  
        bp->link_params.feature_config_flags |=
                (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
                FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
+       bp->link_params.feature_config_flags |=
+               (val >= REQ_BC_VER_4_MT_SUPPORTED) ?
+               FEATURE_CONFIG_MT_SUPPORT : 0;
        bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
                        BC_SUPPORTS_PFC_STATS : 0;
  
  #define IGU_FID(val)  GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
  #define IGU_VEC(val)  GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
  
- static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
+ static int bnx2x_get_igu_cam_info(struct bnx2x *bp)
  {
        int pfid = BP_FUNC(bp);
        int igu_sb_id;
                bp->igu_dsb_id =  E1HVN_MAX * FP_SB_MAX_E1x +
                        (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
  
-               return;
+               return 0;
        }
  
        /* IGU in normal mode - read CAM */
        bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt);
  #endif
  
-       if (igu_sb_cnt == 0)
+       if (igu_sb_cnt == 0) {
                BNX2X_ERR("CAM configuration error\n");
+               return -EINVAL;
+       }
+       return 0;
  }
  
- static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
-                                                   u32 switch_cfg)
+ static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
  {
        int cfg_size = 0, idx, port = BP_PORT(bp);
  
                       bp->port.supported[1]);
  }
  
- static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
+ static void bnx2x_link_settings_requested(struct bnx2x *bp)
  {
        u32 link_config, idx, cfg_size = 0;
        bp->port.advertising[0] = 0;
  
                bp->link_params.req_flow_ctrl[idx] = (link_config &
                                         PORT_FEATURE_FLOW_CONTROL_MASK);
-               if ((bp->link_params.req_flow_ctrl[idx] ==
-                    BNX2X_FLOW_CTRL_AUTO) &&
-                   !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
-                       bp->link_params.req_flow_ctrl[idx] =
-                               BNX2X_FLOW_CTRL_NONE;
+               if (bp->link_params.req_flow_ctrl[idx] ==
+                   BNX2X_FLOW_CTRL_AUTO) {
+                       if (!(bp->port.supported[idx] & SUPPORTED_Autoneg))
+                               bp->link_params.req_flow_ctrl[idx] =
+                                                       BNX2X_FLOW_CTRL_NONE;
+                       else
+                               bnx2x_set_requested_fc(bp);
                }
  
                BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d req_flow_ctrl 0x%x advertising 0x%x\n",
        }
  }
  
- static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
+ static void bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
  {
        mac_hi = cpu_to_be16(mac_hi);
        mac_lo = cpu_to_be32(mac_lo);
        memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
  }
  
- static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
+ static void bnx2x_get_port_hwinfo(struct bnx2x *bp)
  {
        int port = BP_PORT(bp);
        u32 config;
                bp->mdio.prtad =
                        XGXS_EXT_PHY_ADDR(ext_phy_config);
  
-       /*
-        * Check if hw lock is required to access MDC/MDIO bus to the PHY(s)
-        * In MF mode, it is set to cover self test cases
-        */
-       if (IS_MF(bp))
-               bp->port.need_hw_lock = 1;
-       else
-               bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
-                                                       bp->common.shmem_base,
-                                                       bp->common.shmem2_base);
        /* Configure link feature according to nvram value */
        eee_mode = (((SHMEM_RD(bp, dev_info.
                      port_feature_config[port].eee_power_mode)) &
  void bnx2x_get_iscsi_info(struct bnx2x *bp)
  {
        u32 no_flags = NO_ISCSI_FLAG;
- #ifdef BCM_CNIC
        int port = BP_PORT(bp);
        u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
                                drv_lic_key[port].max_iscsi_conn);
  
+       if (!CNIC_SUPPORT(bp)) {
+               bp->flags |= no_flags;
+               return;
+       }
        /* Get the number of maximum allowed iSCSI connections */
        bp->cnic_eth_dev.max_iscsi_conn =
                (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
         */
        if (!bp->cnic_eth_dev.max_iscsi_conn)
                bp->flags |= no_flags;
- #else
-       bp->flags |= no_flags;
- #endif
  }
  
- #ifdef BCM_CNIC
- static void __devinit bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
+ static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
  {
        /* Port info */
        bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
        bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
                MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower);
  }
- #endif
- static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp)
+ static void bnx2x_get_fcoe_info(struct bnx2x *bp)
  {
- #ifdef BCM_CNIC
        int port = BP_PORT(bp);
        int func = BP_ABS_FUNC(bp);
        u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
                                drv_lic_key[port].max_fcoe_conn);
  
+       if (!CNIC_SUPPORT(bp)) {
+               bp->flags |= NO_FCOE_FLAG;
+               return;
+       }
        /* Get the number of maximum allowed FCoE connections */
        bp->cnic_eth_dev.max_fcoe_conn =
                (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
                if (BNX2X_MF_EXT_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp))
                        bnx2x_get_ext_wwn_info(bp, func);
  
-       } else if (IS_MF_FCOE_SD(bp))
+       } else if (IS_MF_FCOE_SD(bp) && !CHIP_IS_E1x(bp)) {
                bnx2x_get_ext_wwn_info(bp, func);
+       }
  
        BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);
  
         */
        if (!bp->cnic_eth_dev.max_fcoe_conn)
                bp->flags |= NO_FCOE_FLAG;
- #else
-       bp->flags |= NO_FCOE_FLAG;
- #endif
  }
  
- static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
+ static void bnx2x_get_cnic_info(struct bnx2x *bp)
  {
        /*
         * iSCSI may be dynamically disabled but reading
        bnx2x_get_fcoe_info(bp);
  }
  
- static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
+ static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
  {
        u32 val, val2;
        int func = BP_ABS_FUNC(bp);
        int port = BP_PORT(bp);
- #ifdef BCM_CNIC
        u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
        u8 *fip_mac = bp->fip_mac;
- #endif
  
-       /* Zero primary MAC configuration */
-       memset(bp->dev->dev_addr, 0, ETH_ALEN);
-       if (BP_NOMCP(bp)) {
-               BNX2X_ERROR("warning: random MAC workaround active\n");
-               eth_hw_addr_random(bp->dev);
-       } else if (IS_MF(bp)) {
-               val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
-               val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
-               if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
-                   (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
-                       bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
- #ifdef BCM_CNIC
-               /*
-                * iSCSI and FCoE NPAR MACs: if there is no either iSCSI or
+       if (IS_MF(bp)) {
+               /* iSCSI and FCoE NPAR MACs: if there is neither an iSCSI nor an
                 * FCoE MAC then the appropriate feature should be disabled.
-                *
-                * In non SD mode features configuration comes from
-                * struct func_ext_config.
+                * In non SD mode features configuration comes from struct
+                * func_ext_config.
                 */
-               if (!IS_MF_SD(bp)) {
+               if (!IS_MF_SD(bp) && !CHIP_IS_E1x(bp)) {
                        u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
                        if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
                                val2 = MF_CFG_RD(bp, func_ext_config[func].
-                                                    iscsi_mac_addr_upper);
+                                                iscsi_mac_addr_upper);
                                val = MF_CFG_RD(bp, func_ext_config[func].
-                                                   iscsi_mac_addr_lower);
+                                               iscsi_mac_addr_lower);
                                bnx2x_set_mac_buf(iscsi_mac, val, val2);
-                               BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n",
-                                              iscsi_mac);
-                       } else
+                               BNX2X_DEV_INFO
+                                       ("Read iSCSI MAC: %pM\n", iscsi_mac);
+                       } else {
                                bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
+                       }
  
                        if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
                                val2 = MF_CFG_RD(bp, func_ext_config[func].
-                                                    fcoe_mac_addr_upper);
+                                                fcoe_mac_addr_upper);
                                val = MF_CFG_RD(bp, func_ext_config[func].
-                                                   fcoe_mac_addr_lower);
+                                               fcoe_mac_addr_lower);
                                bnx2x_set_mac_buf(fip_mac, val, val2);
-                               BNX2X_DEV_INFO("Read FCoE L2 MAC: %pM\n",
-                                              fip_mac);
-                       } else
+                               BNX2X_DEV_INFO
+                                       ("Read FCoE L2 MAC: %pM\n", fip_mac);
+                       } else {
                                bp->flags |= NO_FCOE_FLAG;
+                       }
  
                        bp->mf_ext_config = cfg;
  
                } else { /* SD MODE */
-                       if (IS_MF_STORAGE_SD(bp)) {
-                               if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
-                                       /* use primary mac as iscsi mac */
-                                       memcpy(iscsi_mac, bp->dev->dev_addr,
-                                              ETH_ALEN);
-                                       BNX2X_DEV_INFO("SD ISCSI MODE\n");
-                                       BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n",
-                                                      iscsi_mac);
-                               } else { /* FCoE */
-                                       memcpy(fip_mac, bp->dev->dev_addr,
-                                              ETH_ALEN);
-                                       BNX2X_DEV_INFO("SD FCoE MODE\n");
-                                       BNX2X_DEV_INFO("Read FIP MAC: %pM\n",
-                                                      fip_mac);
-                               }
-                               /* Zero primary MAC configuration */
-                               memset(bp->dev->dev_addr, 0, ETH_ALEN);
+                       if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
+                               /* use primary mac as iscsi mac */
+                               memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN);
+                               BNX2X_DEV_INFO("SD ISCSI MODE\n");
+                               BNX2X_DEV_INFO
+                                       ("Read iSCSI MAC: %pM\n", iscsi_mac);
+                       } else if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
+                               /* use primary mac as fip mac */
+                               memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
+                               BNX2X_DEV_INFO("SD FCoE MODE\n");
+                               BNX2X_DEV_INFO
+                                       ("Read FIP MAC: %pM\n", fip_mac);
                        }
                }
  
+               if (IS_MF_STORAGE_SD(bp))
+                       /* Zero primary MAC configuration */
+                       memset(bp->dev->dev_addr, 0, ETH_ALEN);
                if (IS_MF_FCOE_AFEX(bp))
                        /* use FIP MAC as primary MAC */
                        memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
  
- #endif
        } else {
-               /* in SF read MACs from port configuration */
-               val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
-               val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
-               bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
- #ifdef BCM_CNIC
                val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
-                                   iscsi_mac_upper);
+                               iscsi_mac_upper);
                val = SHMEM_RD(bp, dev_info.port_hw_config[port].
-                                  iscsi_mac_lower);
+                              iscsi_mac_lower);
                bnx2x_set_mac_buf(iscsi_mac, val, val2);
  
                val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
-                                   fcoe_fip_mac_upper);
+                               fcoe_fip_mac_upper);
                val = SHMEM_RD(bp, dev_info.port_hw_config[port].
-                                  fcoe_fip_mac_lower);
+                              fcoe_fip_mac_lower);
                bnx2x_set_mac_buf(fip_mac, val, val2);
- #endif
        }
  
-       memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
-       memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
- #ifdef BCM_CNIC
-       /* Disable iSCSI if MAC configuration is
-        * invalid.
-        */
+       /* Disable iSCSI OOO if MAC configuration is invalid. */
        if (!is_valid_ether_addr(iscsi_mac)) {
-               bp->flags |= NO_ISCSI_FLAG;
+               bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
                memset(iscsi_mac, 0, ETH_ALEN);
        }
  
-       /* Disable FCoE if MAC configuration is
-        * invalid.
-        */
+       /* Disable FCoE if MAC configuration is invalid. */
        if (!is_valid_ether_addr(fip_mac)) {
                bp->flags |= NO_FCOE_FLAG;
                memset(bp->fip_mac, 0, ETH_ALEN);
        }
- #endif
+ }
+ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
+ {
+       u32 val, val2;
+       int func = BP_ABS_FUNC(bp);
+       int port = BP_PORT(bp);
+       /* Zero primary MAC configuration */
+       memset(bp->dev->dev_addr, 0, ETH_ALEN);
+       if (BP_NOMCP(bp)) {
+               BNX2X_ERROR("warning: random MAC workaround active\n");
+               eth_hw_addr_random(bp->dev);
+       } else if (IS_MF(bp)) {
+               val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
+               val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
+               if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
+                   (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
+                       bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
+               if (CNIC_SUPPORT(bp))
+                       bnx2x_get_cnic_mac_hwinfo(bp);
+       } else {
+               /* in SF read MACs from port configuration */
+               val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
+               val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
+               bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
+               if (CNIC_SUPPORT(bp))
+                       bnx2x_get_cnic_mac_hwinfo(bp);
+       }
+       memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
+       memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
  
        if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
                dev_err(&bp->pdev->dev,
                        "bad Ethernet MAC address configuration: %pM\n"
                        "change it manually before bringing up the appropriate network interface\n",
                        bp->dev->dev_addr);
+ }
  
+ static bool bnx2x_get_dropless_info(struct bnx2x *bp)
+ {
+       int tmp;
+       u32 cfg;
  
+       if (IS_MF(bp) && !CHIP_IS_E1x(bp)) {
+               /* Take function: tmp = func */
+               tmp = BP_ABS_FUNC(bp);
+               cfg = MF_CFG_RD(bp, func_ext_config[tmp].func_cfg);
+               cfg = !!(cfg & MACP_FUNC_CFG_PAUSE_ON_HOST_RING);
+       } else {
+               /* Take port: tmp = port */
+               tmp = BP_PORT(bp);
+               cfg = SHMEM_RD(bp,
+                              dev_info.port_hw_config[tmp].generic_features);
+               cfg = !!(cfg & PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED);
+       }
+       return cfg;
  }
  
- static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
+ static int bnx2x_get_hwinfo(struct bnx2x *bp)
  {
        int /*abs*/func = BP_ABS_FUNC(bp);
        int vn;
                        if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
                                dev_err(&bp->pdev->dev,
                                        "FORCING Normal Mode failed!!!\n");
+                               bnx2x_release_hw_lock(bp,
+                                                     HW_LOCK_RESOURCE_RESET);
                                return -EPERM;
                        }
                }
                } else
                        BNX2X_DEV_INFO("IGU Normal Mode\n");
  
-               bnx2x_get_igu_cam_info(bp);
+               rc = bnx2x_get_igu_cam_info(bp);
                bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+               if (rc)
+                       return rc;
        }
  
        /*
        return rc;
  }
  
- static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
+ static void bnx2x_read_fwinfo(struct bnx2x *bp)
  {
        int cnt, i, block_end, rodi;
        char vpd_start[BNX2X_VPD_LEN+1];
@@@ -10778,7 -11062,7 +11062,7 @@@ out_not_found
        return;
  }
  
- static void __devinit bnx2x_set_modes_bitmap(struct bnx2x *bp)
+ static void bnx2x_set_modes_bitmap(struct bnx2x *bp)
  {
        u32 flags = 0;
  
        INIT_MODE_FLAGS(bp) = flags;
  }
  
- static int __devinit bnx2x_init_bp(struct bnx2x *bp)
+ static int bnx2x_init_bp(struct bnx2x *bp)
  {
        int func;
        int rc;
        mutex_init(&bp->port.phy_mutex);
        mutex_init(&bp->fw_mb_mutex);
        spin_lock_init(&bp->stats_lock);
- #ifdef BCM_CNIC
-       mutex_init(&bp->cnic_mutex);
- #endif
  
        INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
        INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
                dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n");
  
        bp->disable_tpa = disable_tpa;
- #ifdef BCM_CNIC
        bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp);
- #endif
  
        /* Set TPA flags */
        if (bp->disable_tpa) {
        if (CHIP_IS_E1(bp))
                bp->dropless_fc = 0;
        else
-               bp->dropless_fc = dropless_fc;
+               bp->dropless_fc = dropless_fc | bnx2x_get_dropless_info(bp);
  
        bp->mrrs = mrrs;
  
        bp->timer.data = (unsigned long) bp;
        bp->timer.function = bnx2x_timer;
  
-       bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
-       bnx2x_dcbx_init_params(bp);
+       if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) &&
+           SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) &&
+           SHMEM2_RD(bp, dcbx_lldp_params_offset) &&
+           SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset)) {
+               bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
+               bnx2x_dcbx_init_params(bp);
+       } else {
+               bnx2x_dcbx_set_state(bp, false, BNX2X_DCBX_ENABLED_OFF);
+       }
  
- #ifdef BCM_CNIC
        if (CHIP_IS_E1x(bp))
                bp->cnic_base_cl_id = FP_SB_MAX_E1x;
        else
                bp->cnic_base_cl_id = FP_SB_MAX_E2;
- #endif
  
        /* multiple tx priority */
        if (CHIP_IS_E1x(bp))
        if (CHIP_IS_E3B0(bp))
                bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
  
+       /* We need at least one default status block for slow-path events,
+        * a second status block for the L2 queue, and a third status block
+        * for CNIC if supported.
+        */
+       if (CNIC_SUPPORT(bp))
+               bp->min_msix_vec_cnt = 3;
+       else
+               bp->min_msix_vec_cnt = 2;
+       BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt);
        return rc;
  }
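
The min_msix_vec_cnt values set just above encode the status-block budget from the comment: one default status block for the slow path, one for the L2 queue, and one more only when CNIC is supported. A minimal sketch of that rule, with bp and CNIC_SUPPORT() replaced by a plain flag since the driver's real structures are not reproduced here:

	/* Sketch only: default SB (slow path) + L2 SB, plus one more SB for CNIC. */
	static int min_msix_vectors(int cnic_supported)
	{
		return cnic_supported ? 3 : 2;
	}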
  
@@@ -11164,11 -11458,9 +11458,9 @@@ void bnx2x_set_rx_mode(struct net_devic
        }
  
        bp->rx_mode = rx_mode;
- #ifdef BCM_CNIC
        /* handle ISCSI SD mode */
        if (IS_MF_ISCSI_SD(bp))
                bp->rx_mode = BNX2X_RX_MODE_NONE;
- #endif
  
        /* Schedule the rx_mode command */
        if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
@@@ -11280,7 -11572,7 +11572,7 @@@ static const struct net_device_ops bnx2
  #endif
        .ndo_setup_tc           = bnx2x_setup_tc,
  
- #if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
+ #ifdef NETDEV_FCOE_WWNN
        .ndo_fcoe_get_wwn       = bnx2x_fcoe_get_wwn,
  #endif
  };
@@@ -11303,9 -11595,8 +11595,8 @@@ static int bnx2x_set_coherency_mask(str
        return 0;
  }
  
- static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
-                                   struct net_device *dev,
-                                   unsigned long board_type)
+ static int bnx2x_init_dev(struct pci_dev *pdev, struct net_device *dev,
+                         unsigned long board_type)
  {
        struct bnx2x *bp;
        int rc;
                goto err_out_disable;
        }
  
+       pci_read_config_dword(pdev, PCICFG_REVISION_ID_OFFSET, &pci_cfg_dword);
+       if ((pci_cfg_dword & PCICFG_REVESION_ID_MASK) ==
+           PCICFG_REVESION_ID_ERROR_VAL) {
+               pr_err("PCI device error, probably due to fan failure, aborting\n");
+               rc = -ENODEV;
+               goto err_out_disable;
+       }
        if (atomic_read(&pdev->enable_cnt) == 1) {
                rc = pci_request_regions(pdev, DRV_MODULE_NAME);
                if (rc) {
@@@ -11477,8 -11776,7 +11776,7 @@@ err_out
        return rc;
  }
  
- static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
-                                                int *width, int *speed)
+ static void bnx2x_get_pcie_width_speed(struct bnx2x *bp, int *width, int *speed)
  {
        u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
  
@@@ -11746,9 -12044,8 +12044,8 @@@ static int bnx2x_set_qm_cid_count(struc
  {
        int cid_count = BNX2X_L2_MAX_CID(bp);
  
- #ifdef BCM_CNIC
-       cid_count += CNIC_CID_MAX;
- #endif
+       if (CNIC_SUPPORT(bp))
+               cid_count += CNIC_CID_MAX;
        return roundup(cid_count, QM_CID_ROUND);
  }
  
   * @dev:      pci device
   *
   */
- static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev)
+ static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev,
+                                    int cnic_cnt)
  {
        int pos;
        u16 control;
         * one fast path queue: one FP queue + SB for CNIC
         */
        if (!pos)
-               return 1 + CNIC_PRESENT;
+               return 1 + cnic_cnt;
  
        /*
         * The value in the PCI configuration space is the index of the last
        return control & PCI_MSIX_FLAGS_QSIZE;
  }
  
- static int __devinit bnx2x_init_one(struct pci_dev *pdev,
-                                   const struct pci_device_id *ent)
+ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *);
+ static int bnx2x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
  {
        struct net_device *dev = NULL;
        struct bnx2x *bp;
        int pcie_width, pcie_speed;
        int rc, max_non_def_sbs;
        int rx_count, tx_count, rss_count, doorbell_size;
+       int cnic_cnt;
        /*
         * An estimated maximum supported CoS number according to the chip
         * version.
                return -ENODEV;
        }
  
-       max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev);
+       cnic_cnt = 1;
+       max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt);
  
        WARN_ON(!max_non_def_sbs);
  
        /* Maximum number of RSS queues: one IGU SB goes to CNIC */
-       rss_count = max_non_def_sbs - CNIC_PRESENT;
+       rss_count = max_non_def_sbs - cnic_cnt;
  
        /* Maximum number of netdev Rx queues: RSS + FCoE L2 */
-       rx_count = rss_count + FCOE_PRESENT;
+       rx_count = rss_count + cnic_cnt;
  
        /*
         * Maximum number of netdev Tx queues:
         * Maximum TSS queues * Maximum supported number of CoS  + FCoE L2
         */
-       tx_count = rss_count * max_cos_est + FCOE_PRESENT;
+       tx_count = rss_count * max_cos_est + cnic_cnt;
  
        /* dev zeroed in init_etherdev */
        dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
  
        bp->igu_sb_cnt = max_non_def_sbs;
        bp->msg_enable = debug;
+       bp->cnic_support = cnic_cnt;
+       bp->cnic_probe = bnx2x_cnic_probe;
        pci_set_drvdata(pdev, dev);
  
        rc = bnx2x_init_dev(pdev, dev, ent->driver_data);
                return rc;
        }
  
+       BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off");
        BNX2X_DEV_INFO("max_non_def_sbs %d\n", max_non_def_sbs);
  
        BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
        /* calc qm_cid_count */
        bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
  
- #ifdef BCM_CNIC
-       /* disable FCOE L2 queue for E1x */
+       /* disable FCOE L2 queue for E1x*/
        if (CHIP_IS_E1x(bp))
                bp->flags |= NO_FCOE_FLAG;
  
- #endif
+       /* disable FCOE for 57840 device, until FW supports it */
+       switch (ent->driver_data) {
+       case BCM57840_O:
+       case BCM57840_4_10:
+       case BCM57840_2_20:
+       case BCM57840_MFO:
+       case BCM57840_MF:
+               bp->flags |= NO_FCOE_FLAG;
+       }
  
        /* Set bp->num_queues for MSI-X mode*/
        bnx2x_set_num_queues(bp);
                goto init_one_exit;
        }
  
- #ifdef BCM_CNIC
        if (!NO_FCOE(bp)) {
                /* Add storage MAC address */
                rtnl_lock();
                dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
                rtnl_unlock();
        }
- #endif
  
        bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
  
@@@ -11961,7 -12271,7 +12271,7 @@@ init_one_exit
        return rc;
  }
  
- static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
+ static void bnx2x_remove_one(struct pci_dev *pdev)
  {
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp;
        }
        bp = netdev_priv(dev);
  
- #ifdef BCM_CNIC
        /* Delete storage MAC address */
        if (!NO_FCOE(bp)) {
                rtnl_lock();
                dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
                rtnl_unlock();
        }
- #endif
  
  #ifdef BCM_DCBNL
        /* Delete app tlvs from dcbnl */
@@@ -12027,15 -12335,17 +12335,17 @@@ static int bnx2x_eeh_nic_unload(struct 
  
        bp->rx_mode = BNX2X_RX_MODE_NONE;
  
- #ifdef BCM_CNIC
-       bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
- #endif
+       if (CNIC_LOADED(bp))
+               bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
        /* Stop Tx */
        bnx2x_tx_disable(bp);
  
        bnx2x_netif_stop(bp, 0);
        /* Delete all NAPI objects */
        bnx2x_del_all_napi(bp);
+       if (CNIC_LOADED(bp))
+               bnx2x_del_all_napi_cnic(bp);
  
        del_timer_sync(&bp->timer);
  
@@@ -12176,7 -12486,7 +12486,7 @@@ static struct pci_driver bnx2x_pci_driv
        .name        = DRV_MODULE_NAME,
        .id_table    = bnx2x_pci_tbl,
        .probe       = bnx2x_init_one,
-       .remove      = __devexit_p(bnx2x_remove_one),
+       .remove      = bnx2x_remove_one,
        .suspend     = bnx2x_suspend,
        .resume      = bnx2x_resume,
        .err_handler = &bnx2x_err_handler,
@@@ -12226,7 -12536,6 +12536,6 @@@ void bnx2x_notify_link_changed(struct b
  module_init(bnx2x_init);
  module_exit(bnx2x_cleanup);
  
- #ifdef BCM_CNIC
  /**
   * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
   *
@@@ -12679,12 -12988,31 +12988,31 @@@ static int bnx2x_register_cnic(struct n
  {
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+       int rc;
+       DP(NETIF_MSG_IFUP, "Register_cnic called\n");
  
        if (ops == NULL) {
                BNX2X_ERR("NULL ops received\n");
                return -EINVAL;
        }
  
+       if (!CNIC_SUPPORT(bp)) {
+               BNX2X_ERR("Can't register CNIC when not supported\n");
+               return -EOPNOTSUPP;
+       }
+       if (!CNIC_LOADED(bp)) {
+               rc = bnx2x_load_cnic(bp);
+               if (rc) {
+                       BNX2X_ERR("CNIC-related load failed\n");
+                       return rc;
+               }
+       }
+       bp->cnic_enabled = true;
        bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!bp->cnic_kwq)
                return -ENOMEM;
@@@ -12774,7 -13102,5 +13102,5 @@@ struct cnic_eth_dev *bnx2x_cnic_probe(s
           cp->starting_cid);
        return cp;
  }
- EXPORT_SYMBOL(bnx2x_cnic_probe);
  
- #endif /* BCM_CNIC */
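
Most of the bnx2x changes in this file follow one pattern: code that used to be compiled out under #ifdef BCM_CNIC is now always built and gated at run time by CNIC_SUPPORT(bp), seeded from cnic_cnt in bnx2x_init_one(). A hedged sketch of that shape, with illustrative names rather than the driver's exact definitions:

	/* Illustrative only: run-time gating instead of a compile-time #ifdef. */
	struct nic {
		int cnic_support;	/* set once at probe time */
	};

	static void nic_get_cnic_info(struct nic *n)
	{
		if (!n->cnic_support)
			return;		/* feature off, no rebuild needed */
		/* ... CNIC-specific setup would go here ... */
	}

One binary then serves both configurations, which lines up with dropping EXPORT_SYMBOL(bnx2x_cnic_probe) above in favour of the bp->cnic_probe callback installed in bnx2x_init_one().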
  
index d09b0d9ca68becd73cdf16ba2f38cfad95b77fea,3a2b8c65642dd7566ffbccfbbcc0f479ee3d49f2..8611c89f034db7b801b101bb42d5ba575a13f8e1
@@@ -64,7 -64,7 +64,7 @@@ static const char mlx4_en_version[] 
  
  /* Enable RSS UDP traffic */
  MLX4_EN_PARM_INT(udp_rss, 1,
 -               "Enable RSS for incomming UDP traffic or disabled (0)");
 +               "Enable RSS for incoming UDP traffic or disabled (0)");
  
  /* Priority pausing */
  MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
@@@ -250,7 -250,7 +250,7 @@@ static void *mlx4_en_add(struct mlx4_de
                                rounddown_pow_of_two(max_t(int, MIN_RX_RINGS,
                                                           min_t(int,
                                                                 dev->caps.num_comp_vectors,
-                                                                MAX_RX_RINGS)));
+                                                                DEF_RX_RINGS)));
                } else {
                        mdev->profile.prof[i].rx_ring_num = rounddown_pow_of_two(
                                min_t(int, dev->caps.comp_pool/
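
The mlx4_en change above only swaps the upper clamp on the default RX ring count from MAX_RX_RINGS to DEF_RX_RINGS. A standalone sketch with assumed constants (the real MIN/DEF/MAX_RX_RINGS values live in mlx4_en.h and are not shown in this hunk) illustrating the effect on hosts with many completion vectors:

	/* Assumed values, for illustration only. */
	#define MIN_RX_RINGS	4
	#define DEF_RX_RINGS	16
	#define MAX_RX_RINGS	128

	static unsigned int rounddown_pow2(unsigned int n)
	{
		unsigned int p = 1;

		while (p * 2 <= n)
			p *= 2;
		return p;
	}

	static unsigned int default_rx_rings(unsigned int comp_vectors)
	{
		unsigned int n = comp_vectors < DEF_RX_RINGS ? comp_vectors
							     : DEF_RX_RINGS;

		if (n < MIN_RX_RINGS)
			n = MIN_RX_RINGS;
		return rounddown_pow2(n);
		/* e.g. 40 vectors: the old MAX_RX_RINGS clamp gave 32, this gives 16 */
	}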
index 4c3b67c1304706ec54b7e99bdffc2094f972ce04,d833f592789120429d1c416f0c68b1e8ac528a8c..b30af2b1cd28d46cd6525b812ca50c8133a924bd
@@@ -34,29 -34,28 +34,28 @@@ static int qlcnic_mac_learn
  module_param(qlcnic_mac_learn, int, 0444);
  MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)");
  
- static int use_msi = 1;
- module_param(use_msi, int, 0444);
+ static int qlcnic_use_msi = 1;
  MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
+ module_param_named(use_msi, qlcnic_use_msi, int, 0444);
  
- static int use_msi_x = 1;
- module_param(use_msi_x, int, 0444);
+ static int qlcnic_use_msi_x = 1;
  MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
+ module_param_named(use_msi_x, qlcnic_use_msi_x, int, 0444);
  
- static int auto_fw_reset = 1;
- module_param(auto_fw_reset, int, 0644);
+ static int qlcnic_auto_fw_reset = 1;
  MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
+ module_param_named(auto_fw_reset, qlcnic_auto_fw_reset, int, 0644);
  
- static int load_fw_file;
- module_param(load_fw_file, int, 0444);
+ static int qlcnic_load_fw_file;
  MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");
+ module_param_named(load_fw_file, qlcnic_load_fw_file, int, 0444);
  
  static int qlcnic_config_npars;
  module_param(qlcnic_config_npars, int, 0444);
  MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)");
  
- static int __devinit qlcnic_probe(struct pci_dev *pdev,
-               const struct pci_device_id *ent);
- static void __devexit qlcnic_remove(struct pci_dev *pdev);
+ static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+ static void qlcnic_remove(struct pci_dev *pdev);
  static int qlcnic_open(struct net_device *netdev);
  static int qlcnic_close(struct net_device *netdev);
  static void qlcnic_tx_timeout(struct net_device *netdev);
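
The module-parameter block earlier in this hunk prefixes the C symbols (use_msi becomes qlcnic_use_msi, and so on) while keeping the user-visible parameter names stable via module_param_named(). A minimal sketch of the idiom with a hypothetical parameter, not one of qlcnic's:

	#include <linux/module.h>

	/* "modprobe foo debug_level=2" still works; only the C symbol is
	 * prefixed to keep the driver's globals out of each other's way.
	 */
	static int foo_debug_level;
	module_param_named(debug_level, foo_debug_level, int, 0644);
	MODULE_PARM_DESC(debug_level, "Verbosity of driver messages (0-3)");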
@@@ -66,17 -65,10 +65,10 @@@ static void qlcnic_fw_poll_work(struct 
  static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
                work_func_t func, int delay);
  static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
- static int qlcnic_poll(struct napi_struct *napi, int budget);
- static int qlcnic_rx_poll(struct napi_struct *napi, int budget);
  #ifdef CONFIG_NET_POLL_CONTROLLER
  static void qlcnic_poll_controller(struct net_device *netdev);
  #endif
  
- static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
- static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
- static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
- static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
  static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
  static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8);
  static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
@@@ -92,14 -84,15 +84,15 @@@ static int qlcnic_start_firmware(struc
  
  static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter);
  static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
- static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
- static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
  static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
  static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
                                struct qlcnic_esw_func_cfg *);
  static int qlcnic_vlan_rx_add(struct net_device *, u16);
  static int qlcnic_vlan_rx_del(struct net_device *, u16);
  
+ #define QLCNIC_IS_TSO_CAPABLE(adapter)        \
+       ((adapter)->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
  /*  PCI Device ID Table  */
  #define ENTRY(device) \
        {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
@@@ -115,9 -108,7 +108,7 @@@ static DEFINE_PCI_DEVICE_TABLE(qlcnic_p
  MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
  
  
- inline void
- qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
-               struct qlcnic_host_tx_ring *tx_ring)
+ inline void qlcnic_update_cmd_producer(struct qlcnic_host_tx_ring *tx_ring)
  {
        writel(tx_ring->producer, tx_ring->crb_cmd_producer);
  }
@@@ -129,26 -120,34 +120,34 @@@ static const u32 msi_tgt_status[8] = 
        ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
  };
  
- static const
- struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
- static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
- {
-       writel(0, sds_ring->crb_intr_mask);
- }
- static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
- {
-       struct qlcnic_adapter *adapter = sds_ring->adapter;
+ static const struct qlcnic_board_info qlcnic_boards[] = {
+       {0x1077, 0x8020, 0x1077, 0x203,
+        "8200 Series Single Port 10GbE Converged Network Adapter"
+        "8200 Series Single Port 10GbE Converged Network Adapter "
+        "(TCP/IP Networking)"},
+       {0x1077, 0x8020, 0x1077, 0x207,
+        "8200 Series Dual Port 10GbE Converged Network Adapter "
+       {0x1077, 0x8020, 0x1077, 0x20b,
+        "3200 Series Dual Port 10Gb Intelligent Ethernet Adapter"},
+       {0x1077, 0x8020, 0x1077, 0x20c,
+        "3200 Series Quad Port 1Gb Intelligent Ethernet Adapter"},
+       {0x1077, 0x8020, 0x1077, 0x20f,
+        "3200 Series Single Port 10Gb Intelligent Ethernet Adapter"},
+       {0x1077, 0x8020, 0x103c, 0x3733,
+        "NC523SFP 10Gb 2-port Server Adapter"},
+       {0x1077, 0x8020, 0x103c, 0x3346,
+        "CN1000Q Dual Port Converged Network Adapter"},
+       {0x1077, 0x8020, 0x1077, 0x210,
+        "QME8242-k 10GbE Dual Port Mezzanine Card"},
+       {0x1077, 0x8020, 0x0, 0x0, "cLOM8214 1/10GbE Controller"},
+ };
  
-       writel(0x1, sds_ring->crb_intr_mask);
+ #define NUM_SUPPORTED_BOARDS ARRAY_SIZE(qlcnic_boards)
  
-       if (!QLCNIC_IS_MSI_FAMILY(adapter))
-               writel(0xfbff, adapter->tgt_mask_reg);
- }
+ static const
+ struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
  
- static int
- qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
+ int qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
  {
        int size = sizeof(struct qlcnic_host_sds_ring) * count;
  
        return recv_ctx->sds_rings == NULL;
  }
  
- static void
- qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
+ void qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
  {
        if (recv_ctx->sds_rings != NULL)
                kfree(recv_ctx->sds_rings);
        recv_ctx->sds_rings = NULL;
  }
  
- static int
- qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
- {
-       int ring;
-       struct qlcnic_host_sds_ring *sds_ring;
-       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
-       if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
-               return -ENOMEM;
-       for (ring = 0; ring < adapter->max_sds_rings; ring++) {
-               sds_ring = &recv_ctx->sds_rings[ring];
-               if (ring == adapter->max_sds_rings - 1)
-                       netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
-                               QLCNIC_NETDEV_WEIGHT/adapter->max_sds_rings);
-               else
-                       netif_napi_add(netdev, &sds_ring->napi,
-                               qlcnic_rx_poll, QLCNIC_NETDEV_WEIGHT*2);
-       }
-       return 0;
- }
- static void
- qlcnic_napi_del(struct qlcnic_adapter *adapter)
- {
-       int ring;
-       struct qlcnic_host_sds_ring *sds_ring;
-       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
-       for (ring = 0; ring < adapter->max_sds_rings; ring++) {
-               sds_ring = &recv_ctx->sds_rings[ring];
-               netif_napi_del(&sds_ring->napi);
-       }
-       qlcnic_free_sds_rings(adapter->recv_ctx);
- }
- static void
- qlcnic_napi_enable(struct qlcnic_adapter *adapter)
- {
-       int ring;
-       struct qlcnic_host_sds_ring *sds_ring;
-       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
-       if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
-               return;
-       for (ring = 0; ring < adapter->max_sds_rings; ring++) {
-               sds_ring = &recv_ctx->sds_rings[ring];
-               napi_enable(&sds_ring->napi);
-               qlcnic_enable_int(sds_ring);
-       }
- }
- static void
- qlcnic_napi_disable(struct qlcnic_adapter *adapter)
- {
-       int ring;
-       struct qlcnic_host_sds_ring *sds_ring;
-       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
-       if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
-               return;
-       for (ring = 0; ring < adapter->max_sds_rings; ring++) {
-               sds_ring = &recv_ctx->sds_rings[ring];
-               qlcnic_disable_int(sds_ring);
-               napi_synchronize(&sds_ring->napi);
-               napi_disable(&sds_ring->napi);
-       }
- }
  static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
  {
        memset(&adapter->stats, 0, sizeof(adapter->stats));
@@@ -363,7 -287,7 +287,7 @@@ static int qlcnic_enable_msix(struct ql
        adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
        qlcnic_set_msix_bit(pdev, 0);
  
-       if (adapter->msix_supported) {
+       if (adapter->ahw->msix_supported) {
   enable_msix:
                qlcnic_init_msix_entries(adapter, num_msix);
                err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
        return err;
  }
  
  static void qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
  {
+       u32 offset, mask_reg;
        const struct qlcnic_legacy_intr_set *legacy_intrp;
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
        struct pci_dev *pdev = adapter->pdev;
  
-       if (use_msi && !pci_enable_msi(pdev)) {
+       if (qlcnic_use_msi && !pci_enable_msi(pdev)) {
                adapter->flags |= QLCNIC_MSI_ENABLED;
-               adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
-                               msi_tgt_status[adapter->ahw->pci_func]);
+               offset = msi_tgt_status[adapter->ahw->pci_func];
+               adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter->ahw,
+                                                           offset);
                dev_info(&pdev->dev, "using msi interrupts\n");
                adapter->msix_entries[0].vector = pdev->irq;
                return;
        }
  
        legacy_intrp = &legacy_intr[adapter->ahw->pci_func];
-       adapter->int_vec_bit = legacy_intrp->int_vec_bit;
-       adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
-                       legacy_intrp->tgt_status_reg);
-       adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
-                       legacy_intrp->tgt_mask_reg);
-       adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);
-       adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
-                       ISR_INT_STATE_REG);
+       adapter->ahw->int_vec_bit = legacy_intrp->int_vec_bit;
+       offset = legacy_intrp->tgt_status_reg;
+       adapter->tgt_status_reg = qlcnic_get_ioaddr(ahw, offset);
+       mask_reg = legacy_intrp->tgt_mask_reg;
+       adapter->tgt_mask_reg = qlcnic_get_ioaddr(ahw, mask_reg);
+       adapter->isr_int_vec = qlcnic_get_ioaddr(ahw, ISR_INT_VECTOR);
+       adapter->crb_int_state_reg = qlcnic_get_ioaddr(ahw, ISR_INT_STATE_REG);
        dev_info(&pdev->dev, "using legacy interrupts\n");
        adapter->msix_entries[0].vector = pdev->irq;
  }
@@@ -420,7 -343,7 +343,7 @@@ qlcnic_setup_intr(struct qlcnic_adapte
  {
        int num_msix;
  
-       if (adapter->msix_supported) {
+       if (adapter->ahw->msix_supported) {
                num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(),
                                QLCNIC_DEF_NUM_STS_DESC_RINGS));
        } else
@@@ -448,19 -371,25 +371,25 @@@ qlcnic_cleanup_pci_map(struct qlcnic_ad
                iounmap(adapter->ahw->pci_base0);
  }
  
- static int
- qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
+ static int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
  {
        struct qlcnic_pci_info *pci_info;
-       int i, ret = 0;
+       int i, ret = 0, j = 0;
+       u16 act_pci_func;
        u8 pfn;
  
        pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
        if (!pci_info)
                return -ENOMEM;
  
+       ret = qlcnic_get_pci_info(adapter, pci_info);
+       if (ret)
+               goto err_pci_info;
+       act_pci_func = adapter->ahw->act_pci_func;
        adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
-                               QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
+                                act_pci_func, GFP_KERNEL);
        if (!adapter->npars) {
                ret = -ENOMEM;
                goto err_pci_info;
                goto err_npars;
        }
  
-       ret = qlcnic_get_pci_info(adapter, pci_info);
-       if (ret)
-               goto err_eswitch;
        for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
                pfn = pci_info[i].id;
                if (pfn >= QLCNIC_MAX_PCI_FUNC) {
                        ret = QL_STATUS_INVALID_PARAM;
                        goto err_eswitch;
                }
-               adapter->npars[pfn].active = (u8)pci_info[i].active;
-               adapter->npars[pfn].type = (u8)pci_info[i].type;
-               adapter->npars[pfn].phy_port = (u8)pci_info[i].default_port;
-               adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
-               adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
+               if (!pci_info[i].active ||
+                   (pci_info[i].type != QLCNIC_TYPE_NIC))
+                       continue;
+               adapter->npars[j].pci_func = pfn;
+               adapter->npars[j].active = (u8)pci_info[i].active;
+               adapter->npars[j].type = (u8)pci_info[i].type;
+               adapter->npars[j].phy_port = (u8)pci_info[i].default_port;
+               adapter->npars[j].min_bw = pci_info[i].tx_min_bw;
+               adapter->npars[j].max_bw = pci_info[i].tx_max_bw;
+               j++;
        }
  
        for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
@@@ -512,33 -445,29 +445,29 @@@ static in
  qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
  {
        u8 id;
-       u32 ref_count;
        int i, ret = 1;
        u32 data = QLCNIC_MGMT_FUNC;
-       void __iomem *priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
  
-       /* If other drivers are not in use set their privilege level */
-       ref_count = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
        ret = qlcnic_api_lock(adapter);
        if (ret)
                goto err_lock;
  
        if (qlcnic_config_npars) {
-               for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
-                       id = i;
-                       if (adapter->npars[i].type != QLCNIC_TYPE_NIC ||
-                               id == adapter->ahw->pci_func)
+               for (i = 0; i < ahw->act_pci_func; i++) {
+                       id = adapter->npars[i].pci_func;
+                       if (id == ahw->pci_func)
                                continue;
                        data |= (qlcnic_config_npars &
                                        QLC_DEV_SET_DRV(0xf, id));
                }
        } else {
-               data = readl(priv_op);
-               data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw->pci_func)) |
+               data = QLCRD32(adapter, QLCNIC_DRV_OP_MODE);
+               data = (data & ~QLC_DEV_SET_DRV(0xf, ahw->pci_func)) |
                        (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
-                       adapter->ahw->pci_func));
+                                        ahw->pci_func));
        }
-       writel(data, priv_op);
+       QLCWR32(adapter, QLCNIC_DRV_OP_MODE, data);
        qlcnic_api_unlock(adapter);
  err_lock:
        return ret;
@@@ -554,8 -483,8 +483,8 @@@ qlcnic_check_vf(struct qlcnic_adapter *
        u32 op_mode, priv_level;
  
        /* Determine FW API version */
-       adapter->fw_hal_version = readl(adapter->ahw->pci_base0 +
-                                       QLCNIC_FW_API);
+       adapter->ahw->fw_hal_version = readl(adapter->ahw->pci_base0 +
+                                            QLCNIC_FW_API);
  
        /* Find PCI function number */
        pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
                priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
  
        if (priv_level == QLCNIC_NON_PRIV_FUNC) {
-               adapter->op_mode = QLCNIC_NON_PRIV_FUNC;
+               adapter->ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
                dev_info(&adapter->pdev->dev,
                        "HAL Version: %d Non Privileged function\n",
-                       adapter->fw_hal_version);
+                        adapter->ahw->fw_hal_version);
                adapter->nic_ops = &qlcnic_vf_ops;
        } else
                adapter->nic_ops = &qlcnic_ops;
  }
  
- static int
qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
+ #define QLCNIC_82XX_BAR0_LENGTH 0x00200000UL
static void qlcnic_get_bar_length(u32 dev_id, ulong *bar)
  {
-       void __iomem *mem_ptr0 = NULL;
-       resource_size_t mem_base;
-       unsigned long mem_len, pci_len0 = 0;
+       switch (dev_id) {
+       case PCI_DEVICE_ID_QLOGIC_QLE824X:
+               *bar = QLCNIC_82XX_BAR0_LENGTH;
+               break;
+       default:
+               *bar = 0;
+       }
+ }
  
-       struct pci_dev *pdev = adapter->pdev;
+ static int qlcnic_setup_pci_map(struct pci_dev *pdev,
+                               struct qlcnic_hardware_context *ahw)
+ {
+       u32 offset;
+       void __iomem *mem_ptr0 = NULL;
+       unsigned long mem_len, pci_len0 = 0, bar0_len;
  
        /* remap phys address */
-       mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
        mem_len = pci_resource_len(pdev, 0);
  
-       if (mem_len == QLCNIC_PCI_2MB_SIZE) {
+       qlcnic_get_bar_length(pdev->device, &bar0_len);
+       if (mem_len >= bar0_len) {
  
                mem_ptr0 = pci_ioremap_bar(pdev, 0);
                if (mem_ptr0 == NULL) {
        }
  
        dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
-       adapter->ahw->pci_base0 = mem_ptr0;
-       adapter->ahw->pci_len0 = pci_len0;
-       qlcnic_check_vf(adapter);
-       adapter->ahw->ocm_win_crb = qlcnic_get_ioaddr(adapter,
-               QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(
-                       adapter->ahw->pci_func)));
+       ahw->pci_base0 = mem_ptr0;
+       ahw->pci_len0 = pci_len0;
+       offset = QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(ahw->pci_func));
+       qlcnic_get_ioaddr(ahw, offset);
  
        return 0;
  }
  
- static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
+ static void qlcnic_get_board_name(struct qlcnic_adapter *adapter, char *name)
  {
        struct pci_dev *pdev = adapter->pdev;
        int i, found = 0;
@@@ -659,7 -593,7 +593,7 @@@ qlcnic_check_options(struct qlcnic_adap
  
        adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
  
-       if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC) {
+       if (adapter->ahw->op_mode != QLCNIC_NON_PRIV_FUNC) {
                if (fw_dump->tmpl_hdr == NULL ||
                                adapter->fw_version > prev_fw_version) {
                        if (fw_dump->tmpl_hdr)
                adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
        }
  
-       adapter->msix_supported = !!use_msi_x;
+       adapter->ahw->msix_supported = !!qlcnic_use_msi_x;
  
        adapter->num_txd = MAX_CMD_DESCRIPTORS;
  
@@@ -704,19 -638,20 +638,20 @@@ qlcnic_initialize_nic(struct qlcnic_ada
        int err;
        struct qlcnic_info nic_info;
  
+       memset(&nic_info, 0, sizeof(struct qlcnic_info));
        err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw->pci_func);
        if (err)
                return err;
  
-       adapter->physical_port = (u8)nic_info.phys_port;
-       adapter->switch_mode = nic_info.switch_mode;
-       adapter->max_tx_ques = nic_info.max_tx_ques;
-       adapter->max_rx_ques = nic_info.max_rx_ques;
-       adapter->capabilities = nic_info.capabilities;
-       adapter->max_mac_filters = nic_info.max_mac_filters;
-       adapter->max_mtu = nic_info.max_mtu;
+       adapter->ahw->physical_port = (u8)nic_info.phys_port;
+       adapter->ahw->switch_mode = nic_info.switch_mode;
+       adapter->ahw->max_tx_ques = nic_info.max_tx_ques;
+       adapter->ahw->max_rx_ques = nic_info.max_rx_ques;
+       adapter->ahw->capabilities = nic_info.capabilities;
+       adapter->ahw->max_mac_filters = nic_info.max_mac_filters;
+       adapter->ahw->max_mtu = nic_info.max_mtu;
  
-       if (adapter->capabilities & BIT_6)
+       if (adapter->ahw->capabilities & BIT_6)
                adapter->flags |= QLCNIC_ESWITCH_ENABLED;
        else
                adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
        return err;
  }
  
- static void
- qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
-               struct qlcnic_esw_func_cfg *esw_cfg)
+ void qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
+                           struct qlcnic_esw_func_cfg *esw_cfg)
  {
        if (esw_cfg->discard_tagged)
                adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
@@@ -757,9 -691,8 +691,8 @@@ qlcnic_vlan_rx_del(struct net_device *n
        return 0;
  }
  
- static void
- qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
-               struct qlcnic_esw_func_cfg *esw_cfg)
+ void qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
+                                     struct qlcnic_esw_func_cfg *esw_cfg)
  {
        adapter->flags &= ~(QLCNIC_MACSPOOF | QLCNIC_MAC_OVERRIDE_DISABLED |
                                QLCNIC_PROMISC_DISABLED);
        qlcnic_set_netdev_features(adapter, esw_cfg);
  }
  
- static int
- qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
+ static int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
  {
        struct qlcnic_esw_func_cfg esw_cfg;
  
@@@ -805,7 -737,7 +737,7 @@@ qlcnic_set_netdev_features(struct qlcni
        vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM |
                        NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_FILTER);
  
-       if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
+       if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
                features |= (NETIF_F_TSO | NETIF_F_TSO6);
                vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
        }
@@@ -851,7 -783,7 +783,7 @@@ qlcnic_check_eswitch_mode(struct qlcnic
  
        if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
                if (priv_level == QLCNIC_MGMT_FUNC) {
-                       adapter->op_mode = QLCNIC_MGMT_FUNC;
+                       adapter->ahw->op_mode = QLCNIC_MGMT_FUNC;
                        err = qlcnic_init_pci_info(adapter);
                        if (err)
                                return err;
                        qlcnic_set_function_modes(adapter);
                        dev_info(&adapter->pdev->dev,
                                "HAL Version: %d, Management function\n",
-                               adapter->fw_hal_version);
+                                adapter->ahw->fw_hal_version);
                } else if (priv_level == QLCNIC_PRIV_FUNC) {
-                       adapter->op_mode = QLCNIC_PRIV_FUNC;
+                       adapter->ahw->op_mode = QLCNIC_PRIV_FUNC;
                        dev_info(&adapter->pdev->dev,
                                "HAL Version: %d, Privileged function\n",
-                               adapter->fw_hal_version);
+                                adapter->ahw->fw_hal_version);
                }
        }
  
        return err;
  }
  
- static int
- qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
+ static int qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
  {
        struct qlcnic_esw_func_cfg esw_cfg;
        struct qlcnic_npar_info *npar;
        if (adapter->need_fw_reset)
                return 0;
  
-       for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
-               if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
-                       continue;
+       for (i = 0; i < adapter->ahw->act_pci_func; i++) {
                memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg));
-               esw_cfg.pci_func = i;
-               esw_cfg.offload_flags = BIT_0;
+               esw_cfg.pci_func = adapter->npars[i].pci_func;
                esw_cfg.mac_override = BIT_0;
                esw_cfg.promisc_mode = BIT_0;
-               if (adapter->capabilities  & QLCNIC_FW_CAPABILITY_TSO)
-                       esw_cfg.offload_flags |= (BIT_1 | BIT_2);
+               if (qlcnic_82xx_check(adapter)) {
+                       esw_cfg.offload_flags = BIT_0;
+                       if (QLCNIC_IS_TSO_CAPABLE(adapter))
+                               esw_cfg.offload_flags |= (BIT_1 | BIT_2);
+               }
                if (qlcnic_config_switch_port(adapter, &esw_cfg))
                        return -EIO;
                npar = &adapter->npars[i];
@@@ -930,22 -861,24 +861,24 @@@ qlcnic_reset_eswitch_config(struct qlcn
        return 0;
  }
  
- static int
- qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
+ static int qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
  {
        int i, err;
        struct qlcnic_npar_info *npar;
        struct qlcnic_info nic_info;
+       u8 pci_func;
  
-       if (!adapter->need_fw_reset)
-               return 0;
+       if (qlcnic_82xx_check(adapter))
+               if (!adapter->need_fw_reset)
+                       return 0;
  
        /* Set the NPAR config data after FW reset */
-       for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+       for (i = 0; i < adapter->ahw->act_pci_func; i++) {
                npar = &adapter->npars[i];
-               if (npar->type != QLCNIC_TYPE_NIC)
-                       continue;
-               err = qlcnic_get_nic_info(adapter, &nic_info, i);
+               pci_func = npar->pci_func;
+               memset(&nic_info, 0, sizeof(struct qlcnic_info));
+               err = qlcnic_get_nic_info(adapter,
+                                         &nic_info, pci_func);
                if (err)
                        return err;
                nic_info.min_tx_bw = npar->min_bw;
  
                if (npar->enable_pm) {
                        err = qlcnic_config_port_mirroring(adapter,
-                                                       npar->dest_npar, 1, i);
+                                                          npar->dest_npar, 1,
+                                                          pci_func);
                        if (err)
                                return err;
                }
-               err = qlcnic_reset_eswitch_config(adapter, npar, i);
+               err = qlcnic_reset_eswitch_config(adapter, npar, pci_func);
                if (err)
                        return err;
        }
@@@ -972,7 -906,7 +906,7 @@@ static int qlcnic_check_npar_opertional
        u8 npar_opt_timeo = QLCNIC_DEV_NPAR_OPER_TIMEO;
        u32 npar_state;
  
-       if (adapter->op_mode == QLCNIC_MGMT_FUNC)
+       if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
                return 0;
  
        npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
        }
        if (!npar_opt_timeo) {
                dev_err(&adapter->pdev->dev,
 -                      "Waiting for NPAR state to opertional timeout\n");
 +                      "Waiting for NPAR state to operational timeout\n");
                return -EIO;
        }
        return 0;
@@@ -994,7 -928,7 +928,7 @@@ qlcnic_set_mgmt_operations(struct qlcni
        int err;
  
        if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
-                   adapter->op_mode != QLCNIC_MGMT_FUNC)
+           adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
                return 0;
  
        err = qlcnic_set_default_offload_settings(adapter);
@@@ -1021,14 -955,14 +955,14 @@@ qlcnic_start_firmware(struct qlcnic_ada
        else if (!err)
                goto check_fw_status;
  
-       if (load_fw_file)
+       if (qlcnic_load_fw_file)
                qlcnic_request_firmware(adapter);
        else {
                err = qlcnic_check_flash_fw_ver(adapter);
                if (err)
                        goto err_out;
  
-               adapter->fw_type = QLCNIC_FLASH_ROMIMAGE;
+               adapter->ahw->fw_type = QLCNIC_FLASH_ROMIMAGE;
        }
  
        err = qlcnic_need_fw_reset(adapter);
@@@ -1089,7 -1023,7 +1023,7 @@@ qlcnic_request_irq(struct qlcnic_adapte
        struct net_device *netdev = adapter->netdev;
        struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
  
-       if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
+       if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
                handler = qlcnic_tmp_intr;
                if (!QLCNIC_IS_MSI_FAMILY(adapter))
                        flags |= IRQF_SHARED;
@@@ -1148,7 -1082,7 +1082,7 @@@ __qlcnic_up(struct qlcnic_adapter *adap
        if (qlcnic_set_eswitch_port_config(adapter))
                return -EIO;
  
-       if (adapter->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) {
+       if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) {
                capab2 = QLCRD32(adapter, CRB_FW_CAPABILITIES_2);
                if (capab2 & QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG)
                        adapter->flags |= QLCNIC_FW_LRO_MSS_CAP;
  
        qlcnic_linkevent_request(adapter, 1);
  
-       adapter->reset_context = 0;
+       adapter->ahw->reset_context = 0;
        set_bit(__QLCNIC_DEV_UP, &adapter->state);
        return 0;
  }
@@@ -1312,7 -1246,7 +1246,7 @@@ void qlcnic_diag_free_res(struct net_de
        int ring;
  
        clear_bit(__QLCNIC_DEV_UP, &adapter->state);
-       if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
+       if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
                for (ring = 0; ring < adapter->max_sds_rings; ring++) {
                        sds_ring = &adapter->recv_ctx->sds_rings[ring];
                        qlcnic_disable_int(sds_ring);
  
        qlcnic_detach(adapter);
  
-       adapter->diag_test = 0;
+       adapter->ahw->diag_test = 0;
        adapter->max_sds_rings = max_sds_rings;
  
        if (qlcnic_attach(adapter))
@@@ -1393,7 -1327,7 +1327,7 @@@ int qlcnic_diag_alloc_res(struct net_de
        qlcnic_detach(adapter);
  
        adapter->max_sds_rings = 1;
-       adapter->diag_test = test;
+       adapter->ahw->diag_test = test;
  
        ret = qlcnic_attach(adapter);
        if (ret) {
                qlcnic_post_rx_buffers(adapter, rds_ring);
        }
  
-       if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
+       if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
                for (ring = 0; ring < adapter->max_sds_rings; ring++) {
                        sds_ring = &adapter->recv_ctx->sds_rings[ring];
                        qlcnic_enable_int(sds_ring);
                }
        }
  
-       if (adapter->diag_test == QLCNIC_LOOPBACK_TEST) {
+       if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
                adapter->ahw->loopback_state = 0;
                qlcnic_linkevent_request(adapter, 1);
        }
@@@ -1485,14 -1419,14 +1419,14 @@@ qlcnic_reset_context(struct qlcnic_adap
  }
  
  static int
- qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
-               struct net_device *netdev, u8 pci_using_dac)
+ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
+                   int pci_using_dac)
  {
        int err;
        struct pci_dev *pdev = adapter->pdev;
  
-       adapter->mc_enabled = 0;
-       adapter->max_mc_count = 38;
+       adapter->ahw->mc_enabled = 0;
+       adapter->ahw->max_mc_count = 38;
  
        netdev->netdev_ops         = &qlcnic_netdev_ops;
        netdev->watchdog_timeo     = 5*HZ;
        netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
                NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
  
-       if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
+       if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
                netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
-       if (pci_using_dac)
+       if (pci_using_dac == 1)
                netdev->hw_features |= NETIF_F_HIGHDMA;
  
        netdev->vlan_features = netdev->hw_features;
  
-       if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
+       if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
                netdev->hw_features |= NETIF_F_HW_VLAN_TX;
-       if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
+       if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
                netdev->hw_features |= NETIF_F_LRO;
  
        netdev->features |= netdev->hw_features |
        return 0;
  }
  
- static int qlcnic_set_dma_mask(struct pci_dev *pdev, u8 *pci_using_dac)
+ static int qlcnic_set_dma_mask(struct pci_dev *pdev, int *pci_using_dac)
  {
        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
                        !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
@@@ -1559,15 -1493,14 +1493,14 @@@ qlcnic_alloc_msix_entries(struct qlcnic
        return -ENOMEM;
  }
  
- static int __devinit
+ static int
  qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
  {
        struct net_device *netdev = NULL;
        struct qlcnic_adapter *adapter = NULL;
-       int err;
+       int err, pci_using_dac = -1;
        uint8_t revision_id;
-       uint8_t pci_using_dac;
-       char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
+       char board_name[QLCNIC_MAX_BOARD_NAME_LEN];
  
        err = pci_enable_device(pdev);
        if (err)
        spin_lock_init(&adapter->tx_clean_lock);
        INIT_LIST_HEAD(&adapter->mac_list);
  
-       err = qlcnic_setup_pci_map(adapter);
+       err = qlcnic_setup_pci_map(pdev, adapter->ahw);
        if (err)
                goto err_out_free_hw;
+       qlcnic_check_vf(adapter);
  
        /* This will be reset for mezz cards  */
        adapter->portnum = adapter->ahw->pci_func;
                dev_warn(&pdev->dev, "failed to read mac addr\n");
  
        if (adapter->portnum == 0) {
-               get_brd_name(adapter, brd_name);
+               qlcnic_get_board_name(adapter, board_name);
                pr_info("%s: %s Board Chip rev 0x%x\n",
-                               module_name(THIS_MODULE),
-                               brd_name, adapter->ahw->revision_id);
+                       module_name(THIS_MODULE),
+                       board_name, adapter->ahw->revision_id);
        }
  
        qlcnic_clear_stats(adapter);
  
-       err = qlcnic_alloc_msix_entries(adapter, adapter->max_rx_ques);
+       err = qlcnic_alloc_msix_entries(adapter, adapter->ahw->max_rx_ques);
        if (err)
                goto err_out_decr_ref;
  
  
        pci_set_drvdata(pdev, adapter);
  
-       qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
+       if (qlcnic_82xx_check(adapter))
+               qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
+                                    FW_POLL_DELAY);
  
        switch (adapter->ahw->port_type) {
        case QLCNIC_GBE:
@@@ -1724,7 -1659,7 +1659,7 @@@ err_out_maintenance_mode
        return 0;
  }
  
- static void __devexit qlcnic_remove(struct pci_dev *pdev)
+ static void qlcnic_remove(struct pci_dev *pdev)
  {
        struct qlcnic_adapter *adapter;
        struct net_device *netdev;
        if (adapter->eswitch != NULL)
                kfree(adapter->eswitch);
  
-       qlcnic_clr_all_drv_state(adapter, 0);
+       if (qlcnic_82xx_check(adapter))
+               qlcnic_clr_all_drv_state(adapter, 0);
  
        clear_bit(__QLCNIC_RESETTING, &adapter->state);
  
@@@ -1782,7 -1718,8 +1718,8 @@@ static int __qlcnic_shutdown(struct pci
        if (netif_running(netdev))
                qlcnic_down(adapter, netdev);
  
-       qlcnic_clr_all_drv_state(adapter, 0);
+       if (qlcnic_82xx_check(adapter))
+               qlcnic_clr_all_drv_state(adapter, 0);
  
        clear_bit(__QLCNIC_RESETTING, &adapter->state);
  
        if (retval)
                return retval;
  
-       if (qlcnic_wol_supported(adapter)) {
-               pci_enable_wake(pdev, PCI_D3cold, 1);
-               pci_enable_wake(pdev, PCI_D3hot, 1);
+       if (qlcnic_82xx_check(adapter)) {
+               if (qlcnic_wol_supported(adapter)) {
+                       pci_enable_wake(pdev, PCI_D3cold, 1);
+                       pci_enable_wake(pdev, PCI_D3hot, 1);
+               }
        }
  
        return 0;
@@@ -1927,706 -1866,148 +1866,148 @@@ static void qlcnic_free_lb_filters_mem(
        adapter->fhash.fmax = 0;
  }
  
- static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
-               u64 uaddr, __le16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
+ static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
  {
-       struct cmd_desc_type0 *hwdesc;
-       struct qlcnic_nic_req *req;
-       struct qlcnic_mac_req *mac_req;
-       struct qlcnic_vlan_req *vlan_req;
-       u32 producer;
-       u64 word;
-       producer = tx_ring->producer;
-       hwdesc = &tx_ring->desc_head[tx_ring->producer];
-       req = (struct qlcnic_nic_req *)hwdesc;
-       memset(req, 0, sizeof(struct qlcnic_nic_req));
-       req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
-       word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
-       req->req_hdr = cpu_to_le64(word);
+       struct net_device *netdev = adapter->netdev;
+       u32 temp_state, temp_val, temp = 0;
+       int rv = 0;
  
-       mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
-       mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
-       memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
+       if (qlcnic_82xx_check(adapter))
+               temp = QLCRD32(adapter, CRB_TEMP_STATE);
  
-       vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
-       vlan_req->vlan_id = vlan_id;
+       temp_state = qlcnic_get_temp_state(temp);
+       temp_val = qlcnic_get_temp_val(temp);
  
-       tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
-       smp_mb();
+       if (temp_state == QLCNIC_TEMP_PANIC) {
+               dev_err(&netdev->dev,
+                      "Device temperature %d degrees C exceeds"
+                      " maximum allowed. Hardware has been shut down.\n",
+                      temp_val);
+               rv = 1;
+       } else if (temp_state == QLCNIC_TEMP_WARN) {
+               if (adapter->ahw->temp == QLCNIC_TEMP_NORMAL) {
+                       dev_err(&netdev->dev,
+                              "Device temperature %d degrees C "
+                              "exceeds operating range."
+                              " Immediate action needed.\n",
+                              temp_val);
+               }
+       } else {
+               if (adapter->ahw->temp == QLCNIC_TEMP_WARN) {
+                       dev_info(&netdev->dev,
+                              "Device temperature is now %d degrees C"
+                              " in normal range.\n", temp_val);
+               }
+       }
+       adapter->ahw->temp = temp_state;
+       return rv;
  }
  
- #define QLCNIC_MAC_HASH(MAC)\
-       ((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25))
- static void
- qlcnic_send_filter(struct qlcnic_adapter *adapter,
-               struct qlcnic_host_tx_ring *tx_ring,
-               struct cmd_desc_type0 *first_desc,
-               struct sk_buff *skb)
+ static void qlcnic_tx_timeout(struct net_device *netdev)
  {
-       struct ethhdr *phdr = (struct ethhdr *)(skb->data);
-       struct qlcnic_filter *fil, *tmp_fil;
-       struct hlist_node *tmp_hnode, *n;
-       struct hlist_head *head;
-       u64 src_addr = 0;
-       __le16 vlan_id = 0;
-       u8 hindex;
-       if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
-               return;
-       if (adapter->fhash.fnum >= adapter->fhash.fmax)
-               return;
-       /* Only NPAR capable devices support vlan based learning*/
-       if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
-               vlan_id = first_desc->vlan_TCI;
-       memcpy(&src_addr, phdr->h_source, ETH_ALEN);
-       hindex = QLCNIC_MAC_HASH(src_addr) & (QLCNIC_LB_MAX_FILTERS - 1);
-       head = &(adapter->fhash.fhead[hindex]);
-       hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
-               if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
-                           tmp_fil->vlan_id == vlan_id) {
-                       if (jiffies >
-                           (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
-                               qlcnic_change_filter(adapter, src_addr, vlan_id,
-                                                               tx_ring);
-                       tmp_fil->ftime = jiffies;
-                       return;
-               }
-       }
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
  
-       fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
-       if (!fil)
+       if (test_bit(__QLCNIC_RESETTING, &adapter->state))
                return;
  
-       qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring);
+       dev_err(&netdev->dev, "transmit timeout, resetting.\n");
  
-       fil->ftime = jiffies;
-       fil->vlan_id = vlan_id;
-       memcpy(fil->faddr, &src_addr, ETH_ALEN);
-       spin_lock(&adapter->mac_learn_lock);
-       hlist_add_head(&(fil->fnode), head);
-       adapter->fhash.fnum++;
-       spin_unlock(&adapter->mac_learn_lock);
+       if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
+               adapter->need_fw_reset = 1;
+       else
+               adapter->ahw->reset_context = 1;
  }
  
- static int
- qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
-               struct cmd_desc_type0 *first_desc,
-               struct sk_buff *skb)
+ static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
  {
-       u8 opcode = 0, hdr_len = 0;
-       u16 flags = 0, vlan_tci = 0;
-       int copied, offset, copy_len;
-       struct cmd_desc_type0 *hwdesc;
-       struct vlan_ethhdr *vh;
-       struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
-       u16 protocol = ntohs(skb->protocol);
-       u32 producer = tx_ring->producer;
-       if (protocol == ETH_P_8021Q) {
-               vh = (struct vlan_ethhdr *)skb->data;
-               flags = FLAGS_VLAN_TAGGED;
-               vlan_tci = vh->h_vlan_TCI;
-               protocol = ntohs(vh->h_vlan_encapsulated_proto);
-       } else if (vlan_tx_tag_present(skb)) {
-               flags = FLAGS_VLAN_OOB;
-               vlan_tci = vlan_tx_tag_get(skb);
-       }
-       if (unlikely(adapter->pvid)) {
-               if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
-                       return -EIO;
-               if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
-                       goto set_flags;
-               flags = FLAGS_VLAN_OOB;
-               vlan_tci = adapter->pvid;
-       }
- set_flags:
-       qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
-       qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
-       if (*(skb->data) & BIT_0) {
-               flags |= BIT_0;
-               memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
-       }
-       opcode = TX_ETHER_PKT;
-       if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
-                       skb_shinfo(skb)->gso_size > 0) {
-               hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
-               first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
-               first_desc->total_hdr_length = hdr_len;
-               opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO;
-               /* For LSO, we need to copy the MAC/IP/TCP headers into
-               * the descriptor ring */
-               copied = 0;
-               offset = 2;
-               if (flags & FLAGS_VLAN_OOB) {
-                       first_desc->total_hdr_length += VLAN_HLEN;
-                       first_desc->tcp_hdr_offset = VLAN_HLEN;
-                       first_desc->ip_hdr_offset = VLAN_HLEN;
-                       /* Only in case of TSO on vlan device */
-                       flags |= FLAGS_VLAN_TAGGED;
-                       /* Create a TSO vlan header template for firmware */
-                       hwdesc = &tx_ring->desc_head[producer];
-                       tx_ring->cmd_buf_arr[producer].skb = NULL;
-                       copy_len = min((int)sizeof(struct cmd_desc_type0) -
-                               offset, hdr_len + VLAN_HLEN);
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct net_device_stats *stats = &netdev->stats;
  
-                       vh = (struct vlan_ethhdr *)((char *) hwdesc + 2);
-                       skb_copy_from_linear_data(skb, vh, 12);
-                       vh->h_vlan_proto = htons(ETH_P_8021Q);
-                       vh->h_vlan_TCI = htons(vlan_tci);
+       stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
+       stats->tx_packets = adapter->stats.xmitfinished;
+       stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
+       stats->tx_bytes = adapter->stats.txbytes;
+       stats->rx_dropped = adapter->stats.rxdropped;
+       stats->tx_dropped = adapter->stats.txdropped;
  
-                       skb_copy_from_linear_data_offset(skb, 12,
-                               (char *)vh + 16, copy_len - 16);
+       return stats;
+ }
  
-                       copied = copy_len - VLAN_HLEN;
-                       offset = 0;
+ static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
+ {
+       u32 status;
  
-                       producer = get_next_index(producer, tx_ring->num_desc);
-               }
+       status = readl(adapter->isr_int_vec);
  
-               while (copied < hdr_len) {
+       if (!(status & adapter->ahw->int_vec_bit))
+               return IRQ_NONE;
  
-                       copy_len = min((int)sizeof(struct cmd_desc_type0) -
-                               offset, (hdr_len - copied));
+       /* check interrupt state machine, to be sure */
+       status = readl(adapter->crb_int_state_reg);
+       if (!ISR_LEGACY_INT_TRIGGERED(status))
+               return IRQ_NONE;
  
-                       hwdesc = &tx_ring->desc_head[producer];
-                       tx_ring->cmd_buf_arr[producer].skb = NULL;
+       writel(0xffffffff, adapter->tgt_status_reg);
+       /* read twice to ensure write is flushed */
+       readl(adapter->isr_int_vec);
+       readl(adapter->isr_int_vec);
  
-                       skb_copy_from_linear_data_offset(skb, copied,
-                                (char *) hwdesc + offset, copy_len);
+       return IRQ_HANDLED;
+ }
  
-                       copied += copy_len;
-                       offset = 0;
+ static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
+ {
+       struct qlcnic_host_sds_ring *sds_ring = data;
+       struct qlcnic_adapter *adapter = sds_ring->adapter;
  
-                       producer = get_next_index(producer, tx_ring->num_desc);
-               }
+       if (adapter->flags & QLCNIC_MSIX_ENABLED)
+               goto done;
+       else if (adapter->flags & QLCNIC_MSI_ENABLED) {
+               writel(0xffffffff, adapter->tgt_status_reg);
+               goto done;
+       }
  
-               tx_ring->producer = producer;
-               smp_mb();
-               adapter->stats.lso_frames++;
+       if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
+               return IRQ_NONE;
  
-       } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
-               u8 l4proto;
+ done:
+       adapter->ahw->diag_cnt++;
+       qlcnic_enable_int(sds_ring);
+       return IRQ_HANDLED;
+ }
  
-               if (protocol == ETH_P_IP) {
-                       l4proto = ip_hdr(skb)->protocol;
+ static irqreturn_t qlcnic_intr(int irq, void *data)
+ {
+       struct qlcnic_host_sds_ring *sds_ring = data;
+       struct qlcnic_adapter *adapter = sds_ring->adapter;
  
-                       if (l4proto == IPPROTO_TCP)
-                               opcode = TX_TCP_PKT;
-                       else if (l4proto == IPPROTO_UDP)
-                               opcode = TX_UDP_PKT;
-               } else if (protocol == ETH_P_IPV6) {
-                       l4proto = ipv6_hdr(skb)->nexthdr;
+       if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
+               return IRQ_NONE;
  
-                       if (l4proto == IPPROTO_TCP)
-                               opcode = TX_TCPV6_PKT;
-                       else if (l4proto == IPPROTO_UDP)
-                               opcode = TX_UDPV6_PKT;
-               }
-       }
-       first_desc->tcp_hdr_offset += skb_transport_offset(skb);
-       first_desc->ip_hdr_offset += skb_network_offset(skb);
-       qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
+       napi_schedule(&sds_ring->napi);
  
-       return 0;
+       return IRQ_HANDLED;
  }
  
- static int
- qlcnic_map_tx_skb(struct pci_dev *pdev,
-               struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
+ static irqreturn_t qlcnic_msi_intr(int irq, void *data)
  {
-       struct qlcnic_skb_frag *nf;
-       struct skb_frag_struct *frag;
-       int i, nr_frags;
-       dma_addr_t map;
+       struct qlcnic_host_sds_ring *sds_ring = data;
+       struct qlcnic_adapter *adapter = sds_ring->adapter;
  
-       nr_frags = skb_shinfo(skb)->nr_frags;
-       nf = &pbuf->frag_array[0];
+       /* clear interrupt */
+       writel(0xffffffff, adapter->tgt_status_reg);
  
-       map = pci_map_single(pdev, skb->data,
-                       skb_headlen(skb), PCI_DMA_TODEVICE);
-       if (pci_dma_mapping_error(pdev, map))
-               goto out_err;
+       napi_schedule(&sds_ring->napi);
+       return IRQ_HANDLED;
+ }
  
-       nf->dma = map;
-       nf->length = skb_headlen(skb);
+ static irqreturn_t qlcnic_msix_intr(int irq, void *data)
+ {
+       struct qlcnic_host_sds_ring *sds_ring = data;
  
-       for (i = 0; i < nr_frags; i++) {
-               frag = &skb_shinfo(skb)->frags[i];
-               nf = &pbuf->frag_array[i+1];
-               map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
-                                      DMA_TO_DEVICE);
-               if (dma_mapping_error(&pdev->dev, map))
-                       goto unwind;
-               nf->dma = map;
-               nf->length = skb_frag_size(frag);
-       }
-       return 0;
- unwind:
-       while (--i >= 0) {
-               nf = &pbuf->frag_array[i+1];
-               pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
-       }
-       nf = &pbuf->frag_array[0];
-       pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
- out_err:
-       return -ENOMEM;
- }
- static void
- qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
-                       struct qlcnic_cmd_buffer *pbuf)
- {
-       struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
-       int nr_frags = skb_shinfo(skb)->nr_frags;
-       int i;
-       for (i = 0; i < nr_frags; i++) {
-               nf = &pbuf->frag_array[i+1];
-               pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
-       }
-       nf = &pbuf->frag_array[0];
-       pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
-       pbuf->skb = NULL;
- }
- static inline void
- qlcnic_clear_cmddesc(u64 *desc)
- {
-       desc[0] = 0ULL;
-       desc[2] = 0ULL;
-       desc[7] = 0ULL;
- }
- netdev_tx_t
- qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
- {
-       struct qlcnic_adapter *adapter = netdev_priv(netdev);
-       struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
-       struct qlcnic_cmd_buffer *pbuf;
-       struct qlcnic_skb_frag *buffrag;
-       struct cmd_desc_type0 *hwdesc, *first_desc;
-       struct pci_dev *pdev;
-       struct ethhdr *phdr;
-       int delta = 0;
-       int i, k;
-       u32 producer;
-       int frag_count;
-       u32 num_txd = tx_ring->num_desc;
-       if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
-               netif_stop_queue(netdev);
-               return NETDEV_TX_BUSY;
-       }
-       if (adapter->flags & QLCNIC_MACSPOOF) {
-               phdr = (struct ethhdr *)skb->data;
-               if (!ether_addr_equal(phdr->h_source, adapter->mac_addr))
-                       goto drop_packet;
-       }
-       frag_count = skb_shinfo(skb)->nr_frags + 1;
-       /* 14 frags supported for normal packet and
-        * 32 frags supported for TSO packet
-        */
-       if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
-               for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
-                       delta += skb_frag_size(&skb_shinfo(skb)->frags[i]);
-               if (!__pskb_pull_tail(skb, delta))
-                       goto drop_packet;
-               frag_count = 1 + skb_shinfo(skb)->nr_frags;
-       }
-       if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
-               netif_stop_queue(netdev);
-               if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
-                       netif_start_queue(netdev);
-               else {
-                       adapter->stats.xmit_off++;
-                       return NETDEV_TX_BUSY;
-               }
-       }
-       producer = tx_ring->producer;
-       pbuf = &tx_ring->cmd_buf_arr[producer];
-       pdev = adapter->pdev;
-       first_desc = hwdesc = &tx_ring->desc_head[producer];
-       qlcnic_clear_cmddesc((u64 *)hwdesc);
-       if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
-               adapter->stats.tx_dma_map_error++;
-               goto drop_packet;
-       }
-       pbuf->skb = skb;
-       pbuf->frag_count = frag_count;
-       qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
-       qlcnic_set_tx_port(first_desc, adapter->portnum);
-       for (i = 0; i < frag_count; i++) {
-               k = i % 4;
-               if ((k == 0) && (i > 0)) {
-                       /* move to next desc.*/
-                       producer = get_next_index(producer, num_txd);
-                       hwdesc = &tx_ring->desc_head[producer];
-                       qlcnic_clear_cmddesc((u64 *)hwdesc);
-                       tx_ring->cmd_buf_arr[producer].skb = NULL;
-               }
-               buffrag = &pbuf->frag_array[i];
-               hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
-               switch (k) {
-               case 0:
-                       hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
-                       break;
-               case 1:
-                       hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
-                       break;
-               case 2:
-                       hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
-                       break;
-               case 3:
-                       hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
-                       break;
-               }
-       }
-       tx_ring->producer = get_next_index(producer, num_txd);
-       smp_mb();
-       if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
-               goto unwind_buff;
-       if (adapter->mac_learn)
-               qlcnic_send_filter(adapter, tx_ring, first_desc, skb);
-       adapter->stats.txbytes += skb->len;
-       adapter->stats.xmitcalled++;
-       qlcnic_update_cmd_producer(adapter, tx_ring);
-       return NETDEV_TX_OK;
- unwind_buff:
-       qlcnic_unmap_buffers(pdev, skb, pbuf);
- drop_packet:
-       adapter->stats.txdropped++;
-       dev_kfree_skb_any(skb);
-       return NETDEV_TX_OK;
- }
- static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
- {
-       struct net_device *netdev = adapter->netdev;
-       u32 temp, temp_state, temp_val;
-       int rv = 0;
-       temp = QLCRD32(adapter, CRB_TEMP_STATE);
-       temp_state = qlcnic_get_temp_state(temp);
-       temp_val = qlcnic_get_temp_val(temp);
-       if (temp_state == QLCNIC_TEMP_PANIC) {
-               dev_err(&netdev->dev,
-                      "Device temperature %d degrees C exceeds"
-                      " maximum allowed. Hardware has been shut down.\n",
-                      temp_val);
-               rv = 1;
-       } else if (temp_state == QLCNIC_TEMP_WARN) {
-               if (adapter->temp == QLCNIC_TEMP_NORMAL) {
-                       dev_err(&netdev->dev,
-                              "Device temperature %d degrees C "
-                              "exceeds operating range."
-                              " Immediate action needed.\n",
-                              temp_val);
-               }
-       } else {
-               if (adapter->temp == QLCNIC_TEMP_WARN) {
-                       dev_info(&netdev->dev,
-                              "Device temperature is now %d degrees C"
-                              " in normal range.\n", temp_val);
-               }
-       }
-       adapter->temp = temp_state;
-       return rv;
- }
- void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
- {
-       struct net_device *netdev = adapter->netdev;
-       if (adapter->ahw->linkup && !linkup) {
-               netdev_info(netdev, "NIC Link is down\n");
-               adapter->ahw->linkup = 0;
-               if (netif_running(netdev)) {
-                       netif_carrier_off(netdev);
-                       netif_stop_queue(netdev);
-               }
-       } else if (!adapter->ahw->linkup && linkup) {
-               netdev_info(netdev, "NIC Link is up\n");
-               adapter->ahw->linkup = 1;
-               if (netif_running(netdev)) {
-                       netif_carrier_on(netdev);
-                       netif_wake_queue(netdev);
-               }
-       }
- }
- static void qlcnic_tx_timeout(struct net_device *netdev)
- {
-       struct qlcnic_adapter *adapter = netdev_priv(netdev);
-       if (test_bit(__QLCNIC_RESETTING, &adapter->state))
-               return;
-       dev_err(&netdev->dev, "transmit timeout, resetting.\n");
-       if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
-               adapter->need_fw_reset = 1;
-       else
-               adapter->reset_context = 1;
- }
- static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
- {
-       struct qlcnic_adapter *adapter = netdev_priv(netdev);
-       struct net_device_stats *stats = &netdev->stats;
-       stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
-       stats->tx_packets = adapter->stats.xmitfinished;
-       stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
-       stats->tx_bytes = adapter->stats.txbytes;
-       stats->rx_dropped = adapter->stats.rxdropped;
-       stats->tx_dropped = adapter->stats.txdropped;
-       return stats;
- }
- static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
- {
-       u32 status;
-       status = readl(adapter->isr_int_vec);
-       if (!(status & adapter->int_vec_bit))
-               return IRQ_NONE;
-       /* check interrupt state machine, to be sure */
-       status = readl(adapter->crb_int_state_reg);
-       if (!ISR_LEGACY_INT_TRIGGERED(status))
-               return IRQ_NONE;
-       writel(0xffffffff, adapter->tgt_status_reg);
-       /* read twice to ensure write is flushed */
-       readl(adapter->isr_int_vec);
-       readl(adapter->isr_int_vec);
-       return IRQ_HANDLED;
- }
- static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
- {
-       struct qlcnic_host_sds_ring *sds_ring = data;
-       struct qlcnic_adapter *adapter = sds_ring->adapter;
-       if (adapter->flags & QLCNIC_MSIX_ENABLED)
-               goto done;
-       else if (adapter->flags & QLCNIC_MSI_ENABLED) {
-               writel(0xffffffff, adapter->tgt_status_reg);
-               goto done;
-       }
-       if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
-               return IRQ_NONE;
- done:
-       adapter->diag_cnt++;
-       qlcnic_enable_int(sds_ring);
-       return IRQ_HANDLED;
- }
- static irqreturn_t qlcnic_intr(int irq, void *data)
- {
-       struct qlcnic_host_sds_ring *sds_ring = data;
-       struct qlcnic_adapter *adapter = sds_ring->adapter;
-       if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
-               return IRQ_NONE;
-       napi_schedule(&sds_ring->napi);
-       return IRQ_HANDLED;
- }
- static irqreturn_t qlcnic_msi_intr(int irq, void *data)
- {
-       struct qlcnic_host_sds_ring *sds_ring = data;
-       struct qlcnic_adapter *adapter = sds_ring->adapter;
-       /* clear interrupt */
-       writel(0xffffffff, adapter->tgt_status_reg);
-       napi_schedule(&sds_ring->napi);
-       return IRQ_HANDLED;
- }
- static irqreturn_t qlcnic_msix_intr(int irq, void *data)
- {
-       struct qlcnic_host_sds_ring *sds_ring = data;
-       napi_schedule(&sds_ring->napi);
-       return IRQ_HANDLED;
- }
- static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
- {
-       u32 sw_consumer, hw_consumer;
-       int count = 0, i;
-       struct qlcnic_cmd_buffer *buffer;
-       struct pci_dev *pdev = adapter->pdev;
-       struct net_device *netdev = adapter->netdev;
-       struct qlcnic_skb_frag *frag;
-       int done;
-       struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
-       if (!spin_trylock(&adapter->tx_clean_lock))
-               return 1;
-       sw_consumer = tx_ring->sw_consumer;
-       hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
-       while (sw_consumer != hw_consumer) {
-               buffer = &tx_ring->cmd_buf_arr[sw_consumer];
-               if (buffer->skb) {
-                       frag = &buffer->frag_array[0];
-                       pci_unmap_single(pdev, frag->dma, frag->length,
-                                        PCI_DMA_TODEVICE);
-                       frag->dma = 0ULL;
-                       for (i = 1; i < buffer->frag_count; i++) {
-                               frag++;
-                               pci_unmap_page(pdev, frag->dma, frag->length,
-                                              PCI_DMA_TODEVICE);
-                               frag->dma = 0ULL;
-                       }
-                       adapter->stats.xmitfinished++;
-                       dev_kfree_skb_any(buffer->skb);
-                       buffer->skb = NULL;
-               }
-               sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
-               if (++count >= MAX_STATUS_HANDLE)
-                       break;
-       }
-       if (count && netif_running(netdev)) {
-               tx_ring->sw_consumer = sw_consumer;
-               smp_mb();
-               if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
-                       if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
-                               netif_wake_queue(netdev);
-                               adapter->stats.xmit_on++;
-                       }
-               }
-               adapter->tx_timeo_cnt = 0;
-       }
-       /*
-        * If everything is freed up to consumer then check if the ring is full
-        * If the ring is full then check if more needs to be freed and
-        * schedule the call back again.
-        *
-        * This happens when there are 2 CPUs. One could be freeing and the
-        * other filling it. If the ring is full when we get out of here and
-        * the card has already interrupted the host then the host can miss the
-        * interrupt.
-        *
-        * There is still a possible race condition and the host could miss an
-        * interrupt. The card has to take care of this.
-        */
-       hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
-       done = (sw_consumer == hw_consumer);
-       spin_unlock(&adapter->tx_clean_lock);
-       return done;
- }
- static int qlcnic_poll(struct napi_struct *napi, int budget)
- {
-       struct qlcnic_host_sds_ring *sds_ring =
-               container_of(napi, struct qlcnic_host_sds_ring, napi);
-       struct qlcnic_adapter *adapter = sds_ring->adapter;
-       int tx_complete;
-       int work_done;
-       tx_complete = qlcnic_process_cmd_ring(adapter);
-       work_done = qlcnic_process_rcv_ring(sds_ring, budget);
-       if ((work_done < budget) && tx_complete) {
-               napi_complete(&sds_ring->napi);
-               if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
-                       qlcnic_enable_int(sds_ring);
-       }
-       return work_done;
- }
- static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
- {
-       struct qlcnic_host_sds_ring *sds_ring =
-               container_of(napi, struct qlcnic_host_sds_ring, napi);
-       struct qlcnic_adapter *adapter = sds_ring->adapter;
-       int work_done;
-       work_done = qlcnic_process_rcv_ring(sds_ring, budget);
-       if (work_done < budget) {
-               napi_complete(&sds_ring->napi);
-               if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
-                       qlcnic_enable_int(sds_ring);
-       }
-       return work_done;
- }
+       napi_schedule(&sds_ring->napi);
+       return IRQ_HANDLED;
+ }
  
  #ifdef CONFIG_NET_POLL_CONTROLLER
  static void qlcnic_poll_controller(struct net_device *netdev)
@@@ -2871,7 -2252,7 +2252,7 @@@ qlcnic_fwinit_work(struct work_struct *
                return;
        }
  
-       if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
+       if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
                qlcnic_api_unlock(adapter);
                goto wait_npar;
        }
@@@ -2987,9 -2368,9 +2368,9 @@@ qlcnic_detach_work(struct work_struct *
                goto err_ret;
        }
  
-       if (adapter->temp == QLCNIC_TEMP_PANIC) {
+       if (adapter->ahw->temp == QLCNIC_TEMP_PANIC) {
                dev_err(&adapter->pdev->dev, "Detaching the device: temp=%d\n",
-                       adapter->temp);
+                       adapter->ahw->temp);
                goto err_ret;
        }
  
@@@ -3114,7 -2495,7 +2495,7 @@@ qlcnic_attach_work(struct work_struct *
        struct net_device *netdev = adapter->netdev;
        u32 npar_state;
  
-       if (adapter->op_mode != QLCNIC_MGMT_FUNC) {
+       if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) {
                npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
                if (adapter->fw_wait_cnt++ > QLCNIC_DEV_NPAR_OPER_TIMEO)
                        qlcnic_clr_all_drv_state(adapter, 0);
@@@ -3171,7 -2552,7 +2552,7 @@@ qlcnic_check_health(struct qlcnic_adapt
                if (adapter->need_fw_reset)
                        goto detach;
  
-               if (adapter->reset_context && auto_fw_reset) {
+               if (adapter->ahw->reset_context && qlcnic_auto_fw_reset) {
                        qlcnic_reset_hw_context(adapter);
                        adapter->netdev->trans_start = jiffies;
                }
  
        qlcnic_dev_request_reset(adapter);
  
-       if (auto_fw_reset)
+       if (qlcnic_auto_fw_reset)
                clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
  
        dev_err(&adapter->pdev->dev, "firmware hang detected\n");
@@@ -3211,8 -2592,8 +2592,8 @@@ detach
        adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
                QLCNIC_DEV_NEED_RESET;
  
-       if (auto_fw_reset &&
-               !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {
+       if (qlcnic_auto_fw_reset && !test_and_set_bit(__QLCNIC_RESETTING,
+                                                     &adapter->state)) {
  
                qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
                QLCDB(adapter, DRV, "fw recovery scheduled.\n");
@@@ -3283,7 -2664,7 +2664,7 @@@ static int qlcnic_attach_func(struct pc
        if (qlcnic_api_lock(adapter))
                return -EINVAL;
  
-       if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) {
+       if (adapter->ahw->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) {
                adapter->need_fw_reset = 1;
                set_bit(__QLCNIC_START_FW, &adapter->state);
                QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
@@@ -3395,96 -2776,9 +2776,9 @@@ qlcnicvf_start_firmware(struct qlcnic_a
        return err;
  }
  
- static int
- qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
+ int qlcnic_validate_max_rss(struct net_device *netdev, u8 max_hw, u8 val)
  {
-       return -EOPNOTSUPP;
- }
- static int
- qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
- {
-       return -EOPNOTSUPP;
- }
- static ssize_t
- qlcnic_store_bridged_mode(struct device *dev,
-               struct device_attribute *attr, const char *buf, size_t len)
- {
-       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       unsigned long new;
-       int ret = -EINVAL;
-       if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
-               goto err_out;
-       if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
-               goto err_out;
-       if (strict_strtoul(buf, 2, &new))
-               goto err_out;
-       if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
-               ret = len;
- err_out:
-       return ret;
- }
- static ssize_t
- qlcnic_show_bridged_mode(struct device *dev,
-               struct device_attribute *attr, char *buf)
- {
-       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       int bridged_mode = 0;
-       if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
-               bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
-       return sprintf(buf, "%d\n", bridged_mode);
- }
- static struct device_attribute dev_attr_bridged_mode = {
-        .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
-        .show = qlcnic_show_bridged_mode,
-        .store = qlcnic_store_bridged_mode,
- };
- static ssize_t
- qlcnic_store_diag_mode(struct device *dev,
-               struct device_attribute *attr, const char *buf, size_t len)
- {
-       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       unsigned long new;
-       if (strict_strtoul(buf, 2, &new))
-               return -EINVAL;
-       if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
-               adapter->flags ^= QLCNIC_DIAG_ENABLED;
-       return len;
- }
- static ssize_t
- qlcnic_show_diag_mode(struct device *dev,
-               struct device_attribute *attr, char *buf)
- {
-       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       return sprintf(buf, "%d\n",
-                       !!(adapter->flags & QLCNIC_DIAG_ENABLED));
- }
- static struct device_attribute dev_attr_diag_mode = {
-       .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
-       .show = qlcnic_show_diag_mode,
-       .store = qlcnic_store_diag_mode,
- };
- int qlcnic_validate_max_rss(struct net_device *netdev, u8 max_hw, u8 val)
- {
-       if (!use_msi_x && !use_msi) {
+       if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
                netdev_info(netdev, "no msix or msi support, hence no rss\n");
                return -EINVAL;
        }
@@@ -3532,859 -2826,6 +2826,6 @@@ int qlcnic_set_max_rss(struct qlcnic_ad
        return err;
  }
  
- static int
- qlcnic_validate_beacon(struct qlcnic_adapter *adapter, u16 beacon, u8 *state,
-                       u8 *rate)
- {
-       *rate = LSB(beacon);
-       *state = MSB(beacon);
-       QLCDB(adapter, DRV, "rate %x state %x\n", *rate, *state);
-       if (!*state) {
-               *rate = __QLCNIC_MAX_LED_RATE;
-               return 0;
-       } else if (*state > __QLCNIC_MAX_LED_STATE)
-               return -EINVAL;
-       if ((!*rate) || (*rate > __QLCNIC_MAX_LED_RATE))
-               return -EINVAL;
-       return 0;
- }
- static ssize_t
- qlcnic_store_beacon(struct device *dev,
-               struct device_attribute *attr, const char *buf, size_t len)
- {
-       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       int max_sds_rings = adapter->max_sds_rings;
-       u16 beacon;
-       u8 b_state, b_rate;
-       int err;
-       if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
-               dev_warn(dev, "LED test not supported for non "
-                               "privilege function\n");
-               return -EOPNOTSUPP;
-       }
-       if (len != sizeof(u16))
-               return QL_STATUS_INVALID_PARAM;
-       memcpy(&beacon, buf, sizeof(u16));
-       err = qlcnic_validate_beacon(adapter, beacon, &b_state, &b_rate);
-       if (err)
-               return err;
-       if (adapter->ahw->beacon_state == b_state)
-               return len;
-       rtnl_lock();
-       if (!adapter->ahw->beacon_state)
-               if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state)) {
-                       rtnl_unlock();
-                       return -EBUSY;
-               }
-       if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
-               err = -EIO;
-               goto out;
-       }
-       if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
-               err = qlcnic_diag_alloc_res(adapter->netdev, QLCNIC_LED_TEST);
-               if (err)
-                       goto out;
-               set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state);
-       }
-       err = qlcnic_config_led(adapter, b_state, b_rate);
-       if (!err) {
-               err = len;
-               adapter->ahw->beacon_state = b_state;
-       }
-       if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
-               qlcnic_diag_free_res(adapter->netdev, max_sds_rings);
-  out:
-       if (!adapter->ahw->beacon_state)
-               clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
-       rtnl_unlock();
-       return err;
- }
- static ssize_t
- qlcnic_show_beacon(struct device *dev,
-               struct device_attribute *attr, char *buf)
- {
-       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       return sprintf(buf, "%d\n", adapter->ahw->beacon_state);
- }
- static struct device_attribute dev_attr_beacon = {
-       .attr = {.name = "beacon", .mode = (S_IRUGO | S_IWUSR)},
-       .show = qlcnic_show_beacon,
-       .store = qlcnic_store_beacon,
- };
- static int
- qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
-               loff_t offset, size_t size)
- {
-       size_t crb_size = 4;
-       if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
-               return -EIO;
-       if (offset < QLCNIC_PCI_CRBSPACE) {
-               if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
-                                       QLCNIC_PCI_CAMQM_END))
-                       crb_size = 8;
-               else
-                       return -EINVAL;
-       }
-       if ((size != crb_size) || (offset & (crb_size-1)))
-               return  -EINVAL;
-       return 0;
- }
- static ssize_t
- qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
-               struct bin_attribute *attr,
-               char *buf, loff_t offset, size_t size)
- {
-       struct device *dev = container_of(kobj, struct device, kobj);
-       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       u32 data;
-       u64 qmdata;
-       int ret;
-       ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
-       if (ret != 0)
-               return ret;
-       if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
-               qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
-               memcpy(buf, &qmdata, size);
-       } else {
-               data = QLCRD32(adapter, offset);
-               memcpy(buf, &data, size);
-       }
-       return size;
- }
- static ssize_t
- qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
-               struct bin_attribute *attr,
-               char *buf, loff_t offset, size_t size)
- {
-       struct device *dev = container_of(kobj, struct device, kobj);
-       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       u32 data;
-       u64 qmdata;
-       int ret;
-       ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
-       if (ret != 0)
-               return ret;
-       if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
-               memcpy(&qmdata, buf, size);
-               qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
-       } else {
-               memcpy(&data, buf, size);
-               QLCWR32(adapter, offset, data);
-       }
-       return size;
- }
- static int
- qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
-               loff_t offset, size_t size)
- {
-       if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
-               return -EIO;
-       if ((size != 8) || (offset & 0x7))
-               return  -EIO;
-       return 0;
- }
- static ssize_t
- qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
-               struct bin_attribute *attr,
-               char *buf, loff_t offset, size_t size)
- {
-       struct device *dev = container_of(kobj, struct device, kobj);
-       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       u64 data;
-       int ret;
-       ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
-       if (ret != 0)
-               return ret;
-       if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
-               return -EIO;
-       memcpy(buf, &data, size);
-       return size;
- }
- static ssize_t
- qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
-               struct bin_attribute *attr,
-               char *buf, loff_t offset, size_t size)
- {
-       struct device *dev = container_of(kobj, struct device, kobj);
-       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       u64 data;
-       int ret;
-       ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
-       if (ret != 0)
-               return ret;
-       memcpy(&data, buf, size);
-       if (qlcnic_pci_mem_write_2M(adapter, offset, data))
-               return -EIO;
-       return size;
- }
- static struct bin_attribute bin_attr_crb = {
-       .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
-       .size = 0,
-       .read = qlcnic_sysfs_read_crb,
-       .write = qlcnic_sysfs_write_crb,
- };
- static struct bin_attribute bin_attr_mem = {
-       .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
-       .size = 0,
-       .read = qlcnic_sysfs_read_mem,
-       .write = qlcnic_sysfs_write_mem,
- };
- static int
- validate_pm_config(struct qlcnic_adapter *adapter,
-                       struct qlcnic_pm_func_cfg *pm_cfg, int count)
- {
-       u8 src_pci_func, s_esw_id, d_esw_id;
-       u8 dest_pci_func;
-       int i;
-       for (i = 0; i < count; i++) {
-               src_pci_func = pm_cfg[i].pci_func;
-               dest_pci_func = pm_cfg[i].dest_npar;
-               if (src_pci_func >= QLCNIC_MAX_PCI_FUNC
-                               || dest_pci_func >= QLCNIC_MAX_PCI_FUNC)
-                       return QL_STATUS_INVALID_PARAM;
-               if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC)
-                       return QL_STATUS_INVALID_PARAM;
-               if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
-                       return QL_STATUS_INVALID_PARAM;
-               s_esw_id = adapter->npars[src_pci_func].phy_port;
-               d_esw_id = adapter->npars[dest_pci_func].phy_port;
-               if (s_esw_id != d_esw_id)
-                       return QL_STATUS_INVALID_PARAM;
-       }
-       return 0;
- }
- static ssize_t
- qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj,
-       struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
- {
-       struct device *dev = container_of(kobj, struct device, kobj);
-       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       struct qlcnic_pm_func_cfg *pm_cfg;
-       u32 id, action, pci_func;
-       int count, rem, i, ret;
-       count   = size / sizeof(struct qlcnic_pm_func_cfg);
-       rem     = size % sizeof(struct qlcnic_pm_func_cfg);
-       if (rem)
-               return QL_STATUS_INVALID_PARAM;
-       pm_cfg = (struct qlcnic_pm_func_cfg *) buf;
-       ret = validate_pm_config(adapter, pm_cfg, count);
-       if (ret)
-               return ret;
-       for (i = 0; i < count; i++) {
-               pci_func = pm_cfg[i].pci_func;
-               action = !!pm_cfg[i].action;
-               id = adapter->npars[pci_func].phy_port;
-               ret = qlcnic_config_port_mirroring(adapter, id,
-                                               action, pci_func);
-               if (ret)
-                       return ret;
-       }
-       for (i = 0; i < count; i++) {
-               pci_func = pm_cfg[i].pci_func;
-               id = adapter->npars[pci_func].phy_port;
-               adapter->npars[pci_func].enable_pm = !!pm_cfg[i].action;
-               adapter->npars[pci_func].dest_npar = id;
-       }
-       return size;
- }
- static ssize_t
- qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj,
-       struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
- {
-       struct device *dev = container_of(kobj, struct device, kobj);
-       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
-       int i;
-       if (size != sizeof(pm_cfg))
-               return QL_STATUS_INVALID_PARAM;
-       for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
-               if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
-                       continue;
-               pm_cfg[i].action = adapter->npars[i].enable_pm;
-               pm_cfg[i].dest_npar = 0;
-               pm_cfg[i].pci_func = i;
-       }
-       memcpy(buf, &pm_cfg, size);
-       return size;
- }
- static int
- validate_esw_config(struct qlcnic_adapter *adapter,
-       struct qlcnic_esw_func_cfg *esw_cfg, int count)
- {
-       u32 op_mode;
-       u8 pci_func;
-       int i;
-       op_mode = readl(adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
-       for (i = 0; i < count; i++) {
-               pci_func = esw_cfg[i].pci_func;
-               if (pci_func >= QLCNIC_MAX_PCI_FUNC)
-                       return QL_STATUS_INVALID_PARAM;
-               if (adapter->op_mode == QLCNIC_MGMT_FUNC)
-                       if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
-                               return QL_STATUS_INVALID_PARAM;
-               switch (esw_cfg[i].op_mode) {
-               case QLCNIC_PORT_DEFAULTS:
-                       if (QLC_DEV_GET_DRV(op_mode, pci_func) !=
-                                               QLCNIC_NON_PRIV_FUNC) {
-                               if (esw_cfg[i].mac_anti_spoof != 0)
-                                       return QL_STATUS_INVALID_PARAM;
-                               if (esw_cfg[i].mac_override != 1)
-                                       return QL_STATUS_INVALID_PARAM;
-                               if (esw_cfg[i].promisc_mode != 1)
-                                       return QL_STATUS_INVALID_PARAM;
-                       }
-                       break;
-               case QLCNIC_ADD_VLAN:
-                       if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
-                               return QL_STATUS_INVALID_PARAM;
-                       if (!esw_cfg[i].op_type)
-                               return QL_STATUS_INVALID_PARAM;
-                       break;
-               case QLCNIC_DEL_VLAN:
-                       if (!esw_cfg[i].op_type)
-                               return QL_STATUS_INVALID_PARAM;
-                       break;
-               default:
-                       return QL_STATUS_INVALID_PARAM;
-               }
-       }
-       return 0;
- }
- static ssize_t
- qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
-       struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
- {
-       struct device *dev = container_of(kobj, struct device, kobj);
-       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       struct qlcnic_esw_func_cfg *esw_cfg;
-       struct qlcnic_npar_info *npar;
-       int count, rem, i, ret;
-       u8 pci_func, op_mode = 0;
-       count   = size / sizeof(struct qlcnic_esw_func_cfg);
-       rem     = size % sizeof(struct qlcnic_esw_func_cfg);
-       if (rem)
-               return QL_STATUS_INVALID_PARAM;
-       esw_cfg = (struct qlcnic_esw_func_cfg *) buf;
-       ret = validate_esw_config(adapter, esw_cfg, count);
-       if (ret)
-               return ret;
-       for (i = 0; i < count; i++) {
-               if (adapter->op_mode == QLCNIC_MGMT_FUNC)
-                       if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
-                               return QL_STATUS_INVALID_PARAM;
-               if (adapter->ahw->pci_func != esw_cfg[i].pci_func)
-                       continue;
-               op_mode = esw_cfg[i].op_mode;
-               qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]);
-               esw_cfg[i].op_mode = op_mode;
-               esw_cfg[i].pci_func = adapter->ahw->pci_func;
-               switch (esw_cfg[i].op_mode) {
-               case QLCNIC_PORT_DEFAULTS:
-                       qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]);
-                       break;
-               case QLCNIC_ADD_VLAN:
-                       qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
-                       break;
-               case QLCNIC_DEL_VLAN:
-                       esw_cfg[i].vlan_id = 0;
-                       qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
-                       break;
-               }
-       }
-       if (adapter->op_mode != QLCNIC_MGMT_FUNC)
-               goto out;
-       for (i = 0; i < count; i++) {
-               pci_func = esw_cfg[i].pci_func;
-               npar = &adapter->npars[pci_func];
-               switch (esw_cfg[i].op_mode) {
-               case QLCNIC_PORT_DEFAULTS:
-                       npar->promisc_mode = esw_cfg[i].promisc_mode;
-                       npar->mac_override = esw_cfg[i].mac_override;
-                       npar->offload_flags = esw_cfg[i].offload_flags;
-                       npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof;
-                       npar->discard_tagged = esw_cfg[i].discard_tagged;
-                       break;
-               case QLCNIC_ADD_VLAN:
-                       npar->pvid = esw_cfg[i].vlan_id;
-                       break;
-               case QLCNIC_DEL_VLAN:
-                       npar->pvid = 0;
-                       break;
-               }
-       }
- out:
-       return size;
- }
- static ssize_t
- qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj,
-       struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
- {
-       struct device *dev = container_of(kobj, struct device, kobj);
-       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
-       u8 i;
-       if (size != sizeof(esw_cfg))
-               return QL_STATUS_INVALID_PARAM;
-       for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
-               if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
-                       continue;
-               esw_cfg[i].pci_func = i;
-               if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]))
-                       return QL_STATUS_INVALID_PARAM;
-       }
-       memcpy(buf, &esw_cfg, size);
-       return size;
- }
- static int
- validate_npar_config(struct qlcnic_adapter *adapter,
-                               struct qlcnic_npar_func_cfg *np_cfg, int count)
- {
-       u8 pci_func, i;
-       for (i = 0; i < count; i++) {
-               pci_func = np_cfg[i].pci_func;
-               if (pci_func >= QLCNIC_MAX_PCI_FUNC)
-                       return QL_STATUS_INVALID_PARAM;
-               if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
-                       return QL_STATUS_INVALID_PARAM;
-               if (!IS_VALID_BW(np_cfg[i].min_bw) ||
-                   !IS_VALID_BW(np_cfg[i].max_bw))
-                       return QL_STATUS_INVALID_PARAM;
-       }
-       return 0;
- }
- static ssize_t
- qlcnic_sysfs_write_npar_config(struct file *file, struct kobject *kobj,
-       struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
- {
-       struct device *dev = container_of(kobj, struct device, kobj);
-       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       struct qlcnic_info nic_info;
-       struct qlcnic_npar_func_cfg *np_cfg;
-       int i, count, rem, ret;
-       u8 pci_func;
-       count   = size / sizeof(struct qlcnic_npar_func_cfg);
-       rem     = size % sizeof(struct qlcnic_npar_func_cfg);
-       if (rem)
-               return QL_STATUS_INVALID_PARAM;
-       np_cfg = (struct qlcnic_npar_func_cfg *) buf;
-       ret = validate_npar_config(adapter, np_cfg, count);
-       if (ret)
-               return ret;
-       for (i = 0; i < count ; i++) {
-               pci_func = np_cfg[i].pci_func;
-               ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
-               if (ret)
-                       return ret;
-               nic_info.pci_func = pci_func;
-               nic_info.min_tx_bw = np_cfg[i].min_bw;
-               nic_info.max_tx_bw = np_cfg[i].max_bw;
-               ret = qlcnic_set_nic_info(adapter, &nic_info);
-               if (ret)
-                       return ret;
-               adapter->npars[i].min_bw = nic_info.min_tx_bw;
-               adapter->npars[i].max_bw = nic_info.max_tx_bw;
-       }
-       return size;
- }
- static ssize_t
- qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj,
-       struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
- {
-       struct device *dev = container_of(kobj, struct device, kobj);
-       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       struct qlcnic_info nic_info;
-       struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
-       int i, ret;
-       if (size != sizeof(np_cfg))
-               return QL_STATUS_INVALID_PARAM;
-       for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
-               if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
-                       continue;
-               ret = qlcnic_get_nic_info(adapter, &nic_info, i);
-               if (ret)
-                       return ret;
-               np_cfg[i].pci_func = i;
-               np_cfg[i].op_mode = (u8)nic_info.op_mode;
-               np_cfg[i].port_num = nic_info.phys_port;
-               np_cfg[i].fw_capab = nic_info.capabilities;
-               np_cfg[i].min_bw = nic_info.min_tx_bw ;
-               np_cfg[i].max_bw = nic_info.max_tx_bw;
-               np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
-               np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
-       }
-       memcpy(buf, &np_cfg, size);
-       return size;
- }
- static ssize_t
- qlcnic_sysfs_get_port_stats(struct file *file, struct kobject *kobj,
-       struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
- {
-       struct device *dev = container_of(kobj, struct device, kobj);
-       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       struct qlcnic_esw_statistics port_stats;
-       int ret;
-       if (size != sizeof(struct qlcnic_esw_statistics))
-               return QL_STATUS_INVALID_PARAM;
-       if (offset >= QLCNIC_MAX_PCI_FUNC)
-               return QL_STATUS_INVALID_PARAM;
-       memset(&port_stats, 0, size);
-       ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
-                                                               &port_stats.rx);
-       if (ret)
-               return ret;
-       ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
-                                                               &port_stats.tx);
-       if (ret)
-               return ret;
-       memcpy(buf, &port_stats, size);
-       return size;
- }
- static ssize_t
- qlcnic_sysfs_get_esw_stats(struct file *file, struct kobject *kobj,
-       struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
- {
-       struct device *dev = container_of(kobj, struct device, kobj);
-       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       struct qlcnic_esw_statistics esw_stats;
-       int ret;
-       if (size != sizeof(struct qlcnic_esw_statistics))
-               return QL_STATUS_INVALID_PARAM;
-       if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
-               return QL_STATUS_INVALID_PARAM;
-       memset(&esw_stats, 0, size);
-       ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
-                                                               &esw_stats.rx);
-       if (ret)
-               return ret;
-       ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
-                                                               &esw_stats.tx);
-       if (ret)
-               return ret;
-       memcpy(buf, &esw_stats, size);
-       return size;
- }
- static ssize_t
- qlcnic_sysfs_clear_esw_stats(struct file *file, struct kobject *kobj,
-       struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
- {
-       struct device *dev = container_of(kobj, struct device, kobj);
-       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       int ret;
-       if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
-               return QL_STATUS_INVALID_PARAM;
-       ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
-                                               QLCNIC_QUERY_RX_COUNTER);
-       if (ret)
-               return ret;
-       ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
-                                               QLCNIC_QUERY_TX_COUNTER);
-       if (ret)
-               return ret;
-       return size;
- }
- static ssize_t
- qlcnic_sysfs_clear_port_stats(struct file *file, struct kobject *kobj,
-       struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
- {
-       struct device *dev = container_of(kobj, struct device, kobj);
-       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       int ret;
-       if (offset >= QLCNIC_MAX_PCI_FUNC)
-               return QL_STATUS_INVALID_PARAM;
-       ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
-                                               QLCNIC_QUERY_RX_COUNTER);
-       if (ret)
-               return ret;
-       ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
-                                               QLCNIC_QUERY_TX_COUNTER);
-       if (ret)
-               return ret;
-       return size;
- }
- static ssize_t
- qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
-       struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
- {
-       struct device *dev = container_of(kobj, struct device, kobj);
-       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
-       struct qlcnic_pci_info *pci_info;
-       int i, ret;
-       if (size != sizeof(pci_cfg))
-               return QL_STATUS_INVALID_PARAM;
-       pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
-       if (!pci_info)
-               return -ENOMEM;
-       ret = qlcnic_get_pci_info(adapter, pci_info);
-       if (ret) {
-               kfree(pci_info);
-               return ret;
-       }
-       for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
-               pci_cfg[i].pci_func = pci_info[i].id;
-               pci_cfg[i].func_type = pci_info[i].type;
-               pci_cfg[i].port_num = pci_info[i].default_port;
-               pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
-               pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
-               memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
-       }
-       memcpy(buf, &pci_cfg, size);
-       kfree(pci_info);
-       return size;
- }
- static struct bin_attribute bin_attr_npar_config = {
-       .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
-       .size = 0,
-       .read = qlcnic_sysfs_read_npar_config,
-       .write = qlcnic_sysfs_write_npar_config,
- };
- static struct bin_attribute bin_attr_pci_config = {
-       .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)},
-       .size = 0,
-       .read = qlcnic_sysfs_read_pci_config,
-       .write = NULL,
- };
- static struct bin_attribute bin_attr_port_stats = {
-       .attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)},
-       .size = 0,
-       .read = qlcnic_sysfs_get_port_stats,
-       .write = qlcnic_sysfs_clear_port_stats,
- };
- static struct bin_attribute bin_attr_esw_stats = {
-       .attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)},
-       .size = 0,
-       .read = qlcnic_sysfs_get_esw_stats,
-       .write = qlcnic_sysfs_clear_esw_stats,
- };
- static struct bin_attribute bin_attr_esw_config = {
-       .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
-       .size = 0,
-       .read = qlcnic_sysfs_read_esw_config,
-       .write = qlcnic_sysfs_write_esw_config,
- };
- static struct bin_attribute bin_attr_pm_config = {
-       .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)},
-       .size = 0,
-       .read = qlcnic_sysfs_read_pm_config,
-       .write = qlcnic_sysfs_write_pm_config,
- };
- static void
- qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
- {
-       struct device *dev = &adapter->pdev->dev;
-       if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
-               if (device_create_file(dev, &dev_attr_bridged_mode))
-                       dev_warn(dev,
-                               "failed to create bridged_mode sysfs entry\n");
- }
- static void
- qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
- {
-       struct device *dev = &adapter->pdev->dev;
-       if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
-               device_remove_file(dev, &dev_attr_bridged_mode);
- }
- static void
- qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
- {
-       struct device *dev = &adapter->pdev->dev;
-       u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
-       if (device_create_bin_file(dev, &bin_attr_port_stats))
-               dev_info(dev, "failed to create port stats sysfs entry");
-       if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
-               return;
-       if (device_create_file(dev, &dev_attr_diag_mode))
-               dev_info(dev, "failed to create diag_mode sysfs entry\n");
-       if (device_create_bin_file(dev, &bin_attr_crb))
-               dev_info(dev, "failed to create crb sysfs entry\n");
-       if (device_create_bin_file(dev, &bin_attr_mem))
-               dev_info(dev, "failed to create mem sysfs entry\n");
-       if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
-               return;
-       if (device_create_bin_file(dev, &bin_attr_pci_config))
-               dev_info(dev, "failed to create pci config sysfs entry");
-       if (device_create_file(dev, &dev_attr_beacon))
-               dev_info(dev, "failed to create beacon sysfs entry");
-       if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
-               return;
-       if (device_create_bin_file(dev, &bin_attr_esw_config))
-               dev_info(dev, "failed to create esw config sysfs entry");
-       if (adapter->op_mode != QLCNIC_MGMT_FUNC)
-               return;
-       if (device_create_bin_file(dev, &bin_attr_npar_config))
-               dev_info(dev, "failed to create npar config sysfs entry");
-       if (device_create_bin_file(dev, &bin_attr_pm_config))
-               dev_info(dev, "failed to create pm config sysfs entry");
-       if (device_create_bin_file(dev, &bin_attr_esw_stats))
-               dev_info(dev, "failed to create eswitch stats sysfs entry");
- }
- static void
- qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
- {
-       struct device *dev = &adapter->pdev->dev;
-       u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
-       device_remove_bin_file(dev, &bin_attr_port_stats);
-       if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
-               return;
-       device_remove_file(dev, &dev_attr_diag_mode);
-       device_remove_bin_file(dev, &bin_attr_crb);
-       device_remove_bin_file(dev, &bin_attr_mem);
-       if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
-               return;
-       device_remove_bin_file(dev, &bin_attr_pci_config);
-       device_remove_file(dev, &dev_attr_beacon);
-       if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
-               return;
-       device_remove_bin_file(dev, &bin_attr_esw_config);
-       if (adapter->op_mode != QLCNIC_MGMT_FUNC)
-               return;
-       device_remove_bin_file(dev, &bin_attr_npar_config);
-       device_remove_bin_file(dev, &bin_attr_pm_config);
-       device_remove_bin_file(dev, &bin_attr_esw_stats);
- }
  #ifdef CONFIG_INET
  
  #define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
@@@ -4523,7 -2964,7 +2964,7 @@@ static voi
  qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event)
  { }
  #endif
- static const struct pci_error_handlers qlcnic_err_handler = {
+ static struct pci_error_handlers qlcnic_err_handler = {
        .error_detected = qlcnic_io_error_detected,
        .slot_reset = qlcnic_io_slot_reset,
        .resume = qlcnic_io_resume,
@@@ -4533,7 -2974,7 +2974,7 @@@ static struct pci_driver qlcnic_driver 
        .name = qlcnic_driver_name,
        .id_table = qlcnic_pci_tbl,
        .probe = qlcnic_probe,
-       .remove = __devexit_p(qlcnic_remove),
+       .remove = qlcnic_remove,
  #ifdef CONFIG_PM
        .suspend = qlcnic_suspend,
        .resume = qlcnic_resume,
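
The qlcnic hunks above drop a large block of sysfs plumbing from qlcnic_main.c: validation helpers plus read/write handlers for npar_config, pci_config, port_stats, esw_stats, esw_config and pm_config, all wired up through struct bin_attribute. As a reference for the idiom (not qlcnic code; the names below are hypothetical and the sketch assumes ordinary kernel headers), a minimal binary sysfs attribute looks like this:

	static ssize_t demo_read(struct file *file, struct kobject *kobj,
				 struct bin_attribute *attr, char *buf,
				 loff_t offset, size_t size)
	{
		/* fill at most 'size' bytes of the caller's buffer */
		memset(buf, 0, size);
		return size;
	}

	static struct bin_attribute bin_attr_demo = {
		.attr = { .name = "demo_blob", .mode = S_IRUGO },
		.size = 0,			/* no fixed size */
		.read = demo_read,
	};

	/* registration in probe */
	if (device_create_bin_file(dev, &bin_attr_demo))
		dev_info(dev, "failed to create demo sysfs entry\n");

	/* teardown in remove */
	device_remove_bin_file(dev, &bin_attr_demo);
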
index dcce10eb5ad91cd7333720ad44a763a814982fa4,430a9ac56091ec5c8226708a7735ae099a3c016c..065704c605d5230275b3dbf2d2c0ce1decc3818e
@@@ -369,12 -369,12 +369,12 @@@ static int do_validate_mem(struct pcmci
                }
        }
  
 -      free_region(res2);
 -      free_region(res1);
 -
        dev_dbg(&s->dev, "cs: memory probe 0x%06lx-0x%06lx: %p %p %u %u %u",
                base, base+size-1, res1, res2, ret, info1, info2);
  
 +      free_region(res2);
 +      free_region(res1);
 +
        if ((ret) || (info1 != info2) || (info1 == 0))
                return -EINVAL;
  
@@@ -1199,7 -1199,7 +1199,7 @@@ static const struct attribute_group rsr
        .attrs = pccard_rsrc_attributes,
  };
  
- static int __devinit pccard_sysfs_add_rsrc(struct device *dev,
+ static int pccard_sysfs_add_rsrc(struct device *dev,
                                           struct class_interface *class_intf)
  {
        struct pcmcia_socket *s = dev_get_drvdata(dev);
        return sysfs_create_group(&dev->kobj, &rsrc_attributes);
  }
  
- static void __devexit pccard_sysfs_remove_rsrc(struct device *dev,
+ static void pccard_sysfs_remove_rsrc(struct device *dev,
                                               struct class_interface *class_intf)
  {
        struct pcmcia_socket *s = dev_get_drvdata(dev);
  static struct class_interface pccard_rsrc_interface __refdata = {
        .class = &pcmcia_socket_class,
        .add_dev = &pccard_sysfs_add_rsrc,
-       .remove_dev = __devexit_p(&pccard_sysfs_remove_rsrc),
+       .remove_dev = &pccard_sysfs_remove_rsrc,
  };
  
  static int __init nonstatic_sysfs_init(void)
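
Several drivers in this merge (qlcnic above, pcmcia here, rpmsg and octeon below) lose their __devinit/__devexit markers and the __devexit_p() wrappers around remove callbacks. Those annotations only mattered while CONFIG_HOTPLUG could be disabled; from memory the wrapper behaved roughly as paraphrased below (approximate, not a verbatim quote of init.h), which is why dropping it leaves a plain function pointer assignment with the same behaviour:

	/* approximate historical definition, paraphrased */
	#if defined(MODULE) || defined(CONFIG_HOTPLUG)
	#define __devexit_p(x)	x	/* keep the remove callback */
	#else
	#define __devexit_p(x)	NULL	/* remove can never be called */
	#endif

	/* after the cleanup, e.g. in the pci/platform driver structs: */
	.remove = qlcnic_remove,
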
index a3c4c030007d7133f9cedbd468befd0ff7781793,f1e323924f1272ca3eea8546bdefb26613837559..d85446021ddb36a1ce206fef33a1a81cde0464ab
@@@ -764,7 -764,7 +764,7 @@@ int rpmsg_send_offchannel_raw(struct rp
  
        /* add message to the remote processor's virtqueue */
        err = virtqueue_add_buf(vrp->svq, &sg, 1, 0, msg, GFP_KERNEL);
-       if (err < 0) {
+       if (err) {
                /*
                 * need to reclaim the buffer here, otherwise it's lost
                 * (memory won't leak, but rpmsg won't use it again for TX).
  
        /* tell the remote processor it has a pending message to read */
        virtqueue_kick(vrp->svq);
-       err = 0;
  out:
        mutex_unlock(&vrp->tx_lock);
        return err;
@@@ -841,7 -839,7 +839,7 @@@ static void rpmsg_recv_done(struct virt
                /* farewell, ept, we don't need you anymore */
                kref_put(&ept->refcount, __ept_release);
        } else
 -              dev_warn(dev, "msg received with no recepient\n");
 +              dev_warn(dev, "msg received with no recipient\n");
  
        /* publish the real size of the buffer */
        sg_init_one(&sg, msg, RPMSG_BUF_SIZE);
@@@ -980,7 -978,7 +978,7 @@@ static int rpmsg_probe(struct virtio_de
  
                err = virtqueue_add_buf(vrp->rvq, &sg, 0, 1, cpu_addr,
                                                                GFP_KERNEL);
-               WARN_ON(err < 0); /* sanity check; this can't really happen */
+               WARN_ON(err); /* sanity check; this can't really happen */
        }
  
        /* suppress "tx-complete" interrupts */
@@@ -1024,7 -1022,7 +1022,7 @@@ static int rpmsg_remove_device(struct d
        return 0;
  }
  
- static void __devexit rpmsg_remove(struct virtio_device *vdev)
+ static void rpmsg_remove(struct virtio_device *vdev)
  {
        struct virtproc_info *vrp = vdev->priv;
        int ret;
@@@ -1065,7 -1063,7 +1063,7 @@@ static struct virtio_driver virtio_ipc_
        .driver.owner   = THIS_MODULE,
        .id_table       = id_table,
        .probe          = rpmsg_probe,
-       .remove         = __devexit_p(rpmsg_remove),
+       .remove         = rpmsg_remove,
  };
  
  static int __init rpmsg_init(void)
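
The virtio_rpmsg_bus changes track the virtqueue_add_buf() convention in this kernel generation, where success is 0 and failure a negative errno rather than a remaining-capacity count, so the `if (err < 0)` test tightens to `if (err)` and the explicit `err = 0` after a successful add is no longer needed. A condensed sketch of the resulting send path, using only the calls visible in the hunks above and omitting the buffer-reclaim and wait logic:

	mutex_lock(&vrp->tx_lock);
	err = virtqueue_add_buf(vrp->svq, &sg, 1, 0, msg, GFP_KERNEL);
	if (err)			/* 0 on success, -errno on failure */
		goto out;
	virtqueue_kick(vrp->svq);	/* notify the remote processor */
out:
	mutex_unlock(&vrp->tx_lock);
	return err;
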
diff --combined drivers/scsi/Kconfig
index ba7aa886e23e74073e137ea8a876d7c0358804af,142f632e2a2e45c7377709989d5495382e74f5c9..65e764cb33aff2ace6c579eb45989f22d238f120
@@@ -603,6 -603,7 +603,7 @@@ config SCSI_ARCMS
  
  source "drivers/scsi/megaraid/Kconfig.megaraid"
  source "drivers/scsi/mpt2sas/Kconfig"
+ source "drivers/scsi/mpt3sas/Kconfig"
  source "drivers/scsi/ufs/Kconfig"
  
  config SCSI_HPTIOP
@@@ -882,7 -883,7 +883,7 @@@ config SCSI_IBMVSCS
          This is the IBM POWER Virtual SCSI Client
  
          To compile this driver as a module, choose M here: the
 -        module will be called ibmvscsic.
 +        module will be called ibmvscsi.
  
  config SCSI_IBMVSCSIS
        tristate "IBM Virtual SCSI Server support"
@@@ -1812,6 -1813,7 +1813,7 @@@ config SCSI_VIRTI
            This is the virtual HBA driver for virtio.  If the kernel will
            be used in a virtual machine, say Y or M.
  
+ source "drivers/scsi/csiostor/Kconfig"
  
  endif # SCSI_LOWLEVEL
  
index b595b9ddd5367e30d2ba7e4f290951072097ef0f,ef32dc1bbc802901566da71804a5001b8c709396..1335283412e2d1cb951083d116b8ce8802f1d405
@@@ -72,7 -72,7 +72,7 @@@ int pow_receive_group = 15
  module_param(pow_receive_group, int, 0444);
  MODULE_PARM_DESC(pow_receive_group, "\n"
        "\tPOW group to receive packets from. All ethernet hardware\n"
 -      "\twill be configured to send incomming packets to this POW\n"
 +      "\twill be configured to send incoming packets to this POW\n"
        "\tgroup. Also any other software can submit packets to this\n"
        "\tgroup for the kernel to process.");
  
@@@ -169,7 -169,7 +169,7 @@@ static void cvm_oct_periodic_worker(str
                queue_delayed_work(cvm_oct_poll_queue, &priv->port_periodic_work, HZ);
   }
  
- static __devinit void cvm_oct_configure_common_hw(void)
+ static void cvm_oct_configure_common_hw(void)
  {
        /* Setup the FPA */
        cvmx_fpa_enable();
@@@ -586,7 -586,7 +586,7 @@@ static const struct net_device_ops cvm_
  
  extern void octeon_mdiobus_force_mod_depencency(void);
  
- static struct device_node * __devinit cvm_oct_of_get_child(const struct device_node *parent,
+ static struct device_node *cvm_oct_of_get_child(const struct device_node *parent,
                                                           int reg_val)
  {
        struct device_node *node = NULL;
        return node;
  }
  
- static struct device_node * __devinit cvm_oct_node_for_port(struct device_node *pip,
+ static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
                                                            int interface, int port)
  {
        struct device_node *ni, *np;
        return np;
  }
  
- static int __devinit cvm_oct_probe(struct platform_device *pdev)
+ static int cvm_oct_probe(struct platform_device *pdev)
  {
        int num_interfaces;
        int interface;
        return 0;
  }
  
- static int __devexit cvm_oct_remove(struct platform_device *pdev)
+ static int cvm_oct_remove(struct platform_device *pdev)
  {
        int port;
  
@@@ -874,7 -874,7 +874,7 @@@ MODULE_DEVICE_TABLE(of, cvm_oct_match)
  
  static struct platform_driver cvm_oct_driver = {
        .probe          = cvm_oct_probe,
-       .remove         = __devexit_p(cvm_oct_remove),
+       .remove         = cvm_oct_remove,
        .driver         = {
                .owner  = THIS_MODULE,
                .name   = KBUILD_MODNAME,
index be793883413d8d3cd0a158e1c2a75b654579c05e,2e8d06f198aeb49fe2ac3ef4c5dbed820abac415..6917a9e938e7b152eac6e9fddcca399961c796ba
@@@ -704,16 -704,17 +704,17 @@@ static void session_maintenance_work(st
  static int tgt_agent_rw_agent_state(struct fw_card *card, int tcode, void *data,
                struct sbp_target_agent *agent)
  {
-       __be32 state;
+       int state;
  
        switch (tcode) {
        case TCODE_READ_QUADLET_REQUEST:
                pr_debug("tgt_agent AGENT_STATE READ\n");
  
                spin_lock_bh(&agent->lock);
-               state = cpu_to_be32(agent->state);
+               state = agent->state;
                spin_unlock_bh(&agent->lock);
-               memcpy(data, &state, sizeof(state));
+               *(__be32 *)data = cpu_to_be32(state);
  
                return RCODE_COMPLETE;
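
In tgt_agent_rw_agent_state() above, the agent state is now kept in CPU byte order and converted exactly once at the point of the store, *(__be32 *)data = cpu_to_be32(state), instead of converting into a local __be32 and memcpy()ing it. Both forms put the same big-endian bytes into the response buffer; a tiny side-by-side illustration (hypothetical values, kernel byteorder helpers assumed, 'data' being the 4-byte response buffer):

	__be32 wire;
	int state = 2;			/* illustrative value */

	/* old shape: convert, then copy the four bytes */
	wire = cpu_to_be32(state);
	memcpy(data, &wire, sizeof(wire));

	/* new shape: one typed store, same bytes land in 'data' */
	*(__be32 *)data = cpu_to_be32(state);
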
  
@@@ -1718,7 -1719,7 +1719,7 @@@ static struct se_node_acl *sbp_alloc_fa
  
        nacl = kzalloc(sizeof(struct sbp_nacl), GFP_KERNEL);
        if (!nacl) {
 -              pr_err("Unable to alocate struct sbp_nacl\n");
 +              pr_err("Unable to allocate struct sbp_nacl\n");
                return NULL;
        }
  
@@@ -2207,20 -2208,23 +2208,23 @@@ static struct se_portal_group *sbp_make
        tport->mgt_agt = sbp_management_agent_register(tport);
        if (IS_ERR(tport->mgt_agt)) {
                ret = PTR_ERR(tport->mgt_agt);
-               kfree(tpg);
-               return ERR_PTR(ret);
+               goto out_free_tpg;
        }
  
        ret = core_tpg_register(&sbp_fabric_configfs->tf_ops, wwn,
                        &tpg->se_tpg, (void *)tpg,
                        TRANSPORT_TPG_TYPE_NORMAL);
-       if (ret < 0) {
-               sbp_management_agent_unregister(tport->mgt_agt);
-               kfree(tpg);
-               return ERR_PTR(ret);
-       }
+       if (ret < 0)
+               goto out_unreg_mgt_agt;
  
        return &tpg->se_tpg;
+ out_unreg_mgt_agt:
+       sbp_management_agent_unregister(tport->mgt_agt);
+ out_free_tpg:
+       tport->tpg = NULL;
+       kfree(tpg);
+       return ERR_PTR(ret);
  }
  
  static void sbp_drop_tpg(struct se_portal_group *se_tpg)
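
sbp_make_tpg() above stops duplicating its failure cleanup in each error branch and instead unwinds through labels at the bottom, also clearing tport->tpg before the kfree(). The general shape of that unwind idiom, with hypothetical names rather than the target-core API:

	static struct thing *make_thing(void)
	{
		struct thing *t;
		int ret;

		t = kzalloc(sizeof(*t), GFP_KERNEL);
		if (!t)
			return ERR_PTR(-ENOMEM);

		ret = register_part_a(t);	/* hypothetical step */
		if (ret)
			goto out_free;

		ret = register_part_b(t);	/* hypothetical step */
		if (ret)
			goto out_unreg_a;

		return t;

	out_unreg_a:
		unregister_part_a(t);
	out_free:
		kfree(t);
		return ERR_PTR(ret);
	}
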
diff --combined fs/btrfs/extent-tree.c
index 604befdcc35992ee628ef1ceae0012dff2340949,a8b8adc0507059137898cae2e058570e11843291..c61ebb63056158defcec07c8b8960c52d2ab572b
@@@ -33,6 -33,7 +33,7 @@@
  #include "volumes.h"
  #include "locking.h"
  #include "free-space-cache.h"
+ #include "math.h"
  
  #undef SCRAMBLE_DELAYED_REFS
  
@@@ -649,24 -650,6 +650,6 @@@ void btrfs_clear_space_info_full(struc
        rcu_read_unlock();
  }
  
- static u64 div_factor(u64 num, int factor)
- {
-       if (factor == 10)
-               return num;
-       num *= factor;
-       do_div(num, 10);
-       return num;
- }
- static u64 div_factor_fine(u64 num, int factor)
- {
-       if (factor == 100)
-               return num;
-       num *= factor;
-       do_div(num, 100);
-       return num;
- }
  u64 btrfs_find_block_group(struct btrfs_root *root,
                           u64 search_start, u64 search_hint, int owner)
  {
@@@ -1835,7 -1818,7 +1818,7 @@@ static int btrfs_discard_extent(struct 
  
  
        /* Tell the block device(s) that the sectors can be discarded */
-       ret = btrfs_map_block(&root->fs_info->mapping_tree, REQ_DISCARD,
+       ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
                              bytenr, &num_bytes, &bbio, 0);
        /* Error condition is -ENOMEM */
        if (!ret) {
@@@ -2314,6 -2297,9 +2297,9 @@@ static noinline int run_clustered_refs(
                                kfree(extent_op);
  
                                if (ret) {
+                                       list_del_init(&locked_ref->cluster);
+                                       mutex_unlock(&locked_ref->mutex);
                                        printk(KERN_DEBUG "btrfs: run_delayed_extent_op returned %d\n", ret);
                                        spin_lock(&delayed_refs->lock);
                                        return ret;
                count++;
  
                if (ret) {
+                       if (locked_ref) {
+                               list_del_init(&locked_ref->cluster);
+                               mutex_unlock(&locked_ref->mutex);
+                       }
                        printk(KERN_DEBUG "btrfs: run_one_delayed_ref returned %d\n", ret);
                        spin_lock(&delayed_refs->lock);
                        return ret;
@@@ -3661,7 -3651,7 +3651,7 @@@ out
  
  static int can_overcommit(struct btrfs_root *root,
                          struct btrfs_space_info *space_info, u64 bytes,
-                         int flush)
+                         enum btrfs_reserve_flush_enum flush)
  {
        u64 profile = btrfs_get_alloc_profile(root, 0);
        u64 avail;
                avail >>= 1;
  
        /*
-        * If we aren't flushing don't let us overcommit too much, say
-        * 1/8th of the space.  If we can flush, let it overcommit up to
-        * 1/2 of the space.
+        * If we aren't flushing all things, let us overcommit up to
+        * 1/2th of the space. If we can flush, don't let us overcommit
+        * too much, let it overcommit up to 1/8 of the space.
         */
-       if (flush)
+       if (flush == BTRFS_RESERVE_FLUSH_ALL)
                avail >>= 3;
        else
                avail >>= 1;
        return 0;
  }
  
+ static int writeback_inodes_sb_nr_if_idle_safe(struct super_block *sb,
+                                              unsigned long nr_pages,
+                                              enum wb_reason reason)
+ {
+       if (!writeback_in_progress(sb->s_bdi) &&
+           down_read_trylock(&sb->s_umount)) {
+               writeback_inodes_sb_nr(sb, nr_pages, reason);
+               up_read(&sb->s_umount);
+               return 1;
+       }
+       return 0;
+ }
  /*
   * shrink metadata reservation for delalloc
   */
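
Spelling out the arithmetic in the can_overcommit() hunk above (numbers are illustrative only): after the profile adjustment, BTRFS_RESERVE_FLUSH_ALL applies avail >>= 3, capping overcommit at one eighth of the remaining space, while every other mode applies avail >>= 1, half of it. With, say, 8 GiB of usable free metadata space, a fully flushing reservation may therefore overcommit by up to 1 GiB and a limited or non-flushing one by up to 4 GiB; the surrounding check (not fully shown in this hunk) is roughly whether the requested bytes still fit under the total plus that allowance.
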
@@@ -3713,6 -3717,7 +3717,7 @@@ static void shrink_delalloc(struct btrf
        long time_left;
        unsigned long nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
        int loops = 0;
+       enum btrfs_reserve_flush_enum flush;
  
        trans = (struct btrfs_trans_handle *)current->journal_info;
        block_rsv = &root->fs_info->delalloc_block_rsv;
        while (delalloc_bytes && loops < 3) {
                max_reclaim = min(delalloc_bytes, to_reclaim);
                nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
-               writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages,
-                                              WB_REASON_FS_FREE_SPACE);
+               writeback_inodes_sb_nr_if_idle_safe(root->fs_info->sb,
+                                                   nr_pages,
+                                                   WB_REASON_FS_FREE_SPACE);
  
                /*
                 * We need to wait for the async pages to actually start before
                wait_event(root->fs_info->async_submit_wait,
                           !atomic_read(&root->fs_info->async_delalloc_pages));
  
+               if (!trans)
+                       flush = BTRFS_RESERVE_FLUSH_ALL;
+               else
+                       flush = BTRFS_RESERVE_NO_FLUSH;
                spin_lock(&space_info->lock);
-               if (can_overcommit(root, space_info, orig, !trans)) {
+               if (can_overcommit(root, space_info, orig, flush)) {
                        spin_unlock(&space_info->lock);
                        break;
                }
@@@ -3899,7 -3909,8 +3909,8 @@@ static int flush_space(struct btrfs_roo
   */
  static int reserve_metadata_bytes(struct btrfs_root *root,
                                  struct btrfs_block_rsv *block_rsv,
-                                 u64 orig_bytes, int flush)
+                                 u64 orig_bytes,
+                                 enum btrfs_reserve_flush_enum flush)
  {
        struct btrfs_space_info *space_info = block_rsv->space_info;
        u64 used;
@@@ -3912,10 -3923,11 +3923,11 @@@ again
        ret = 0;
        spin_lock(&space_info->lock);
        /*
-        * We only want to wait if somebody other than us is flushing and we are
-        * actually alloed to flush.
+        * We only want to wait if somebody other than us is flushing and we
+        * are actually allowed to flush all things.
         */
-       while (flush && !flushing && space_info->flush) {
+       while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
+              space_info->flush) {
                spin_unlock(&space_info->lock);
                /*
                 * If we have a trans handle we can't wait because the flusher
         * Couldn't make our reservation, save our place so while we're trying
         * to reclaim space we can actually use it instead of somebody else
         * stealing it from us.
+        *
+        * We make the other tasks wait for the flush only when we can flush
+        * all things.
         */
-       if (ret && flush) {
+       if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
                flushing = true;
                space_info->flush = 1;
        }
  
        spin_unlock(&space_info->lock);
  
-       if (!ret || !flush)
+       if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
                goto out;
  
        ret = flush_space(root, space_info, num_bytes, orig_bytes,
                          flush_state);
        flush_state++;
+       /*
+        * If we are FLUSH_LIMIT, we can not flush delalloc, or the deadlock
+        * would happen. So skip delalloc flush.
+        */
+       if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
+           (flush_state == FLUSH_DELALLOC ||
+            flush_state == FLUSH_DELALLOC_WAIT))
+               flush_state = ALLOC_CHUNK;
        if (!ret)
                goto again;
-       else if (flush_state <= COMMIT_TRANS)
+       else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
+                flush_state < COMMIT_TRANS)
+               goto again;
+       else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
+                flush_state <= COMMIT_TRANS)
                goto again;
  
  out:
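
Taken together, the reserve_metadata_bytes() hunks above turn the old flush boolean into three modes: BTRFS_RESERVE_NO_FLUSH gives up immediately when the space is not there, BTRFS_RESERVE_FLUSH_LIMIT walks the flush states but jumps over the delalloc ones (straight to ALLOC_CHUNK) and never reaches COMMIT_TRANS, and BTRFS_RESERVE_FLUSH_ALL may run every state up to and including the transaction commit. Callers now say which they want; an illustrative call using the signature introduced later in this diff (rsv and num_bytes are placeholder locals):

	ret = btrfs_block_rsv_add(root, rsv, num_bytes,
				  BTRFS_RESERVE_FLUSH_LIMIT);
	if (ret)
		return ret;
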
@@@ -4148,9 -4177,9 +4177,9 @@@ void btrfs_free_block_rsv(struct btrfs_
        kfree(rsv);
  }
  
- static inline int __block_rsv_add(struct btrfs_root *root,
-                                 struct btrfs_block_rsv *block_rsv,
-                                 u64 num_bytes, int flush)
+ int btrfs_block_rsv_add(struct btrfs_root *root,
+                       struct btrfs_block_rsv *block_rsv, u64 num_bytes,
+                       enum btrfs_reserve_flush_enum flush)
  {
        int ret;
  
        return ret;
  }
  
- int btrfs_block_rsv_add(struct btrfs_root *root,
-                       struct btrfs_block_rsv *block_rsv,
-                       u64 num_bytes)
- {
-       return __block_rsv_add(root, block_rsv, num_bytes, 1);
- }
- int btrfs_block_rsv_add_noflush(struct btrfs_root *root,
-                               struct btrfs_block_rsv *block_rsv,
-                               u64 num_bytes)
- {
-       return __block_rsv_add(root, block_rsv, num_bytes, 0);
- }
  int btrfs_block_rsv_check(struct btrfs_root *root,
                          struct btrfs_block_rsv *block_rsv, int min_factor)
  {
        return ret;
  }
  
- static inline int __btrfs_block_rsv_refill(struct btrfs_root *root,
-                                          struct btrfs_block_rsv *block_rsv,
-                                          u64 min_reserved, int flush)
+ int btrfs_block_rsv_refill(struct btrfs_root *root,
+                          struct btrfs_block_rsv *block_rsv, u64 min_reserved,
+                          enum btrfs_reserve_flush_enum flush)
  {
        u64 num_bytes = 0;
        int ret = -ENOSPC;
        return ret;
  }
  
- int btrfs_block_rsv_refill(struct btrfs_root *root,
-                          struct btrfs_block_rsv *block_rsv,
-                          u64 min_reserved)
- {
-       return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 1);
- }
- int btrfs_block_rsv_refill_noflush(struct btrfs_root *root,
-                                  struct btrfs_block_rsv *block_rsv,
-                                  u64 min_reserved)
- {
-       return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 0);
- }
  int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
                            struct btrfs_block_rsv *dst_rsv,
                            u64 num_bytes)
@@@ -4532,17 -4533,27 +4533,27 @@@ int btrfs_delalloc_reserve_metadata(str
        u64 csum_bytes;
        unsigned nr_extents = 0;
        int extra_reserve = 0;
-       int flush = 1;
+       enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
        int ret;
+       bool delalloc_lock = true;
  
-       /* Need to be holding the i_mutex here if we aren't free space cache */
-       if (btrfs_is_free_space_inode(inode))
-               flush = 0;
+       /* If we are a free space inode we need to not flush since we will be in
+        * the middle of a transaction commit.  We also don't need the delalloc
+        * mutex since we won't race with anybody.  We need this mostly to make
+        * lockdep shut its filthy mouth.
+        */
+       if (btrfs_is_free_space_inode(inode)) {
+               flush = BTRFS_RESERVE_NO_FLUSH;
+               delalloc_lock = false;
+       }
  
-       if (flush && btrfs_transaction_in_commit(root->fs_info))
+       if (flush != BTRFS_RESERVE_NO_FLUSH &&
+           btrfs_transaction_in_commit(root->fs_info))
                schedule_timeout(1);
  
-       mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
+       if (delalloc_lock)
+               mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
        num_bytes = ALIGN(num_bytes, root->sectorsize);
  
        spin_lock(&BTRFS_I(inode)->lock);
                ret = btrfs_qgroup_reserve(root, num_bytes +
                                           nr_extents * root->leafsize);
                if (ret) {
-                       mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
+                       spin_lock(&BTRFS_I(inode)->lock);
+                       calc_csum_metadata_size(inode, num_bytes, 0);
+                       spin_unlock(&BTRFS_I(inode)->lock);
+                       if (delalloc_lock)
+                               mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
                        return ret;
                }
        }
                                                      btrfs_ino(inode),
                                                      to_free, 0);
                }
-               mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
+               if (root->fs_info->quota_enabled) {
+                       btrfs_qgroup_free(root, num_bytes +
+                                               nr_extents * root->leafsize);
+               }
+               if (delalloc_lock)
+                       mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
                return ret;
        }
  
        }
        BTRFS_I(inode)->reserved_extents += nr_extents;
        spin_unlock(&BTRFS_I(inode)->lock);
-       mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
+       if (delalloc_lock)
+               mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
  
        if (to_reserve)
                trace_btrfs_space_reservation(root->fs_info,"delalloc",
@@@ -4969,9 -4991,13 +4991,13 @@@ static int unpin_extent_range(struct bt
  {
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_block_group_cache *cache = NULL;
+       struct btrfs_space_info *space_info;
+       struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
        u64 len;
+       bool readonly;
  
        while (start <= end) {
+               readonly = false;
                if (!cache ||
                    start >= cache->key.objectid + cache->key.offset) {
                        if (cache)
                }
  
                start += len;
+               space_info = cache->space_info;
  
-               spin_lock(&cache->space_info->lock);
+               spin_lock(&space_info->lock);
                spin_lock(&cache->lock);
                cache->pinned -= len;
-               cache->space_info->bytes_pinned -= len;
-               if (cache->ro)
-                       cache->space_info->bytes_readonly += len;
+               space_info->bytes_pinned -= len;
+               if (cache->ro) {
+                       space_info->bytes_readonly += len;
+                       readonly = true;
+               }
                spin_unlock(&cache->lock);
-               spin_unlock(&cache->space_info->lock);
+               if (!readonly && global_rsv->space_info == space_info) {
+                       spin_lock(&global_rsv->lock);
+                       if (!global_rsv->full) {
+                               len = min(len, global_rsv->size -
+                                         global_rsv->reserved);
+                               global_rsv->reserved += len;
+                               space_info->bytes_may_use += len;
+                               if (global_rsv->reserved >= global_rsv->size)
+                                       global_rsv->full = 1;
+                       }
+                       spin_unlock(&global_rsv->lock);
+               }
+               spin_unlock(&space_info->lock);
        }
  
        if (cache)
@@@ -5466,7 -5507,7 +5507,7 @@@ wait_block_group_cache_done(struct btrf
        return 0;
  }
  
- static int __get_block_group_index(u64 flags)
+ int __get_raid_index(u64 flags)
  {
        int index;
  
  
  static int get_block_group_index(struct btrfs_block_group_cache *cache)
  {
-       return __get_block_group_index(cache->flags);
+       return __get_raid_index(cache->flags);
  }
  
  enum btrfs_loop_type {
@@@ -5519,7 -5560,7 +5560,7 @@@ static noinline int find_free_extent(st
        int empty_cluster = 2 * 1024 * 1024;
        struct btrfs_space_info *space_info;
        int loop = 0;
-       int index = 0;
+       int index = __get_raid_index(data);
        int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
                RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
        bool found_uncached_bg = false;
@@@ -6269,7 -6310,8 +6310,8 @@@ use_block_rsv(struct btrfs_trans_handl
        block_rsv = get_block_rsv(trans, root);
  
        if (block_rsv->size == 0) {
-               ret = reserve_metadata_bytes(root, block_rsv, blocksize, 0);
+               ret = reserve_metadata_bytes(root, block_rsv, blocksize,
+                                            BTRFS_RESERVE_NO_FLUSH);
                /*
                 * If we couldn't reserve metadata bytes try and use some from
                 * the global reserve.
                static DEFINE_RATELIMIT_STATE(_rs,
                                DEFAULT_RATELIMIT_INTERVAL,
                                /*DEFAULT_RATELIMIT_BURST*/ 2);
-               if (__ratelimit(&_rs)) {
-                       printk(KERN_DEBUG "btrfs: block rsv returned %d\n", ret);
-                       WARN_ON(1);
-               }
-               ret = reserve_metadata_bytes(root, block_rsv, blocksize, 0);
+               if (__ratelimit(&_rs))
+                       WARN(1, KERN_DEBUG "btrfs: block rsv returned %d\n",
+                            ret);
+               ret = reserve_metadata_bytes(root, block_rsv, blocksize,
+                                            BTRFS_RESERVE_NO_FLUSH);
                if (!ret) {
                        return block_rsv;
                } else if (ret && block_rsv != global_rsv) {
@@@ -6482,7 -6524,7 +6524,7 @@@ reada
  }
  
  /*
 - * hepler to process tree block while walking down the tree.
 + * helper to process tree block while walking down the tree.
   *
   * when wc->stage == UPDATE_BACKREF, this function updates
   * back refs for pointers in the block.
@@@ -6557,7 -6599,7 +6599,7 @@@ static noinline int walk_down_proc(stru
  }
  
  /*
 - * hepler to process tree block pointer.
 + * helper to process tree block pointer.
   *
   * when wc->stage == DROP_REFERENCE, this function checks
   * reference count of the block pointed to. if the block
@@@ -6695,7 -6737,7 +6737,7 @@@ skip
  }
  
  /*
 - * hepler to process tree block while walking up the tree.
 + * helper to process tree block while walking up the tree.
   *
   * when wc->stage == DROP_REFERENCE, this function drops
   * reference count on the block.
@@@ -6746,11 -6788,13 +6788,13 @@@ static noinline int walk_up_proc(struc
                                                       &wc->flags[level]);
                        if (ret < 0) {
                                btrfs_tree_unlock_rw(eb, path->locks[level]);
+                               path->locks[level] = 0;
                                return ret;
                        }
                        BUG_ON(wc->refs[level] == 0);
                        if (wc->refs[level] == 1) {
                                btrfs_tree_unlock_rw(eb, path->locks[level]);
+                               path->locks[level] = 0;
                                return 1;
                        }
                }
@@@ -7427,7 -7471,7 +7471,7 @@@ int btrfs_can_relocate(struct btrfs_roo
         */
        target = get_restripe_target(root->fs_info, block_group->flags);
        if (target) {
-               index = __get_block_group_index(extended_to_chunk(target));
+               index = __get_raid_index(extended_to_chunk(target));
        } else {
                /*
                 * this is just a balance, so if we were marked as full
                 * check to make sure we can actually find a chunk with enough
                 * space to fit our block group in.
                 */
-               if (device->total_bytes > device->bytes_used + min_free) {
+               if (device->total_bytes > device->bytes_used + min_free &&
+                   !device->is_tgtdev_for_dev_replace) {
                        ret = find_free_dev_extent(device, min_free,
                                                   &dev_offset, NULL);
                        if (!ret)
diff --combined fs/btrfs/relocation.c
index c188e815a7330570bd79d292eed68cec7ee0d372,300e09ac36599ae8b412284b43e677792005fe9e..17c306bf177a22b89f73df539d4622a63946f07f
@@@ -2025,7 -2025,6 +2025,6 @@@ static noinline_for_stack int merge_rel
        struct btrfs_root_item *root_item;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
-       unsigned long nr;
        int level;
        int max_level;
        int replaced = 0;
                BUG_ON(IS_ERR(trans));
                trans->block_rsv = rc->block_rsv;
  
-               ret = btrfs_block_rsv_refill(root, rc->block_rsv, min_reserved);
+               ret = btrfs_block_rsv_refill(root, rc->block_rsv, min_reserved,
+                                            BTRFS_RESERVE_FLUSH_ALL);
                if (ret) {
                        BUG_ON(ret != -EAGAIN);
                        ret = btrfs_commit_transaction(trans, root);
                               path->slots[level]);
                root_item->drop_level = level;
  
-               nr = trans->blocks_used;
                btrfs_end_transaction_throttle(trans, root);
  
-               btrfs_btree_balance_dirty(root, nr);
+               btrfs_btree_balance_dirty(root);
  
                if (replaced && rc->stage == UPDATE_DATA_PTRS)
                        invalidate_extent_cache(root, &key, &next_key);
@@@ -2155,10 -2154,9 +2154,9 @@@ out
                btrfs_update_reloc_root(trans, root);
        }
  
-       nr = trans->blocks_used;
        btrfs_end_transaction_throttle(trans, root);
  
-       btrfs_btree_balance_dirty(root, nr);
+       btrfs_btree_balance_dirty(root);
  
        if (replaced && rc->stage == UPDATE_DATA_PTRS)
                invalidate_extent_cache(root, &key, &next_key);
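
Throughout relocation.c (and again in transaction.c below) the sequence of saving trans->blocks_used into a local nr, ending the transaction and then calling btrfs_btree_balance_dirty(root, nr) collapses into a call that takes only the root, so the unsigned long nr locals disappear. The resulting idiom, as it reads after these hunks:

	btrfs_end_transaction_throttle(trans, root);
	btrfs_btree_balance_dirty(root);	/* no dirty-block count argument any more */
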
@@@ -2184,7 -2182,8 +2182,8 @@@ int prepare_to_merge(struct reloc_contr
  again:
        if (!err) {
                num_bytes = rc->merging_rsv_size;
-               ret = btrfs_block_rsv_add(root, rc->block_rsv, num_bytes);
+               ret = btrfs_block_rsv_add(root, rc->block_rsv, num_bytes,
+                                         BTRFS_RESERVE_FLUSH_ALL);
                if (ret)
                        err = ret;
        }
@@@ -2459,7 -2458,8 +2458,8 @@@ static int reserve_metadata_space(struc
        num_bytes = calcu_metadata_size(rc, node, 1) * 2;
  
        trans->block_rsv = rc->block_rsv;
-       ret = btrfs_block_rsv_add(root, rc->block_rsv, num_bytes);
+       ret = btrfs_block_rsv_add(root, rc->block_rsv, num_bytes,
+                                 BTRFS_RESERVE_FLUSH_ALL);
        if (ret) {
                if (ret == -EAGAIN)
                        rc->commit_transaction = 1;
@@@ -3259,7 -3259,6 +3259,6 @@@ static int delete_block_group_cache(str
        struct btrfs_path *path;
        struct btrfs_root *root = fs_info->tree_root;
        struct btrfs_trans_handle *trans;
-       unsigned long nr;
        int ret = 0;
  
        if (inode)
@@@ -3293,9 -3292,8 +3292,8 @@@ truncate
        ret = btrfs_truncate_free_space_cache(root, trans, path, inode);
  
        btrfs_free_path(path);
-       nr = trans->blocks_used;
        btrfs_end_transaction(trans, root);
-       btrfs_btree_balance_dirty(root, nr);
+       btrfs_btree_balance_dirty(root);
  out:
        iput(inode);
        return ret;
@@@ -3474,7 -3472,7 +3472,7 @@@ out
  }
  
  /*
 - * hepler to find all tree blocks that reference a given data extent
 + * helper to find all tree blocks that reference a given data extent
   */
  static noinline_for_stack
  int add_data_references(struct reloc_control *rc,
  }
  
  /*
 - * hepler to find next unprocessed extent
 + * helper to find next unprocessed extent
   */
  static noinline_for_stack
  int find_next_extent(struct btrfs_trans_handle *trans,
@@@ -3685,7 -3683,8 +3683,8 @@@ int prepare_to_relocate(struct reloc_co
         * is no reservation in transaction handle.
         */
        ret = btrfs_block_rsv_add(rc->extent_root, rc->block_rsv,
-                                 rc->extent_root->nodesize * 256);
+                                 rc->extent_root->nodesize * 256,
+                                 BTRFS_RESERVE_FLUSH_ALL);
        if (ret)
                return ret;
  
@@@ -3711,7 -3710,6 +3710,6 @@@ static noinline_for_stack int relocate_
        struct btrfs_trans_handle *trans = NULL;
        struct btrfs_path *path;
        struct btrfs_extent_item *ei;
-       unsigned long nr;
        u64 flags;
        u32 item_size;
        int ret;
@@@ -3828,9 -3826,8 +3826,8 @@@ restart
                        ret = btrfs_commit_transaction(trans, rc->extent_root);
                        BUG_ON(ret);
                } else {
-                       nr = trans->blocks_used;
                        btrfs_end_transaction_throttle(trans, rc->extent_root);
-                       btrfs_btree_balance_dirty(rc->extent_root, nr);
+                       btrfs_btree_balance_dirty(rc->extent_root);
                }
                trans = NULL;
  
                          GFP_NOFS);
  
        if (trans) {
-               nr = trans->blocks_used;
                btrfs_end_transaction_throttle(trans, rc->extent_root);
-               btrfs_btree_balance_dirty(rc->extent_root, nr);
+               btrfs_btree_balance_dirty(rc->extent_root);
        }
  
        if (!err) {
@@@ -3941,7 -3937,6 +3937,6 @@@ struct inode *create_reloc_inode(struc
        struct btrfs_trans_handle *trans;
        struct btrfs_root *root;
        struct btrfs_key key;
-       unsigned long nr;
        u64 objectid = BTRFS_FIRST_FREE_OBJECTID;
        int err = 0;
  
  
        err = btrfs_orphan_add(trans, inode);
  out:
-       nr = trans->blocks_used;
        btrfs_end_transaction(trans, root);
-       btrfs_btree_balance_dirty(root, nr);
+       btrfs_btree_balance_dirty(root);
        if (err) {
                if (inode)
                        iput(inode);
@@@ -4057,7 -4051,11 +4051,11 @@@ int btrfs_relocate_block_group(struct b
               (unsigned long long)rc->block_group->key.objectid,
               (unsigned long long)rc->block_group->flags);
  
-       btrfs_start_delalloc_inodes(fs_info->tree_root, 0);
+       ret = btrfs_start_delalloc_inodes(fs_info->tree_root, 0);
+       if (ret < 0) {
+               err = ret;
+               goto out;
+       }
        btrfs_wait_ordered_extents(fs_info->tree_root, 0);
  
        while (1) {
diff --combined fs/btrfs/transaction.c
index 68704e74f0d38415f910c08aac5915f663ea9ccd,f15494699f3b649b1fd743b6bb5e5c7764e334d6..fbe408204628e52314405022c24e2460f091bec4
@@@ -30,6 -30,7 +30,7 @@@
  #include "tree-log.h"
  #include "inode-map.h"
  #include "volumes.h"
+ #include "dev-replace.h"
  
  #define BTRFS_ROOT_TRANS_TAG 0
  
@@@ -111,6 -112,7 +112,6 @@@ loop
                 * to redo the trans_no_join checks above
                 */
                kmem_cache_free(btrfs_transaction_cachep, cur_trans);
 -              cur_trans = fs_info->running_transaction;
                goto loop;
        } else if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
                spin_unlock(&fs_info->trans_lock);
         * the log must never go across transaction boundaries.
         */
        smp_mb();
-       if (!list_empty(&fs_info->tree_mod_seq_list)) {
-               printk(KERN_ERR "btrfs: tree_mod_seq_list not empty when "
+       if (!list_empty(&fs_info->tree_mod_seq_list))
+               WARN(1, KERN_ERR "btrfs: tree_mod_seq_list not empty when "
                        "creating a fresh transaction\n");
-               WARN_ON(1);
-       }
-       if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log)) {
-               printk(KERN_ERR "btrfs: tree_mod_log rb tree not empty when "
+       if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
+               WARN(1, KERN_ERR "btrfs: tree_mod_log rb tree not empty when "
                        "creating a fresh transaction\n");
-               WARN_ON(1);
-       }
        atomic_set(&fs_info->tree_mod_seq, 0);
  
        spin_lock_init(&cur_trans->commit_lock);
@@@ -294,9 -292,9 +291,9 @@@ static int may_wait_transaction(struct 
        return 0;
  }
  
- static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
-                                                   u64 num_items, int type,
-                                                   int noflush)
+ static struct btrfs_trans_handle *
+ start_transaction(struct btrfs_root *root, u64 num_items, int type,
+                 enum btrfs_reserve_flush_enum flush)
  {
        struct btrfs_trans_handle *h;
        struct btrfs_transaction *cur_trans;
                WARN_ON(type != TRANS_JOIN && type != TRANS_JOIN_NOLOCK);
                h = current->journal_info;
                h->use_count++;
+               WARN_ON(h->use_count > 2);
                h->orig_rsv = h->block_rsv;
                h->block_rsv = NULL;
                goto got_it;
                }
  
                num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
-               if (noflush)
-                       ret = btrfs_block_rsv_add_noflush(root,
-                                               &root->fs_info->trans_block_rsv,
-                                               num_bytes);
-               else
-                       ret = btrfs_block_rsv_add(root,
-                                               &root->fs_info->trans_block_rsv,
-                                               num_bytes);
+               ret = btrfs_block_rsv_add(root,
+                                         &root->fs_info->trans_block_rsv,
+                                         num_bytes, flush);
                if (ret)
                        return ERR_PTR(ret);
        }
@@@ -421,13 -415,15 +414,15 @@@ got_it
  struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
                                                   int num_items)
  {
-       return start_transaction(root, num_items, TRANS_START, 0);
+       return start_transaction(root, num_items, TRANS_START,
+                                BTRFS_RESERVE_FLUSH_ALL);
  }
  
- struct btrfs_trans_handle *btrfs_start_transaction_noflush(
+ struct btrfs_trans_handle *btrfs_start_transaction_lflush(
                                        struct btrfs_root *root, int num_items)
  {
-       return start_transaction(root, num_items, TRANS_START, 1);
+       return start_transaction(root, num_items, TRANS_START,
+                                BTRFS_RESERVE_FLUSH_LIMIT);
  }
  
  struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
@@@ -460,28 -456,31 +455,31 @@@ static noinline void wait_for_commit(st
  int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
  {
        struct btrfs_transaction *cur_trans = NULL, *t;
-       int ret;
+       int ret = 0;
  
-       ret = 0;
        if (transid) {
                if (transid <= root->fs_info->last_trans_committed)
                        goto out;
  
+               ret = -EINVAL;
                /* find specified transaction */
                spin_lock(&root->fs_info->trans_lock);
                list_for_each_entry(t, &root->fs_info->trans_list, list) {
                        if (t->transid == transid) {
                                cur_trans = t;
                                atomic_inc(&cur_trans->use_count);
+                               ret = 0;
                                break;
                        }
-                       if (t->transid > transid)
+                       if (t->transid > transid) {
+                               ret = 0;
                                break;
+                       }
                }
                spin_unlock(&root->fs_info->trans_lock);
-               ret = -EINVAL;
+               /* The specified transaction doesn't exist */
                if (!cur_trans)
-                       goto out;  /* bad transid */
+                       goto out;
        } else {
                /* find newest transaction that is committing | committed */
                spin_lock(&root->fs_info->trans_lock);
        }
  
        wait_for_commit(root, cur_trans);
        put_transaction(cur_trans);
-       ret = 0;
  out:
        return ret;
  }
@@@ -850,7 -847,9 +846,9 @@@ static noinline int commit_cowonly_root
                return ret;
  
        ret = btrfs_run_dev_stats(trans, root->fs_info);
-       BUG_ON(ret);
+       WARN_ON(ret);
+       ret = btrfs_run_dev_replace(trans, root->fs_info);
+       WARN_ON(ret);
  
        ret = btrfs_run_qgroups(trans, root->fs_info);
        BUG_ON(ret);
        switch_commit_root(fs_info->extent_root);
        up_write(&fs_info->extent_commit_sem);
  
+       btrfs_after_dev_replace_commit(fs_info);
        return 0;
  }
  
@@@ -957,7 -958,6 +957,6 @@@ int btrfs_defrag_root(struct btrfs_roo
        struct btrfs_fs_info *info = root->fs_info;
        struct btrfs_trans_handle *trans;
        int ret;
-       unsigned long nr;
  
        if (xchg(&root->defrag_running, 1))
                return 0;
  
                ret = btrfs_defrag_leaves(trans, root, cacheonly);
  
-               nr = trans->blocks_used;
                btrfs_end_transaction(trans, root);
-               btrfs_btree_balance_dirty(info->tree_root, nr);
+               btrfs_btree_balance_dirty(info->tree_root);
                cond_resched();
  
                if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
@@@ -1031,8 -1030,9 +1029,9 @@@ static noinline int create_pending_snap
        btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);
  
        if (to_reserve > 0) {
-               ret = btrfs_block_rsv_add_noflush(root, &pending->block_rsv,
-                                                 to_reserve);
+               ret = btrfs_block_rsv_add(root, &pending->block_rsv,
+                                         to_reserve,
+                                         BTRFS_RESERVE_NO_FLUSH);
                if (ret) {
                        pending->error = ret;
                        goto no_free_objectid;
                                    parent_inode, &key,
                                    BTRFS_FT_DIR, index);
        /* We have checked the name at the beginning, so it is impossible. */
-       BUG_ON(ret == -EEXIST);
+       BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
@@@ -1308,9 -1308,10 +1307,10 @@@ static void do_async_commit(struct work
         * We've got freeze protection passed with the transaction.
         * Tell lockdep about it.
         */
-       rwsem_acquire_read(
-               &ac->root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
-               0, 1, _THIS_IP_);
+       if (ac->newtrans->type < TRANS_JOIN_NOLOCK)
+               rwsem_acquire_read(
+                    &ac->root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
+                    0, 1, _THIS_IP_);
  
        current->journal_info = ac->newtrans;
  
@@@ -1348,8 -1349,10 +1348,10 @@@ int btrfs_commit_transaction_async(stru
         * Tell lockdep we've released the freeze rwsem, since the
         * async commit thread will be the one to unlock it.
         */
-       rwsem_release(&root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
-                     1, _THIS_IP_);
+       if (trans->type < TRANS_JOIN_NOLOCK)
+               rwsem_release(
+                       &root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
+                       1, _THIS_IP_);
  
        schedule_delayed_work(&ac->work, 0);
  
@@@ -1399,6 -1402,48 +1401,48 @@@ static void cleanup_transaction(struct 
        kmem_cache_free(btrfs_trans_handle_cachep, trans);
  }
  
+ static int btrfs_flush_all_pending_stuffs(struct btrfs_trans_handle *trans,
+                                         struct btrfs_root *root)
+ {
+       int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);
+       int snap_pending = 0;
+       int ret;
+       if (!flush_on_commit) {
+               spin_lock(&root->fs_info->trans_lock);
+               if (!list_empty(&trans->transaction->pending_snapshots))
+                       snap_pending = 1;
+               spin_unlock(&root->fs_info->trans_lock);
+       }
+       if (flush_on_commit || snap_pending) {
+               btrfs_start_delalloc_inodes(root, 1);
+               btrfs_wait_ordered_extents(root, 1);
+       }
+       ret = btrfs_run_delayed_items(trans, root);
+       if (ret)
+               return ret;
+       /*
+        * running the delayed items may have added new refs. account
+        * them now so that they hinder processing of more delayed refs
+        * as little as possible.
+        */
+       btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
+       /*
+        * rename doesn't use btrfs_join_transaction, so, once we
+        * set the transaction to blocked above, we aren't going
+        * to get any new ordered operations.  We can safely run
+        * it here and know for sure that nothing new will be added
+        * to the list
+        */
+       btrfs_run_ordered_operations(root, 1);
+       return 0;
+ }
  /*
   * btrfs_transaction state sequence:
   *    in_commit = 0, blocked = 0  (initial)
@@@ -1413,15 -1458,21 +1457,21 @@@ int btrfs_commit_transaction(struct btr
        struct btrfs_transaction *cur_trans = trans->transaction;
        struct btrfs_transaction *prev_trans = NULL;
        DEFINE_WAIT(wait);
-       int ret = -EIO;
+       int ret;
        int should_grow = 0;
        unsigned long now = get_seconds();
-       int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);
  
-       btrfs_run_ordered_operations(root, 0);
+       ret = btrfs_run_ordered_operations(root, 0);
+       if (ret) {
+               btrfs_abort_transaction(trans, root, ret);
+               goto cleanup_transaction;
+       }
  
-       if (cur_trans->aborted)
+       /* Stop the commit early if ->aborted is set */
+       if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
+               ret = cur_trans->aborted;
                goto cleanup_transaction;
+       }
  
        /* make a pass through all the delayed refs we have so far
         * any runnings procs may add more while we are here
                should_grow = 1;
  
        do {
-               int snap_pending = 0;
                joined = cur_trans->num_joined;
-               if (!list_empty(&trans->transaction->pending_snapshots))
-                       snap_pending = 1;
  
                WARN_ON(cur_trans != trans->transaction);
  
-               if (flush_on_commit || snap_pending) {
-                       btrfs_start_delalloc_inodes(root, 1);
-                       btrfs_wait_ordered_extents(root, 1);
-               }
-               ret = btrfs_run_delayed_items(trans, root);
+               ret = btrfs_flush_all_pending_stuffs(trans, root);
                if (ret)
                        goto cleanup_transaction;
  
-               /*
-                * running the delayed items may have added new refs. account
-                * them now so that they hinder processing of more delayed refs
-                * as little as possible.
-                */
-               btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
-               /*
-                * rename don't use btrfs_join_transaction, so, once we
-                * set the transaction to blocked above, we aren't going
-                * to get any new ordered operations.  We can safely run
-                * it here and no for sure that nothing new will be added
-                * to the list
-                */
-               btrfs_run_ordered_operations(root, 1);
                prepare_to_wait(&cur_trans->writer_wait, &wait,
                                TASK_UNINTERRUPTIBLE);
  
        } while (atomic_read(&cur_trans->num_writers) > 1 ||
                 (should_grow && cur_trans->num_joined != joined));
  
+       ret = btrfs_flush_all_pending_stuffs(trans, root);
+       if (ret)
+               goto cleanup_transaction;
        /*
         * Ok now we need to make sure to block out any other joins while we
         * commit the transaction.  We could have started a join before setting
        wait_event(cur_trans->writer_wait,
                   atomic_read(&cur_trans->num_writers) == 1);
  
+       /* ->aborted might be set after the previous check, so check it */
+       if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
+               ret = cur_trans->aborted;
+               goto cleanup_transaction;
+       }
        /*
         * the reloc mutex makes sure that we stop
         * the balancing code from coming in and moving
                goto cleanup_transaction;
        }
  
+       /*
+        * The tasks which save the space cache and inode cache may also
+        * update ->aborted, check it.
+        */
+       if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
+               ret = cur_trans->aborted;
+               mutex_unlock(&root->fs_info->tree_log_mutex);
+               mutex_unlock(&root->fs_info->reloc_mutex);
+               goto cleanup_transaction;
+       }
        btrfs_prepare_extent_commit(trans, root);
  
        cur_trans = root->fs_info->running_transaction;
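
The new ACCESS_ONCE(cur_trans->aborted) checks above re-read the abort flag at each point where another task could have set it while this one was blocked on a lock or a wait. A minimal sketch of that pattern in isolation; struct foo_ctx, its fields and foo_do_commit() are hypothetical stand-ins, not btrfs code:

    static int foo_do_commit(struct foo_ctx *ctx)
    {
            int ret;

            if (unlikely(ACCESS_ONCE(ctx->aborted)))        /* cheap early bail-out */
                    return ctx->aborted;

            mutex_lock(&ctx->lock);
            /* another task may have set ->aborted while we waited for the lock */
            if (unlikely(ACCESS_ONCE(ctx->aborted))) {
                    ret = ctx->aborted;
                    mutex_unlock(&ctx->lock);
                    return ret;
            }

            /* ... work that must not run once ->aborted is set ... */
            mutex_unlock(&ctx->lock);
            return 0;
    }
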
diff --combined fs/hpfs/inode.c
index 405ab77db39cf2de1dd56ded99beb4073c2c5742,5dc06c8371051855f836ce7666e659471c6cb988..9edeeb0ea97ef885c8910e62fcd73eecb8128daf
@@@ -147,7 -147,7 +147,7 @@@ static void hpfs_write_inode_ea(struct 
        /*if (le32_to_cpu(fnode->acl_size_l) || le16_to_cpu(fnode->acl_size_s)) {
                   Some unknown structures like ACL may be in fnode,
                   we'd better not overwrite them
 -              hpfs_error(i->i_sb, "fnode %08x has some unknown HPFS386 stuctures", i->i_ino);
 +              hpfs_error(i->i_sb, "fnode %08x has some unknown HPFS386 structures", i->i_ino);
        } else*/ if (hpfs_sb(i->i_sb)->sb_eas >= 2) {
                __le32 ea;
                if (!uid_eq(i->i_uid, hpfs_sb(i->i_sb)->sb_uid) || hpfs_inode->i_ea_uid) {
@@@ -277,9 -277,12 +277,12 @@@ int hpfs_setattr(struct dentry *dentry
  
        if ((attr->ia_valid & ATTR_SIZE) &&
            attr->ia_size != i_size_read(inode)) {
-               error = vmtruncate(inode, attr->ia_size);
+               error = inode_newsize_ok(inode, attr->ia_size);
                if (error)
                        goto out_unlock;
+               truncate_setsize(inode, attr->ia_size);
+               hpfs_truncate(inode);
        }
  
        setattr_copy(inode, attr);
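
The hpfs hunk above follows the removal of vmtruncate(): the size change is validated with inode_newsize_ok(), applied with truncate_setsize(), and the filesystem's own truncate routine trims the blocks. A minimal sketch of that sequence for a hypothetical filesystem "foo", where foo_truncate() stands in for the fs-specific helper (locking and the rest of setattr elided):

    static int foo_setattr(struct dentry *dentry, struct iattr *attr)
    {
            struct inode *inode = dentry->d_inode;
            int error;

            error = inode_change_ok(inode, attr);
            if (error)
                    return error;

            if ((attr->ia_valid & ATTR_SIZE) &&
                attr->ia_size != i_size_read(inode)) {
                    error = inode_newsize_ok(inode, attr->ia_size);
                    if (error)
                            return error;
                    truncate_setsize(inode, attr->ia_size); /* update i_size, drop pages */
                    foo_truncate(inode);                    /* fs-specific block trimming */
            }

            setattr_copy(inode, attr);
            mark_inode_dirty(inode);
            return 0;
    }
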
diff --combined include/linux/dma-buf.h
index 139e673a44f8cc4050191ca19ad10069a857582f,bd2e52ccc4f24a7345c5eceaa698f3b2dc03ea82..3d754a394e921a6f9e1e0cdc7b62c28df80f29c0
@@@ -53,7 -53,7 +53,7 @@@ struct dma_buf_attachment
   * @begin_cpu_access: [optional] called before cpu access to invalidate cpu
   *                  caches and allocate backing storage (if not yet done)
 *                  respectively pin the object into memory.
 - * @end_cpu_access: [optional] called after cpu access to flush cashes.
 + * @end_cpu_access: [optional] called after cpu access to flush caches.
   * @kmap_atomic: maps a page from the buffer into kernel address
   *             space, users may not block until the subsequent unmap call.
   *             This callback must not sleep.
@@@ -156,7 -156,6 +156,6 @@@ static inline void get_dma_buf(struct d
        get_file(dmabuf->file);
  }
  
- #ifdef CONFIG_DMA_SHARED_BUFFER
  struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
                                                        struct device *dev);
  void dma_buf_detach(struct dma_buf *dmabuf,
@@@ -184,103 -183,5 +183,5 @@@ int dma_buf_mmap(struct dma_buf *, stru
                 unsigned long);
  void *dma_buf_vmap(struct dma_buf *);
  void dma_buf_vunmap(struct dma_buf *, void *vaddr);
- #else
- static inline struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
-                                                       struct device *dev)
- {
-       return ERR_PTR(-ENODEV);
- }
- static inline void dma_buf_detach(struct dma_buf *dmabuf,
-                                 struct dma_buf_attachment *dmabuf_attach)
- {
-       return;
- }
- static inline struct dma_buf *dma_buf_export(void *priv,
-                                            const struct dma_buf_ops *ops,
-                                            size_t size, int flags)
- {
-       return ERR_PTR(-ENODEV);
- }
- static inline int dma_buf_fd(struct dma_buf *dmabuf, int flags)
- {
-       return -ENODEV;
- }
- static inline struct dma_buf *dma_buf_get(int fd)
- {
-       return ERR_PTR(-ENODEV);
- }
- static inline void dma_buf_put(struct dma_buf *dmabuf)
- {
-       return;
- }
- static inline struct sg_table *dma_buf_map_attachment(
-       struct dma_buf_attachment *attach, enum dma_data_direction write)
- {
-       return ERR_PTR(-ENODEV);
- }
- static inline void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
-                       struct sg_table *sg, enum dma_data_direction dir)
- {
-       return;
- }
- static inline int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
-                                          size_t start, size_t len,
-                                          enum dma_data_direction dir)
- {
-       return -ENODEV;
- }
- static inline void dma_buf_end_cpu_access(struct dma_buf *dmabuf,
-                                         size_t start, size_t len,
-                                         enum dma_data_direction dir)
- {
- }
- static inline void *dma_buf_kmap_atomic(struct dma_buf *dmabuf,
-                                       unsigned long pnum)
- {
-       return NULL;
- }
- static inline void dma_buf_kunmap_atomic(struct dma_buf *dmabuf,
-                                        unsigned long pnum, void *vaddr)
- {
- }
- static inline void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long pnum)
- {
-       return NULL;
- }
- static inline void dma_buf_kunmap(struct dma_buf *dmabuf,
-                                 unsigned long pnum, void *vaddr)
- {
- }
- static inline int dma_buf_mmap(struct dma_buf *dmabuf,
-                              struct vm_area_struct *vma,
-                              unsigned long pgoff)
- {
-       return -ENODEV;
- }
- static inline void *dma_buf_vmap(struct dma_buf *dmabuf)
- {
-       return NULL;
- }
- static inline void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
- {
- }
- #endif /* CONFIG_DMA_SHARED_BUFFER */
  
  #endif /* __DMA_BUF_H__ */
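
With the !CONFIG_DMA_SHARED_BUFFER stub definitions gone, the functions above are declared unconditionally. A minimal importer-side usage sketch, assuming the dma_buf was exported elsewhere and handed in (foo_import() and its arguments are hypothetical):

    static int foo_import(struct device *dev, struct dma_buf *dmabuf)
    {
            struct dma_buf_attachment *attach;
            struct sg_table *sgt;

            attach = dma_buf_attach(dmabuf, dev);
            if (IS_ERR(attach))
                    return PTR_ERR(attach);

            sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
            if (IS_ERR(sgt)) {
                    dma_buf_detach(dmabuf, attach);
                    return PTR_ERR(sgt);
            }

            /* ... program the device with the scatter-gather list ... */

            dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
            dma_buf_detach(dmabuf, attach);
            return 0;
    }
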
diff --combined net/bluetooth/hci_core.c
index 8de421d28cb5db82c18f7ef09eb64be6451086c3,0f78e34220c9025aae08f3b38b49924b6c397ff6..6a2d0387475c206674483dd0dbe6f24a962fb2bc
@@@ -178,48 -178,13 +178,13 @@@ static void hci_reset_req(struct hci_de
  
  static void bredr_init(struct hci_dev *hdev)
  {
-       struct hci_cp_delete_stored_link_key cp;
-       __le16 param;
-       __u8 flt_type;
        hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
  
-       /* Mandatory initialization */
        /* Read Local Supported Features */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
  
        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
-       /* Read Buffer Size (ACL mtu, max pkt, etc.) */
-       hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
-       /* Read BD Address */
-       hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
-       /* Read Class of Device */
-       hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
-       /* Read Local Name */
-       hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
-       /* Read Voice Setting */
-       hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
-       /* Optional initialization */
-       /* Clear Event Filters */
-       flt_type = HCI_FLT_CLEAR_ALL;
-       hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
-       /* Connection accept timeout ~20 secs */
-       param = __constant_cpu_to_le16(0x7d00);
-       hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
-       bacpy(&cp.bdaddr, BDADDR_ANY);
-       cp.delete_all = 1;
-       hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
  }
  
  static void amp_init(struct hci_dev *hdev)
@@@ -273,14 -238,6 +238,6 @@@ static void hci_init_req(struct hci_de
        }
  }
  
- static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
- {
-       BT_DBG("%s", hdev->name);
-       /* Read LE buffer size */
-       hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
- }
  static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
  {
        __u8 scan = opt;
@@@ -405,7 -362,7 +362,7 @@@ struct inquiry_entry *hci_inquiry_cache
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;
  
-       BT_DBG("cache %p, %s", cache, batostr(bdaddr));
+       BT_DBG("cache %p, %pMR", cache, bdaddr);
  
        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
@@@ -421,7 -378,7 +378,7 @@@ struct inquiry_entry *hci_inquiry_cache
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;
  
-       BT_DBG("cache %p, %s", cache, batostr(bdaddr));
+       BT_DBG("cache %p, %pMR", cache, bdaddr);
  
        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
@@@ -438,7 -395,7 +395,7 @@@ struct inquiry_entry *hci_inquiry_cache
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;
  
-       BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
+       BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
  
        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
@@@ -475,7 -432,9 +432,9 @@@ bool hci_inquiry_cache_update(struct hc
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
  
-       BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
+       BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
+       hci_remove_remote_oob_data(hdev, &data->bdaddr);
  
        if (ssp)
                *ssp = data->ssp_mode;
@@@ -637,6 -596,99 +596,99 @@@ done
        return err;
  }
  
+ static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
+ {
+       u8 ad_len = 0, flags = 0;
+       size_t name_len;
+       if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
+               flags |= LE_AD_GENERAL;
+       if (!lmp_bredr_capable(hdev))
+               flags |= LE_AD_NO_BREDR;
+       if (lmp_le_br_capable(hdev))
+               flags |= LE_AD_SIM_LE_BREDR_CTRL;
+       if (lmp_host_le_br_capable(hdev))
+               flags |= LE_AD_SIM_LE_BREDR_HOST;
+       if (flags) {
+               BT_DBG("adv flags 0x%02x", flags);
+               ptr[0] = 2;
+               ptr[1] = EIR_FLAGS;
+               ptr[2] = flags;
+               ad_len += 3;
+               ptr += 3;
+       }
+       if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
+               ptr[0] = 2;
+               ptr[1] = EIR_TX_POWER;
+               ptr[2] = (u8) hdev->adv_tx_power;
+               ad_len += 3;
+               ptr += 3;
+       }
+       name_len = strlen(hdev->dev_name);
+       if (name_len > 0) {
+               size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
+               if (name_len > max_len) {
+                       name_len = max_len;
+                       ptr[1] = EIR_NAME_SHORT;
+               } else
+                       ptr[1] = EIR_NAME_COMPLETE;
+               ptr[0] = name_len + 1;
+               memcpy(ptr + 2, hdev->dev_name, name_len);
+               ad_len += (name_len + 2);
+               ptr += (name_len + 2);
+       }
+       return ad_len;
+ }
+ int hci_update_ad(struct hci_dev *hdev)
+ {
+       struct hci_cp_le_set_adv_data cp;
+       u8 len;
+       int err;
+       hci_dev_lock(hdev);
+       if (!lmp_le_capable(hdev)) {
+               err = -EINVAL;
+               goto unlock;
+       }
+       memset(&cp, 0, sizeof(cp));
+       len = create_ad(hdev, cp.data);
+       if (hdev->adv_data_len == len &&
+           memcmp(cp.data, hdev->adv_data, len) == 0) {
+               err = 0;
+               goto unlock;
+       }
+       memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
+       hdev->adv_data_len = len;
+       cp.length = len;
+       err = hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
+ unlock:
+       hci_dev_unlock(hdev);
+       return err;
+ }
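
create_ad() above emits standard length/type/value triplets into the advertising-data buffer. A worked example of what it would build for a hypothetical controller that is an LE peripheral with no BR/EDR support, adv_tx_power 4 and name "bt0" (byte values assume the usual codes EIR_FLAGS=0x01, EIR_TX_POWER=0x0a, EIR_NAME_COMPLETE=0x09 and flag bits LE_AD_GENERAL=0x02, LE_AD_NO_BREDR=0x04):

    02 01 06           len=2, type=Flags,          value 0x06
    02 0a 04           len=2, type=TX Power,       value 4
    04 09 62 74 30     len=4, type=Complete Name,  "bt0"

    => create_ad() returns ad_len = 11, and hci_update_ad() only resends the
       data when it differs from the cached hdev->adv_data.
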
  /* ---- HCI ioctl helpers ---- */
  
  int hci_dev_open(__u16 dev)
  
                ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);
  
-               if (lmp_host_le_capable(hdev))
-                       ret = __hci_request(hdev, hci_le_init_req, 0,
-                                           HCI_INIT_TIMEOUT);
                clear_bit(HCI_INIT, &hdev->flags);
        }
  
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
+               hci_update_ad(hdev);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
                    mgmt_valid_hdev(hdev)) {
                        hci_dev_lock(hdev);
@@@ -812,6 -861,9 +861,9 @@@ static int hci_dev_do_close(struct hci_
        /* Clear flags */
        hdev->flags = 0;
  
+       /* Controller radio is available but is currently powered down */
+       hdev->amp_status = 0;
        memset(hdev->eir, 0, sizeof(hdev->eir));
        memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
  
@@@ -1039,10 -1091,17 +1091,17 @@@ int hci_get_dev_info(void __user *arg
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
-       di.acl_mtu  = hdev->acl_mtu;
-       di.acl_pkts = hdev->acl_pkts;
-       di.sco_mtu  = hdev->sco_mtu;
-       di.sco_pkts = hdev->sco_pkts;
+       if (lmp_bredr_capable(hdev)) {
+               di.acl_mtu  = hdev->acl_mtu;
+               di.acl_pkts = hdev->acl_pkts;
+               di.sco_mtu  = hdev->sco_mtu;
+               di.sco_pkts = hdev->sco_pkts;
+       } else {
+               di.acl_mtu  = hdev->le_mtu;
+               di.acl_pkts = hdev->le_pkts;
+               di.sco_mtu  = 0;
+               di.sco_pkts = 0;
+       }
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;
  
@@@ -1259,7 -1318,7 +1318,7 @@@ int hci_add_link_key(struct hci_dev *hd
                list_add(&key->list, &hdev->link_keys);
        }
  
-       BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
+       BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
  
        /* Some buggy controller combinations generate a changed
         * combination key for legacy pairing even when there's no
@@@ -1338,7 -1397,7 +1397,7 @@@ int hci_remove_link_key(struct hci_dev 
        if (!key)
                return -ENOENT;
  
-       BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
+       BT_DBG("%s removing %pMR", hdev->name, bdaddr);
  
        list_del(&key->list);
        kfree(key);
@@@ -1354,7 -1413,7 +1413,7 @@@ int hci_remove_ltk(struct hci_dev *hdev
                if (bacmp(bdaddr, &k->bdaddr))
                        continue;
  
-               BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
+               BT_DBG("%s removing %pMR", hdev->name, bdaddr);
  
                list_del(&k->list);
                kfree(k);
@@@ -1401,7 -1460,7 +1460,7 @@@ int hci_remove_remote_oob_data(struct h
        if (!data)
                return -ENOENT;
  
-       BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
+       BT_DBG("%s removing %pMR", hdev->name, bdaddr);
  
        list_del(&data->list);
        kfree(data);
@@@ -1440,7 -1499,7 +1499,7 @@@ int hci_add_remote_oob_data(struct hci_
        memcpy(data->hash, hash, sizeof(data->hash));
        memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
  
-       BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
+       BT_DBG("%s for %pMR", hdev->name, bdaddr);
  
        return 0;
  }
@@@ -1617,6 -1676,9 +1676,9 @@@ int hci_le_scan(struct hci_dev *hdev, u
  
        BT_DBG("%s", hdev->name);
  
+       if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
+               return -ENOTSUPP;
        if (work_busy(&hdev->le_scan))
                return -EINPROGRESS;
  
@@@ -1643,6 -1705,8 +1705,8 @@@ struct hci_dev *hci_alloc_dev(void
        hdev->esco_type = (ESCO_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);
        hdev->io_capability = 0x03; /* No Input No Output */
+       hdev->inq_tx_power = HCI_TX_POWER_INVALID;
+       hdev->adv_tx_power = HCI_TX_POWER_INVALID;
  
        hdev->sniff_max_interval = 800;
        hdev->sniff_min_interval = 80;
@@@ -1754,11 -1818,11 +1818,11 @@@ int hci_register_dev(struct hci_dev *hd
        if (hdev->dev_type != HCI_AMP)
                set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
  
-       schedule_work(&hdev->power_on);
        hci_notify(hdev, HCI_DEV_REG);
        hci_dev_hold(hdev);
  
+       schedule_work(&hdev->power_on);
        return id;
  
  err_wqueue:
@@@ -1793,6 -1857,8 +1857,8 @@@ void hci_unregister_dev(struct hci_dev 
        for (i = 0; i < NUM_REASSEMBLY; i++)
                kfree_skb(hdev->reassembly[i]);
  
+       cancel_work_sync(&hdev->power_on);
        if (!test_bit(HCI_INIT, &hdev->flags) &&
            !test_bit(HCI_SETUP, &hdev->dev_flags)) {
                hci_dev_lock(hdev);
@@@ -1855,7 -1921,7 +1921,7 @@@ int hci_recv_frame(struct sk_buff *skb
                return -ENXIO;
        }
  
 -      /* Incomming skb */
 +      /* Incoming skb */
        bt_cb(skb)->incoming = 1;
  
        /* Time stamp */
@@@ -2153,9 -2219,10 +2219,10 @@@ static void hci_add_acl_hdr(struct sk_b
        hdr->dlen   = cpu_to_le16(len);
  }
  
- static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
+ static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
                          struct sk_buff *skb, __u16 flags)
  {
+       struct hci_conn *conn = chan->conn;
        struct hci_dev *hdev = conn->hdev;
        struct sk_buff *list;
  
        skb->data_len = 0;
  
        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
-       hci_add_acl_hdr(skb, conn->handle, flags);
+       switch (hdev->dev_type) {
+       case HCI_BREDR:
+               hci_add_acl_hdr(skb, conn->handle, flags);
+               break;
+       case HCI_AMP:
+               hci_add_acl_hdr(skb, chan->handle, flags);
+               break;
+       default:
+               BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
+               return;
+       }
  
        list = skb_shinfo(skb)->frag_list;
        if (!list) {
  
  void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
  {
-       struct hci_conn *conn = chan->conn;
-       struct hci_dev *hdev = conn->hdev;
+       struct hci_dev *hdev = chan->conn->hdev;
  
        BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
  
        skb->dev = (void *) hdev;
  
-       hci_queue_acl(conn, &chan->data_q, skb, flags);
+       hci_queue_acl(chan, &chan->data_q, skb, flags);
  
        queue_work(hdev->workqueue, &hdev->tx_work);
  }
@@@ -2311,8 -2388,8 +2388,8 @@@ static void hci_link_tx_to(struct hci_d
        /* Kill stalled connections */
        list_for_each_entry_rcu(c, &h->list, list) {
                if (c->type == type && c->sent) {
-                       BT_ERR("%s killing stalled connection %s",
-                              hdev->name, batostr(&c->dst));
+                       BT_ERR("%s killing stalled connection %pMR",
+                              hdev->name, &c->dst);
                        hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
                }
        }
@@@ -2381,6 -2458,9 +2458,9 @@@ static struct hci_chan *hci_chan_sent(s
        case ACL_LINK:
                cnt = hdev->acl_cnt;
                break;
+       case AMP_LINK:
+               cnt = hdev->block_cnt;
+               break;
        case SCO_LINK:
        case ESCO_LINK:
                cnt = hdev->sco_cnt;
@@@ -2510,11 -2590,19 +2590,19 @@@ static void hci_sched_acl_blk(struct hc
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote;
+       u8 type;
  
        __check_timeout(hdev, cnt);
  
+       BT_DBG("%s", hdev->name);
+       if (hdev->dev_type == HCI_AMP)
+               type = AMP_LINK;
+       else
+               type = ACL_LINK;
        while (hdev->block_cnt > 0 &&
-              (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
+              (chan = hci_chan_sent(hdev, type, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
                        int blocks;
        }
  
        if (cnt != hdev->block_cnt)
-               hci_prio_recalculate(hdev, ACL_LINK);
+               hci_prio_recalculate(hdev, type);
  }
  
  static void hci_sched_acl(struct hci_dev *hdev)
  {
        BT_DBG("%s", hdev->name);
  
-       if (!hci_conn_num(hdev, ACL_LINK))
+       /* No ACL link over BR/EDR controller */
+       if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
+               return;
+       /* No AMP link over AMP controller */
+       if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
                return;
  
        switch (hdev->flow_ctl_mode) {
@@@ -2717,14 -2810,6 +2810,6 @@@ static void hci_acldata_packet(struct h
        if (conn) {
                hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
  
-               hci_dev_lock(hdev);
-               if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
-                   !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
-                       mgmt_device_connected(hdev, &conn->dst, conn->type,
-                                             conn->dst_type, 0, NULL, 0,
-                                             conn->dev_class);
-               hci_dev_unlock(hdev);
                /* Send to upper protocol */
                l2cap_recv_acldata(conn, skb, flags);
                return;
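
Several hunks in this file also replace batostr() with the %pMR printk extension, which formats a Bluetooth bdaddr_t directly and avoids the intermediate string buffer. A minimal sketch (foo_log_peer() is hypothetical):

    static void foo_log_peer(struct hci_dev *hdev, bdaddr_t *bdaddr)
    {
            /* old style: BT_DBG("%s peer %s", hdev->name, batostr(bdaddr)); */
            BT_DBG("%s peer %pMR", hdev->name, bdaddr);
    }
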
diff --combined net/sctp/sm_make_chunk.c
index 16a10850ed39db57690a0a0d2bd16b7577b3bd75,e1c5fc2be6b8f2f245c091c54a927d1440abc1ee..04df5301df04dd11675640687276fea685d9d043
@@@ -804,10 -804,11 +804,11 @@@ struct sctp_chunk *sctp_make_sack(cons
                                 gabs);
  
        /* Add the duplicate TSN information.  */
-       if (num_dup_tsns)
+       if (num_dup_tsns) {
+               aptr->stats.idupchunks += num_dup_tsns;
                sctp_addto_chunk(retval, sizeof(__u32) * num_dup_tsns,
                                 sctp_tsnmap_get_dups(map));
+       }
        /* Once we have a sack generated, check to see what our sack
         * generation is, if it's 0, reset the transports to 0, and reset
         * the association generation to 1
@@@ -1090,6 -1091,25 +1091,25 @@@ nodata
        return retval;
  }
  
+ struct sctp_chunk *sctp_make_violation_max_retrans(
+       const struct sctp_association *asoc,
+       const struct sctp_chunk *chunk)
+ {
+       struct sctp_chunk *retval;
+       static const char error[] = "Association exceeded its max_retrans count";
+       size_t payload_len = sizeof(error) + sizeof(sctp_errhdr_t);
+       retval = sctp_make_abort(asoc, chunk, payload_len);
+       if (!retval)
+               goto nodata;
+       sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, sizeof(error));
+       sctp_addto_chunk(retval, sizeof(error), error);
+ nodata:
+       return retval;
+ }
  /* Make a HEARTBEAT chunk.  */
  struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc,
                                  const struct sctp_transport *transport)
@@@ -1181,7 -1201,7 +1201,7 @@@ nodata
   * specifically, max(asoc->pathmtu, SCTP_DEFAULT_MAXSEGMENT)
   * This is a helper function to allocate an error chunk
   * for those invalid parameter codes in which we may not want
 - * to report all the errors, if the incomming chunk is large
 + * to report all the errors, if the incoming chunk is large
   */
  static inline struct sctp_chunk *sctp_make_op_error_fixed(
        const struct sctp_association *asoc,
diff --combined sound/soc/codecs/ab8500-codec.c
index f848878d553f0bbf33229459367aecba3bf097c1,6c12ac206ee9ab8f2d4fc018c22e3e45bb2365d1..a153b168129bc493fae94cd58c88bd2bfe7a2cf8
@@@ -2147,7 -2147,7 +2147,7 @@@ static int ab8500_codec_set_dai_fmt(str
        status = ab8500_codec_set_dai_clock_gate(codec, fmt);
        if (status) {
                dev_err(dai->codec->dev,
 -                      "%s: ERRROR: Failed to set clock gate (%d).\n",
 +                      "%s: ERROR: Failed to set clock gate (%d).\n",
                        __func__, status);
                return status;
        }
@@@ -2356,7 -2356,7 +2356,7 @@@ static int ab8500_codec_set_dai_tdm_slo
        return 0;
  }
  
- struct snd_soc_dai_driver ab8500_codec_dai[] = {
+ static struct snd_soc_dai_driver ab8500_codec_dai[] = {
        {
                .name = "ab8500-codec-dai.0",
                .id = 0,
@@@ -2554,7 -2554,7 +2554,7 @@@ static struct snd_soc_codec_driver ab85
        .num_dapm_routes =      ARRAY_SIZE(ab8500_dapm_routes),
  };
  
- static int __devinit ab8500_codec_driver_probe(struct platform_device *pdev)
+ static int ab8500_codec_driver_probe(struct platform_device *pdev)
  {
        int status;
        struct ab8500_codec_drvdata *drvdata;
        return status;
  }
  
- static int __devexit ab8500_codec_driver_remove(struct platform_device *pdev)
+ static int ab8500_codec_driver_remove(struct platform_device *pdev)
  {
        dev_info(&pdev->dev, "%s Enter.\n", __func__);
  
@@@ -2595,7 -2595,7 +2595,7 @@@ static struct platform_driver ab8500_co
                .owner  = THIS_MODULE,
        },
        .probe          = ab8500_codec_driver_probe,
-       .remove         = __devexit_p(ab8500_codec_driver_remove),
+       .remove         = ab8500_codec_driver_remove,
        .suspend        = NULL,
        .resume         = NULL,
  };
diff --combined sound/soc/codecs/wm8974.c
index cc4c1c050b9fffb8897971362911f1da691d36a3,ea58b73e86b28df311fda7545463089b90e89b6b..b47c252ef901e3458c743a9bb1d020dd4570300c
@@@ -113,15 -113,15 +113,15 @@@ SOC_ENUM("Equaliser Function", wm8974_e
  SOC_ENUM("EQ1 Cut Off", wm8974_enum[4]),
  SOC_SINGLE_TLV("EQ1 Volume", WM8974_EQ1,  0, 24, 1, eq_tlv),
  
 -SOC_ENUM("Equaliser EQ2 Bandwith", wm8974_enum[5]),
 +SOC_ENUM("Equaliser EQ2 Bandwidth", wm8974_enum[5]),
  SOC_ENUM("EQ2 Cut Off", wm8974_enum[6]),
  SOC_SINGLE_TLV("EQ2 Volume", WM8974_EQ2,  0, 24, 1, eq_tlv),
  
 -SOC_ENUM("Equaliser EQ3 Bandwith", wm8974_enum[7]),
 +SOC_ENUM("Equaliser EQ3 Bandwidth", wm8974_enum[7]),
  SOC_ENUM("EQ3 Cut Off", wm8974_enum[8]),
  SOC_SINGLE_TLV("EQ3 Volume", WM8974_EQ3,  0, 24, 1, eq_tlv),
  
 -SOC_ENUM("Equaliser EQ4 Bandwith", wm8974_enum[9]),
 +SOC_ENUM("Equaliser EQ4 Bandwidth", wm8974_enum[9]),
  SOC_ENUM("EQ4 Cut Off", wm8974_enum[10]),
  SOC_SINGLE_TLV("EQ4 Volume", WM8974_EQ4,  0, 24, 1, eq_tlv),
  
@@@ -625,8 -625,8 +625,8 @@@ static struct snd_soc_codec_driver soc_
        .num_dapm_routes = ARRAY_SIZE(wm8974_dapm_routes),
  };
  
- static __devinit int wm8974_i2c_probe(struct i2c_client *i2c,
-                                     const struct i2c_device_id *id)
+ static int wm8974_i2c_probe(struct i2c_client *i2c,
+                           const struct i2c_device_id *id)
  {
        int ret;
  
        return ret;
  }
  
- static __devexit int wm8974_i2c_remove(struct i2c_client *client)
+ static int wm8974_i2c_remove(struct i2c_client *client)
  {
        snd_soc_unregister_codec(&client->dev);
  
@@@ -655,7 -655,7 +655,7 @@@ static struct i2c_driver wm8974_i2c_dri
                .owner = THIS_MODULE,
        },
        .probe =    wm8974_i2c_probe,
-       .remove =   __devexit_p(wm8974_i2c_remove),
+       .remove =   wm8974_i2c_remove,
        .id_table = wm8974_i2c_id,
  };
  
diff --combined sound/soc/codecs/wm8978.c
index 4302071a74f15c122cb230f692e9017942db5e26,f347af3a67c2d2d390e50d523b33cd3372530b1c..029f31c8e7036b12bd79543960279c356ab1806b
@@@ -166,15 -166,15 +166,15 @@@ static const struct snd_kcontrol_new wm
        SOC_ENUM("EQ1 Cut Off", eq1),
        SOC_SINGLE_TLV("EQ1 Volume", WM8978_EQ1,  0, 24, 1, eq_tlv),
  
 -      SOC_ENUM("Equaliser EQ2 Bandwith", eq2bw),
 +      SOC_ENUM("Equaliser EQ2 Bandwidth", eq2bw),
        SOC_ENUM("EQ2 Cut Off", eq2),
        SOC_SINGLE_TLV("EQ2 Volume", WM8978_EQ2,  0, 24, 1, eq_tlv),
  
 -      SOC_ENUM("Equaliser EQ3 Bandwith", eq3bw),
 +      SOC_ENUM("Equaliser EQ3 Bandwidth", eq3bw),
        SOC_ENUM("EQ3 Cut Off", eq3),
        SOC_SINGLE_TLV("EQ3 Volume", WM8978_EQ3,  0, 24, 1, eq_tlv),
  
 -      SOC_ENUM("Equaliser EQ4 Bandwith", eq4bw),
 +      SOC_ENUM("Equaliser EQ4 Bandwidth", eq4bw),
        SOC_ENUM("EQ4 Cut Off", eq4),
        SOC_SINGLE_TLV("EQ4 Volume", WM8978_EQ4,  0, 24, 1, eq_tlv),
  
@@@ -527,9 -527,6 +527,6 @@@ static int wm8978_configure_pll(struct 
                        return idx;
  
                wm8978->mclk_idx = idx;
-               /* GPIO1 into default mode as input - before configuring PLL */
-               snd_soc_update_bits(codec, WM8978_GPIO_CONTROL, 7, 0);
        } else {
                return -EINVAL;
        }
@@@ -782,7 -779,7 +779,7 @@@ static int wm8978_hw_params(struct snd_
                wm8978->mclk_idx = -1;
                f_sel = wm8978->f_mclk;
        } else {
-               if (!wm8978->f_pllout) {
+               if (!wm8978->f_opclk) {
                        /* We only enter here, if OPCLK is not used */
                        int ret = wm8978_configure_pll(codec);
                        if (ret < 0)
@@@ -1038,8 -1035,8 +1035,8 @@@ static const struct regmap_config wm897
        .num_reg_defaults = ARRAY_SIZE(wm8978_reg_defaults),
  };
  
- static __devinit int wm8978_i2c_probe(struct i2c_client *i2c,
-                                     const struct i2c_device_id *id)
+ static int wm8978_i2c_probe(struct i2c_client *i2c,
+                           const struct i2c_device_id *id)
  {
        struct wm8978_priv *wm8978;
        int ret;
        if (wm8978 == NULL)
                return -ENOMEM;
  
-       wm8978->regmap = regmap_init_i2c(i2c, &wm8978_regmap_config);
+       wm8978->regmap = devm_regmap_init_i2c(i2c, &wm8978_regmap_config);
        if (IS_ERR(wm8978->regmap)) {
                ret = PTR_ERR(wm8978->regmap);
                dev_err(&i2c->dev, "Failed to allocate regmap: %d\n", ret);
        ret = regmap_write(wm8978->regmap, WM8978_RESET, 0);
        if (ret != 0) {
                dev_err(&i2c->dev, "Failed to issue reset: %d\n", ret);
-               goto err;
+               return ret;
        }
  
        ret = snd_soc_register_codec(&i2c->dev,
                        &soc_codec_dev_wm8978, &wm8978_dai, 1);
        if (ret != 0) {
                dev_err(&i2c->dev, "Failed to register CODEC: %d\n", ret);
-               goto err;
+               return ret;
        }
  
        return 0;
- err:
-       regmap_exit(wm8978->regmap);
-       return ret;
  }
  
- static __devexit int wm8978_i2c_remove(struct i2c_client *client)
+ static int wm8978_i2c_remove(struct i2c_client *client)
  {
-       struct wm8978_priv *wm8978 = i2c_get_clientdata(client);
        snd_soc_unregister_codec(&client->dev);
-       regmap_exit(wm8978->regmap);
  
        return 0;
  }
@@@ -1101,7 -1091,7 +1091,7 @@@ static struct i2c_driver wm8978_i2c_dri
                .owner = THIS_MODULE,
        },
        .probe =    wm8978_i2c_probe,
-       .remove =   __devexit_p(wm8978_i2c_remove),
+       .remove =   wm8978_i2c_remove,
        .id_table = wm8978_i2c_id,
  };
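
The wm8978 hunks above (and the wm8985 ones further down) switch to devm_regmap_init_i2c()/devm_regmap_init_spi(), so the regmap becomes device-managed and the explicit regmap_exit() calls disappear from the error and remove paths. A minimal sketch of the resulting probe shape for a hypothetical codec (all foo_* names are illustrative):

    static int foo_i2c_probe(struct i2c_client *i2c,
                             const struct i2c_device_id *id)
    {
            struct foo_priv *foo;

            foo = devm_kzalloc(&i2c->dev, sizeof(*foo), GFP_KERNEL);
            if (!foo)
                    return -ENOMEM;

            /* released automatically with the device - no regmap_exit() needed */
            foo->regmap = devm_regmap_init_i2c(i2c, &foo_regmap_config);
            if (IS_ERR(foo->regmap))
                    return PTR_ERR(foo->regmap);

            i2c_set_clientdata(i2c, foo);
            return snd_soc_register_codec(&i2c->dev, &soc_codec_dev_foo,
                                          &foo_dai, 1);
    }
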
  
diff --combined sound/soc/codecs/wm8983.c
index 494e42f18946ab41c1f72206a9cc8f55fc53968a,9fe1e041da498cec9bc335959c92d569ca0102b9..0867870ffdfcea41f560b2ac92939d6894a0f674
@@@ -353,13 -353,13 +353,13 @@@ static const struct snd_kcontrol_new wm
        SOC_ENUM_EXT("Equalizer Function", eqmode, eqmode_get, eqmode_put),
        SOC_ENUM("EQ1 Cutoff", eq1_cutoff),
        SOC_SINGLE_TLV("EQ1 Volume", WM8983_EQ1_LOW_SHELF,  0, 24, 1, eq_tlv),
 -      SOC_ENUM("EQ2 Bandwith", eq2_bw),
 +      SOC_ENUM("EQ2 Bandwidth", eq2_bw),
        SOC_ENUM("EQ2 Cutoff", eq2_cutoff),
        SOC_SINGLE_TLV("EQ2 Volume", WM8983_EQ2_PEAK_1, 0, 24, 1, eq_tlv),
 -      SOC_ENUM("EQ3 Bandwith", eq3_bw),
 +      SOC_ENUM("EQ3 Bandwidth", eq3_bw),
        SOC_ENUM("EQ3 Cutoff", eq3_cutoff),
        SOC_SINGLE_TLV("EQ3 Volume", WM8983_EQ3_PEAK_2, 0, 24, 1, eq_tlv),
 -      SOC_ENUM("EQ4 Bandwith", eq4_bw),
 +      SOC_ENUM("EQ4 Bandwidth", eq4_bw),
        SOC_ENUM("EQ4 Cutoff", eq4_cutoff),
        SOC_SINGLE_TLV("EQ4 Volume", WM8983_EQ4_PEAK_3, 0, 24, 1, eq_tlv),
        SOC_ENUM("EQ5 Cutoff", eq5_cutoff),
@@@ -1087,7 -1087,7 +1087,7 @@@ static const struct regmap_config wm898
  };
  
  #if defined(CONFIG_SPI_MASTER)
- static int __devinit wm8983_spi_probe(struct spi_device *spi)
+ static int wm8983_spi_probe(struct spi_device *spi)
  {
        struct wm8983_priv *wm8983;
        int ret;
        return ret;
  }
  
- static int __devexit wm8983_spi_remove(struct spi_device *spi)
+ static int wm8983_spi_remove(struct spi_device *spi)
  {
        snd_soc_unregister_codec(&spi->dev);
        return 0;
@@@ -1122,13 -1122,13 +1122,13 @@@ static struct spi_driver wm8983_spi_dri
                .owner = THIS_MODULE,
        },
        .probe = wm8983_spi_probe,
-       .remove = __devexit_p(wm8983_spi_remove)
+       .remove = wm8983_spi_remove
  };
  #endif
  
  #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
- static __devinit int wm8983_i2c_probe(struct i2c_client *i2c,
-                                     const struct i2c_device_id *id)
+ static int wm8983_i2c_probe(struct i2c_client *i2c,
+                           const struct i2c_device_id *id)
  {
        struct wm8983_priv *wm8983;
        int ret;
        return ret;
  }
  
- static __devexit int wm8983_i2c_remove(struct i2c_client *client)
+ static int wm8983_i2c_remove(struct i2c_client *client)
  {
        snd_soc_unregister_codec(&client->dev);
        return 0;
@@@ -1170,7 -1170,7 +1170,7 @@@ static struct i2c_driver wm8983_i2c_dri
                .owner = THIS_MODULE,
        },
        .probe = wm8983_i2c_probe,
-       .remove = __devexit_p(wm8983_i2c_remove),
+       .remove = wm8983_i2c_remove,
        .id_table = wm8983_i2c_id
  };
  #endif
diff --combined sound/soc/codecs/wm8985.c
index 3b37fc4a7ea71234496a6bb18d87f4ae6b168854,ab3782657ac81e7ae11afa0142e1d2d5a1e4d483..7d6b9f23cfe0f9942f95d1ba7a3e5a979ed70b04
@@@ -371,13 -371,13 +371,13 @@@ static const struct snd_kcontrol_new wm
        SOC_ENUM_EXT("Equalizer Function", eqmode, eqmode_get, eqmode_put),
        SOC_ENUM("EQ1 Cutoff", eq1_cutoff),
        SOC_SINGLE_TLV("EQ1 Volume", WM8985_EQ1_LOW_SHELF,  0, 24, 1, eq_tlv),
 -      SOC_ENUM("EQ2 Bandwith", eq2_bw),
 +      SOC_ENUM("EQ2 Bandwidth", eq2_bw),
        SOC_ENUM("EQ2 Cutoff", eq2_cutoff),
        SOC_SINGLE_TLV("EQ2 Volume", WM8985_EQ2_PEAK_1, 0, 24, 1, eq_tlv),
 -      SOC_ENUM("EQ3 Bandwith", eq3_bw),
 +      SOC_ENUM("EQ3 Bandwidth", eq3_bw),
        SOC_ENUM("EQ3 Cutoff", eq3_cutoff),
        SOC_SINGLE_TLV("EQ3 Volume", WM8985_EQ3_PEAK_2, 0, 24, 1, eq_tlv),
 -      SOC_ENUM("EQ4 Bandwith", eq4_bw),
 +      SOC_ENUM("EQ4 Bandwidth", eq4_bw),
        SOC_ENUM("EQ4 Cutoff", eq4_cutoff),
        SOC_SINGLE_TLV("EQ4 Volume", WM8985_EQ4_PEAK_3, 0, 24, 1, eq_tlv),
        SOC_ENUM("EQ5 Cutoff", eq5_cutoff),
@@@ -1111,7 -1111,7 +1111,7 @@@ static const struct regmap_config wm898
  };
  
  #if defined(CONFIG_SPI_MASTER)
- static int __devinit wm8985_spi_probe(struct spi_device *spi)
+ static int wm8985_spi_probe(struct spi_device *spi)
  {
        struct wm8985_priv *wm8985;
        int ret;
  
        spi_set_drvdata(spi, wm8985);
  
-       wm8985->regmap = regmap_init_spi(spi, &wm8985_regmap);
+       wm8985->regmap = devm_regmap_init_spi(spi, &wm8985_regmap);
        if (IS_ERR(wm8985->regmap)) {
                ret = PTR_ERR(wm8985->regmap);
                dev_err(&spi->dev, "Failed to allocate register map: %d\n",
                        ret);
-               goto err;
+               return ret;
        }
  
        ret = snd_soc_register_codec(&spi->dev,
                                     &soc_codec_dev_wm8985, &wm8985_dai, 1);
-       if (ret != 0)
-               goto err;
-       return 0;
- err:
-       regmap_exit(wm8985->regmap);
        return ret;
  }
  
- static int __devexit wm8985_spi_remove(struct spi_device *spi)
+ static int wm8985_spi_remove(struct spi_device *spi)
  {
-       struct wm8985_priv *wm8985 = spi_get_drvdata(spi);
        snd_soc_unregister_codec(&spi->dev);
-       regmap_exit(wm8985->regmap);
        return 0;
  }
  
@@@ -1158,13 -1147,13 +1147,13 @@@ static struct spi_driver wm8985_spi_dri
                .owner = THIS_MODULE,
        },
        .probe = wm8985_spi_probe,
-       .remove = __devexit_p(wm8985_spi_remove)
+       .remove = wm8985_spi_remove
  };
  #endif
  
  #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
- static __devinit int wm8985_i2c_probe(struct i2c_client *i2c,
-                                     const struct i2c_device_id *id)
+ static int wm8985_i2c_probe(struct i2c_client *i2c,
+                           const struct i2c_device_id *id)
  {
        struct wm8985_priv *wm8985;
        int ret;
  
        i2c_set_clientdata(i2c, wm8985);
  
-       wm8985->regmap = regmap_init_i2c(i2c, &wm8985_regmap);
+       wm8985->regmap = devm_regmap_init_i2c(i2c, &wm8985_regmap);
        if (IS_ERR(wm8985->regmap)) {
                ret = PTR_ERR(wm8985->regmap);
                dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
                        ret);
-               goto err;
+               return ret;
        }
  
        ret = snd_soc_register_codec(&i2c->dev,
                                     &soc_codec_dev_wm8985, &wm8985_dai, 1);
-       if (ret != 0)
-               goto err;
-       return 0;
- err:
-       regmap_exit(wm8985->regmap);
        return ret;
  }
  
- static __devexit int wm8985_i2c_remove(struct i2c_client *i2c)
+ static int wm8985_i2c_remove(struct i2c_client *i2c)
  {
-       struct wm8985_priv *wm8985 = i2c_get_clientdata(i2c);
        snd_soc_unregister_codec(&i2c->dev);
-       regmap_exit(wm8985->regmap);
        return 0;
  }
  
@@@ -1217,7 -1195,7 +1195,7 @@@ static struct i2c_driver wm8985_i2c_dri
                .owner = THIS_MODULE,
        },
        .probe = wm8985_i2c_probe,
-       .remove = __devexit_p(wm8985_i2c_remove),
+       .remove = wm8985_i2c_remove,
        .id_table = wm8985_i2c_id
  };
  #endif
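
The codec hunks above also drop the __devinit/__devexit annotations and the __devexit_p() wrapper, which were removed along with CONFIG_HOTPLUG; probe and remove callbacks are now plain functions. A minimal sketch for a hypothetical platform driver (foo_* names are illustrative):

    static int foo_probe(struct platform_device *pdev)
    {
            return 0;
    }

    static int foo_remove(struct platform_device *pdev)
    {
            return 0;
    }

    static struct platform_driver foo_driver = {
            .driver = {
                    .name  = "foo",
                    .owner = THIS_MODULE,
            },
            .probe  = foo_probe,            /* was: __devinit foo_probe */
            .remove = foo_remove,           /* was: __devexit_p(foo_remove) */
    };
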