author	Rafael J. Wysocki <rjw@sisk.pl>	2011-12-21 21:59:45 +0100
committer	Rafael J. Wysocki <rjw@sisk.pl>	2011-12-21 21:59:45 +0100
commit	b00f4dc5ff022cb9cbaffd376d9454d7fa1e496f (patch)
tree	40f1b232e2f1e8ac365317a14fdcbcb331722b46
parent	1eac8111e0763853266a171ce11214da3a347a0a (diff)
parent	b9e26dfdad5a4f9cbdaacafac6998614cc9c41bc (diff)
Merge branch 'master' into pm-sleep
* master: (848 commits)
  SELinux: Fix RCU deref check warning in sel_netport_insert()
  binary_sysctl(): fix memory leak
  mm/vmalloc.c: remove static declaration of va from __get_vm_area_node
  ipmi_watchdog: restore settings when BMC reset
  oom: fix integer overflow of points in oom_badness
  memcg: keep root group unchanged if creation fails
  nilfs2: potential integer overflow in nilfs_ioctl_clean_segments()
  nilfs2: unbreak compat ioctl
  cpusets: stall when updating mems_allowed for mempolicy or disjoint nodemask
  evm: prevent racing during tfm allocation
  evm: key must be set once during initialization
  mmc: vub300: fix type of firmware_rom_wait_states module parameter
  Revert "mmc: enable runtime PM by default"
  mmc: sdhci: remove "state" argument from sdhci_suspend_host
  x86, dumpstack: Fix code bytes breakage due to missing KERN_CONT
  IB/qib: Correct sense on freectxts increment and decrement
  RDMA/cma: Verify private data length
  cgroups: fix a css_set not found bug in cgroup_attach_proc
  oprofile: Fix uninitialized memory access when writing to writing to oprofilefs
  Revert "xen/pv-on-hvm kexec: add xs_reset_watches to shutdown watches from old kernel"
  ...

Conflicts:
	kernel/cgroup_freezer.c
-rw-r--r--CREDITS9
-rw-r--r--Documentation/ABI/testing/sysfs-bus-rbd7
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt1
-rw-r--r--Documentation/filesystems/btrfs.txt4
-rw-r--r--Documentation/kernel-parameters.txt6
-rw-r--r--Documentation/networking/ip-sysctl.txt10
-rw-r--r--Documentation/power/devices.txt111
-rw-r--r--Documentation/power/runtime_pm.txt40
-rw-r--r--Documentation/sound/alsa/soc/machine.txt6
-rw-r--r--Documentation/usb/linux-cdc-acm.inf4
-rw-r--r--MAINTAINERS58
-rw-r--r--Makefile2
-rw-r--r--arch/arm/Kconfig23
-rw-r--r--arch/arm/common/gic.c16
-rw-r--r--arch/arm/common/pl330.c12
-rw-r--r--arch/arm/configs/at91cap9_defconfig (renamed from arch/arm/configs/at91cap9adk_defconfig)7
-rw-r--r--arch/arm/configs/at91rm9200_defconfig47
-rw-r--r--arch/arm/configs/at91sam9260_defconfig (renamed from arch/arm/configs/at91sam9260ek_defconfig)16
-rw-r--r--arch/arm/configs/at91sam9g20_defconfig (renamed from arch/arm/configs/at91sam9g20ek_defconfig)23
-rw-r--r--arch/arm/configs/at91sam9g45_defconfig7
-rw-r--r--arch/arm/configs/at91sam9rl_defconfig (renamed from arch/arm/configs/at91sam9rlek_defconfig)5
-rw-r--r--arch/arm/configs/ezx_defconfig2
-rw-r--r--arch/arm/configs/imote2_defconfig2
-rw-r--r--arch/arm/configs/magician_defconfig2
-rw-r--r--arch/arm/configs/omap1_defconfig6
-rw-r--r--arch/arm/configs/u300_defconfig13
-rw-r--r--arch/arm/configs/u8500_defconfig14
-rw-r--r--arch/arm/configs/zeus_defconfig2
-rw-r--r--arch/arm/include/asm/pmu.h10
-rw-r--r--arch/arm/include/asm/topology.h2
-rw-r--r--arch/arm/include/asm/unwind.h16
-rw-r--r--arch/arm/kernel/entry-armv.S2
-rw-r--r--arch/arm/kernel/kprobes-arm.c4
-rw-r--r--arch/arm/kernel/kprobes-test-arm.c27
-rw-r--r--arch/arm/kernel/kprobes-test-thumb.c16
-rw-r--r--arch/arm/kernel/kprobes-test.h100
-rw-r--r--arch/arm/kernel/perf_event.c20
-rw-r--r--arch/arm/kernel/pmu.c1
-rw-r--r--arch/arm/kernel/process.c3
-rw-r--r--arch/arm/kernel/setup.c14
-rw-r--r--arch/arm/kernel/topology.c2
-rw-r--r--arch/arm/kernel/unwind.c129
-rw-r--r--arch/arm/lib/bitops.h26
-rw-r--r--arch/arm/lib/changebit.S4
-rw-r--r--arch/arm/lib/clearbit.S4
-rw-r--r--arch/arm/lib/setbit.S4
-rw-r--r--arch/arm/lib/testchangebit.S4
-rw-r--r--arch/arm/lib/testclearbit.S4
-rw-r--r--arch/arm/lib/testsetbit.S4
-rw-r--r--arch/arm/mach-at91/at91rm9200_devices.c2
-rw-r--r--arch/arm/mach-at91/at91sam9260.c6
-rw-r--r--arch/arm/mach-at91/at91sam9260_devices.c2
-rw-r--r--arch/arm/mach-at91/at91sam9261_devices.c2
-rw-r--r--arch/arm/mach-at91/at91sam9263_devices.c2
-rw-r--r--arch/arm/mach-at91/include/mach/system_rev.h2
-rw-r--r--arch/arm/mach-davinci/board-da850-evm.c2
-rw-r--r--arch/arm/mach-davinci/board-dm365-evm.c2
-rw-r--r--arch/arm/mach-davinci/board-dm646x-evm.c6
-rw-r--r--arch/arm/mach-davinci/dm646x.c1
-rw-r--r--arch/arm/mach-davinci/include/mach/psc.h5
-rw-r--r--arch/arm/mach-davinci/psc.c18
-rw-r--r--arch/arm/mach-exynos/cpuidle.c2
-rw-r--r--arch/arm/mach-exynos/mct.c13
-rw-r--r--arch/arm/mach-highbank/highbank.c4
-rw-r--r--arch/arm/mach-imx/Kconfig13
-rw-r--r--arch/arm/mach-imx/clock-imx6q.c7
-rw-r--r--arch/arm/mach-imx/mach-imx6q.c10
-rw-r--r--arch/arm/mach-imx/mm-imx3.c109
-rw-r--r--arch/arm/mach-imx/src.c7
-rw-r--r--arch/arm/mach-mmp/gplugd.c2
-rw-r--r--arch/arm/mach-mmp/include/mach/gpio-pxa.h2
-rw-r--r--arch/arm/mach-msm/devices-iommu.c1
-rw-r--r--arch/arm/mach-mx5/board-mx51_babbage.c2
-rw-r--r--arch/arm/mach-mx5/board-mx53_evk.c2
-rw-r--r--arch/arm/mach-mx5/board-mx53_loco.c2
-rw-r--r--arch/arm/mach-mx5/board-mx53_smd.c2
-rw-r--r--arch/arm/mach-mx5/cpu.c5
-rw-r--r--arch/arm/mach-mx5/imx51-dt.c12
-rw-r--r--arch/arm/mach-mx5/imx53-dt.c12
-rw-r--r--arch/arm/mach-mx5/mm.c6
-rw-r--r--arch/arm/mach-mxs/clock-mx28.c2
-rw-r--r--arch/arm/mach-mxs/include/mach/mx28.h4
-rw-r--r--arch/arm/mach-mxs/include/mach/mxs.h1
-rw-r--r--arch/arm/mach-mxs/mach-m28evk.c2
-rw-r--r--arch/arm/mach-mxs/mach-stmp378x_devb.c2
-rw-r--r--arch/arm/mach-mxs/module-tx28.c4
-rw-r--r--arch/arm/mach-omap1/Kconfig8
-rw-r--r--arch/arm/mach-omap1/board-ams-delta.c10
-rw-r--r--arch/arm/mach-omap1/clock.h3
-rw-r--r--arch/arm/mach-omap1/clock_data.c61
-rw-r--r--arch/arm/mach-omap1/devices.c3
-rw-r--r--arch/arm/mach-omap2/Kconfig1
-rw-r--r--arch/arm/mach-omap2/Makefile5
-rw-r--r--arch/arm/mach-omap2/board-rx51-peripherals.c2
-rw-r--r--arch/arm/mach-omap2/cpuidle34xx.c1
-rw-r--r--arch/arm/mach-omap2/display.c159
-rw-r--r--arch/arm/mach-omap2/display.h29
-rw-r--r--arch/arm/mach-omap2/io.h0
-rw-r--r--arch/arm/mach-omap2/mcbsp.c6
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c6
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2420_data.c17
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2430_data.c17
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c5
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_3xxx_data.c37
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_44xx_data.c24
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_common_data.c4
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_common_data.h4
-rw-r--r--arch/arm/mach-omap2/omap_l3_noc.c2
-rw-r--r--arch/arm/mach-omap2/pm.c6
-rw-r--r--arch/arm/mach-omap2/smartreflex.c2
-rw-r--r--arch/arm/mach-omap2/twl-common.c11
-rw-r--r--arch/arm/mach-omap2/twl-common.h3
-rw-r--r--arch/arm/mach-prima2/pm.c1
-rw-r--r--arch/arm/mach-prima2/prima2.c1
-rw-r--r--arch/arm/mach-pxa/balloon3.c2
-rw-r--r--arch/arm/mach-pxa/colibri-pxa320.c2
-rw-r--r--arch/arm/mach-pxa/gumstix.c2
-rw-r--r--arch/arm/mach-pxa/include/mach/palm27x.h4
-rw-r--r--arch/arm/mach-pxa/palm27x.c4
-rw-r--r--arch/arm/mach-pxa/palmtc.c2
-rw-r--r--arch/arm/mach-pxa/vpac270.c2
-rw-r--r--arch/arm/mach-s3c64xx/dev-spi.c1
-rw-r--r--arch/arm/mach-s3c64xx/mach-crag6410-module.c2
-rw-r--r--arch/arm/mach-s3c64xx/s3c6400.c2
-rw-r--r--arch/arm/mach-s3c64xx/setup-fb-24bpp.c2
-rw-r--r--arch/arm/mach-s5pv210/mach-smdkv210.c1
-rw-r--r--arch/arm/mach-sa1100/Makefile.boot4
-rw-r--r--arch/arm/mach-shmobile/board-ag5evm.c1
-rw-r--r--arch/arm/mach-shmobile/board-kota2.c139
-rw-r--r--arch/arm/mach-shmobile/clock-sh73a0.c18
-rw-r--r--arch/arm/mm/cache-l2x0.c2
-rw-r--r--arch/arm/mm/dma-mapping.c11
-rw-r--r--arch/arm/mm/mmap.c23
-rw-r--r--arch/arm/plat-mxc/cpufreq.c1
-rw-r--r--arch/arm/plat-mxc/include/mach/common.h2
-rw-r--r--arch/arm/plat-mxc/include/mach/mxc.h14
-rw-r--r--arch/arm/plat-mxc/include/mach/system.h7
-rw-r--r--arch/arm/plat-mxc/pwm.c7
-rw-r--r--arch/arm/plat-mxc/system.c3
-rw-r--r--arch/arm/plat-omap/include/plat/clock.h2
-rw-r--r--arch/arm/plat-omap/include/plat/common.h3
-rw-r--r--arch/arm/plat-s3c24xx/cpu-freq-debugfs.c2
-rw-r--r--arch/arm/plat-s5p/sysmmu.c1
-rw-r--r--arch/arm/plat-samsung/dev-backlight.c1
-rw-r--r--arch/arm/plat-samsung/include/plat/gpio-cfg.h2
-rw-r--r--arch/arm/plat-samsung/pd.c2
-rw-r--r--arch/arm/plat-samsung/pwm.c2
-rw-r--r--arch/arm/tools/mach-types1
-rw-r--r--arch/m68k/include/asm/unistd.h4
-rw-r--r--arch/m68k/kernel/syscalltable.S2
-rw-r--r--arch/mips/kernel/perf_event_mipsxx.c8
-rw-r--r--arch/powerpc/boot/dts/p1023rds.dts17
-rw-r--r--arch/powerpc/configs/ppc44x_defconfig2
-rw-r--r--arch/powerpc/mm/hugetlbpage.c1
-rw-r--r--arch/powerpc/platforms/85xx/Kconfig2
-rw-r--r--arch/powerpc/platforms/85xx/p3060_qds.c2
-rw-r--r--arch/powerpc/sysdev/ehv_pic.c1
-rw-r--r--arch/powerpc/sysdev/fsl_lbc.c1
-rw-r--r--arch/powerpc/sysdev/qe_lib/qe.c2
-rw-r--r--arch/s390/include/asm/pgtable.h8
-rw-r--r--arch/s390/kernel/ptrace.c30
-rw-r--r--arch/s390/kernel/setup.c2
-rw-r--r--arch/s390/kernel/signal.c8
-rw-r--r--arch/s390/oprofile/init.c2
-rw-r--r--arch/sh/boards/board-sh7757lcr.c16
-rw-r--r--arch/sparc/kernel/ds.c6
-rw-r--r--arch/sparc/kernel/prom_common.c4
-rw-r--r--arch/sparc/mm/btfixup.c3
-rw-r--r--arch/tile/include/asm/irq.h10
-rw-r--r--arch/tile/kernel/irq.c16
-rw-r--r--arch/tile/kernel/pci-dma.c1
-rw-r--r--arch/tile/kernel/pci.c1
-rw-r--r--arch/tile/kernel/sysfs.c1
-rw-r--r--arch/tile/lib/exports.c3
-rw-r--r--arch/tile/mm/homecache.c9
-rw-r--r--arch/x86/Kconfig8
-rw-r--r--arch/x86/include/asm/intel_scu_ipc.h14
-rw-r--r--arch/x86/include/asm/mrst.h9
-rw-r--r--arch/x86/include/asm/msr.h9
-rw-r--r--arch/x86/include/asm/system.h1
-rw-r--r--arch/x86/include/asm/timer.h23
-rw-r--r--arch/x86/include/asm/uv/uv_mmrs.h1
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c2
-rw-r--r--arch/x86/kernel/cpu/amd.c8
-rw-r--r--arch/x86/kernel/cpu/mtrr/generic.c2
-rw-r--r--arch/x86/kernel/cpu/perf_event.c16
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd_ibs.c29
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c8
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_ds.c6
-rw-r--r--arch/x86/kernel/cpu/perf_event_p4.c2
-rw-r--r--arch/x86/kernel/dumpstack_32.c8
-rw-r--r--arch/x86/kernel/dumpstack_64.c8
-rw-r--r--arch/x86/kernel/hpet.c21
-rw-r--r--arch/x86/kernel/irq_64.c3
-rw-r--r--arch/x86/kernel/microcode_core.c28
-rw-r--r--arch/x86/kernel/mpparse.c2
-rw-r--r--arch/x86/kernel/process.c8
-rw-r--r--arch/x86/kernel/quirks.c13
-rw-r--r--arch/x86/kernel/reboot.c21
-rw-r--r--arch/x86/kernel/rtc.c5
-rw-r--r--arch/x86/mm/gup.c2
-rw-r--r--arch/x86/mm/highmem_32.c2
-rw-r--r--arch/x86/oprofile/init.c7
-rw-r--r--arch/x86/platform/efi/efi_32.c48
-rw-r--r--arch/x86/platform/mrst/mrst.c68
-rw-r--r--arch/x86/xen/setup.c20
-rw-r--r--block/blk-core.c23
-rw-r--r--block/cfq-iosched.c16
-rw-r--r--drivers/acpi/apei/erst.c31
-rw-r--r--drivers/ata/ahci_platform.c2
-rw-r--r--drivers/ata/libata-sff.c4
-rw-r--r--drivers/base/core.c6
-rw-r--r--drivers/block/cciss.c6
-rw-r--r--drivers/block/loop.c4
-rw-r--r--drivers/block/rbd.c101
-rw-r--r--drivers/block/swim3.c362
-rw-r--r--drivers/bluetooth/Kconfig6
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c15
-rw-r--r--drivers/bluetooth/btusb.c3
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c41
-rw-r--r--drivers/crypto/mv_cesa.c12
-rw-r--r--drivers/edac/mpc85xx_edac.c2
-rw-r--r--drivers/firmware/efivars.c12
-rw-r--r--drivers/firmware/iscsi_ibft.c42
-rw-r--r--drivers/firmware/iscsi_ibft_find.c26
-rw-r--r--drivers/firmware/sigma.c81
-rw-r--r--drivers/gpio/Makefile2
-rw-r--r--drivers/gpio/gpio-da9052.c21
-rw-r--r--drivers/gpio/gpio-ml-ioh.c32
-rw-r--r--drivers/gpio/gpio-mpc8xxx.c18
-rw-r--r--drivers/gpio/gpio-pca953x.c4
-rw-r--r--drivers/gpio/gpio-pl061.c4
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c27
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_buf.c62
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_buf.h21
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_connector.c78
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c76
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.h25
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h13
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.c83
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.h1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c66
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c44
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c71
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c89
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h28
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c1
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c10
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c43
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h18
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c19
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h30
-rw-r--r--drivers/gpu/drm/i915/intel_display.c89
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c173
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h1
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c8
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c16
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c36
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c45
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_object.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c3
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_graph.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvd0_display.c2
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c35
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c7
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c149
-rw-r--r--drivers/gpu/drm/radeon/evergreen_reg.h29
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h31
-rw-r--r--drivers/gpu/drm/radeon/r100.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_acpi.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c7
-rw-r--r--drivers/gpu/drm/radeon/rs600.c7
-rw-r--r--drivers/gpu/drm/radeon/rv770.c7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c20
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c405
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c22
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c23
-rw-r--r--drivers/hid/hid-core.c2
-rw-r--r--drivers/hid/hid-ids.h2
-rw-r--r--drivers/hwmon/ad7314.c1
-rw-r--r--drivers/hwmon/ads7871.c1
-rw-r--r--drivers/hwmon/exynos4_tmu.c12
-rw-r--r--drivers/hwmon/gpio-fan.c13
-rw-r--r--drivers/hwmon/jz4740-hwmon.c16
-rw-r--r--drivers/hwmon/ntc_thermistor.c14
-rw-r--r--drivers/hwmon/s3c-hwmon.c13
-rw-r--r--drivers/hwmon/sch5627.c13
-rw-r--r--drivers/hwmon/sch5636.c13
-rw-r--r--drivers/hwmon/twl4030-madc-hwmon.c14
-rw-r--r--drivers/hwmon/ultra45_env.c13
-rw-r--r--drivers/hwmon/wm831x-hwmon.c12
-rw-r--r--drivers/hwmon/wm8350-hwmon.c12
-rw-r--r--drivers/i2c/busses/i2c-eg20t.c22
-rw-r--r--drivers/i2c/busses/i2c-nuc900.c2
-rw-r--r--drivers/i2c/busses/i2c-omap.c11
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c3
-rw-r--r--drivers/infiniband/core/addr.c9
-rw-r--r--drivers/infiniband/core/cma.c6
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c4
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c10
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c2
-rw-r--r--drivers/infiniband/hw/mlx4/main.c6
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c18
-rw-r--r--drivers/infiniband/hw/qib/qib_qsfp.c12
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c13
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c20
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c13
-rw-r--r--drivers/input/misc/cma3000_d0x.c4
-rw-r--r--drivers/input/mouse/synaptics.c11
-rw-r--r--drivers/input/tablet/wacom_wac.c4
-rw-r--r--drivers/iommu/intel-iommu.c7
-rw-r--r--drivers/iommu/intr_remapping.c2
-rw-r--r--drivers/isdn/divert/divert_procfs.c6
-rw-r--r--drivers/isdn/i4l/isdn_net.c3
-rw-r--r--drivers/md/bitmap.c4
-rw-r--r--drivers/md/md.c27
-rw-r--r--drivers/md/raid5.c8
-rw-r--r--drivers/media/common/tuners/mxl5007t.c3
-rw-r--r--drivers/media/common/tuners/tda18218.c2
-rw-r--r--drivers/media/rc/ati_remote.c111
-rw-r--r--drivers/media/rc/keymaps/rc-ati-x10.c96
-rw-r--r--drivers/media/rc/keymaps/rc-medion-x10.c128
-rw-r--r--drivers/media/rc/keymaps/rc-snapstream-firefly.c114
-rw-r--r--drivers/media/video/au0828/au0828-cards.c7
-rw-r--r--drivers/media/video/m5mols/m5mols.h2
-rw-r--r--drivers/media/video/m5mols/m5mols_core.c22
-rw-r--r--drivers/media/video/mt9m111.c1
-rw-r--r--drivers/media/video/mt9t112.c4
-rw-r--r--drivers/media/video/omap/omap_vout.c9
-rw-r--r--drivers/media/video/omap1_camera.c1
-rw-r--r--drivers/media/video/omap24xxcam-dma.c2
-rw-r--r--drivers/media/video/omap3isp/ispvideo.c1
-rw-r--r--drivers/media/video/ov6650.c2
-rw-r--r--drivers/media/video/s5p-fimc/fimc-capture.c14
-rw-r--r--drivers/media/video/s5p-fimc/fimc-core.c24
-rw-r--r--drivers/media/video/s5p-fimc/fimc-core.h2
-rw-r--r--drivers/media/video/s5p-fimc/fimc-mdevice.c43
-rw-r--r--drivers/media/video/s5p-fimc/fimc-reg.c15
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_enc.c2
-rw-r--r--drivers/media/video/s5p-tv/mixer_video.c1
-rw-r--r--drivers/media/video/sh_mobile_ceu_camera.c34
-rw-r--r--drivers/media/video/sh_mobile_csi2.c4
-rw-r--r--drivers/media/video/soc_camera.c3
-rw-r--r--drivers/mmc/card/block.c8
-rw-r--r--drivers/mmc/core/core.c98
-rw-r--r--drivers/mmc/core/host.c11
-rw-r--r--drivers/mmc/core/mmc.c12
-rw-r--r--drivers/mmc/host/mxcmmc.c1
-rw-r--r--drivers/mmc/host/omap_hsmmc.c7
-rw-r--r--drivers/mmc/host/sdhci-cns3xxx.c6
-rw-r--r--drivers/mmc/host/sdhci-dove.c5
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c5
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c5
-rw-r--r--drivers/mmc/host/sdhci-of-hlwd.c5
-rw-r--r--drivers/mmc/host/sdhci-pci.c26
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c18
-rw-r--r--drivers/mmc/host/sdhci-pltfm.h6
-rw-r--r--drivers/mmc/host/sdhci-pxav2.c5
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c5
-rw-r--r--drivers/mmc/host/sdhci-s3c.c23
-rw-r--r--drivers/mmc/host/sdhci-tegra.c5
-rw-r--r--drivers/mmc/host/sdhci.c2
-rw-r--r--drivers/mmc/host/sdhci.h2
-rw-r--r--drivers/mmc/host/sh_mmcif.c2
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c2
-rw-r--r--drivers/mmc/host/vub300.c2
-rw-r--r--drivers/mtd/maps/plat-ram.c12
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c2
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c2
-rw-r--r--drivers/mtd/nand/ndfc.c2
-rw-r--r--drivers/net/arcnet/Kconfig2
-rw-r--r--drivers/net/bonding/bond_main.c33
-rw-r--r--drivers/net/can/sja1000/peak_pci.c1
-rw-r--r--drivers/net/ethernet/broadcom/b44.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c39
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h1
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c2
-rw-r--r--drivers/net/ethernet/freescale/Kconfig1
-rw-r--r--drivers/net/ethernet/freescale/fec.c11
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c53
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea.h4
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c6
-rw-r--r--drivers/net/ethernet/ibm/iseries_veth.c2
-rw-r--r--drivers/net/ethernet/jme.c113
-rw-r--r--drivers/net/ethernet/jme.h19
-rw-r--r--drivers/net/ethernet/pasemi/Makefile3
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge.h8
-rw-r--r--drivers/net/ethernet/realtek/r8169.c53
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c14
-rw-r--r--drivers/net/ethernet/tile/tilepro.c8
-rw-r--r--drivers/net/phy/Kconfig2
-rw-r--r--drivers/net/ppp/pptp.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rxon.c36
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-sta.c5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c34
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c22
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-shared.h4
-rw-r--r--drivers/net/wireless/p54/p54spi.c5
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c2
-rw-r--r--drivers/net/wireless/rtlwifi/ps.c17
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/phy.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/phy.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/phy.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/phy.c2
-rw-r--r--drivers/net/xen-netback/netback.c4
-rw-r--r--drivers/of/irq.c15
-rw-r--r--drivers/oprofile/oprof.c29
-rw-r--r--drivers/oprofile/oprofile_files.c7
-rw-r--r--drivers/oprofile/oprofilefs.c11
-rw-r--r--drivers/oprofile/timer_int.c1
-rw-r--r--drivers/pci/Kconfig1
-rw-r--r--drivers/pci/ats.c1
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c25
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c3
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c27
-rw-r--r--drivers/pci/hotplug/shpchp_core.c4
-rw-r--r--drivers/pci/hotplug/shpchp_hpc.c4
-rw-r--r--drivers/pci/iov.c7
-rw-r--r--drivers/pci/pci.c9
-rw-r--r--drivers/platform/x86/toshiba_acpi.c21
-rw-r--r--drivers/power/intel_mid_battery.c12
-rw-r--r--drivers/ptp/ptp_clock.c4
-rw-r--r--drivers/rapidio/devices/tsi721.c41
-rw-r--r--drivers/rapidio/devices/tsi721.h2
-rw-r--r--drivers/regulator/aat2870-regulator.c2
-rw-r--r--drivers/regulator/core.c2
-rw-r--r--drivers/regulator/twl-regulator.c46
-rw-r--r--drivers/rtc/class.c10
-rw-r--r--drivers/rtc/interface.c50
-rw-r--r--drivers/rtc/rtc-m41t80.c9
-rw-r--r--drivers/rtc/rtc-s3c.c2
-rw-r--r--drivers/s390/cio/chsc.c7
-rw-r--r--drivers/s390/cio/cio.h5
-rw-r--r--drivers/s390/cio/css.c104
-rw-r--r--drivers/s390/cio/device.c4
-rw-r--r--drivers/s390/cio/device_fsm.c30
-rw-r--r--drivers/s390/cio/device_ops.c20
-rw-r--r--drivers/s390/cio/io_sch.h5
-rw-r--r--drivers/s390/crypto/ap_bus.c2
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c4
-rw-r--r--drivers/sbus/char/bbc_i2c.c27
-rw-r--r--drivers/sbus/char/display7seg.c13
-rw-r--r--drivers/sbus/char/envctrl.c12
-rw-r--r--drivers/sbus/char/flash.c12
-rw-r--r--drivers/sbus/char/uctrl.c12
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c5
-rw-r--r--drivers/scsi/fcoe/fcoe.c116
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c4
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c27
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c14
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c25
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c42
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c86
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h55
-rw-r--r--drivers/scsi/qla4xxx/ql4_fw.h8
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h16
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c243
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c11
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c1084
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h2
-rw-r--r--drivers/spi/Kconfig4
-rw-r--r--drivers/spi/spi-ath79.c1
-rw-r--r--drivers/spi/spi-gpio.c4
-rw-r--r--drivers/spi/spi-nuc900.c3
-rw-r--r--drivers/ssb/driver_pcicore.c8
-rw-r--r--drivers/staging/comedi/comedi_fops.c96
-rw-r--r--drivers/staging/comedi/drivers/usbduxsigma.c7
-rw-r--r--drivers/staging/iio/industrialio-core.c19
-rw-r--r--drivers/staging/rtl8712/usb_intf.c1
-rw-r--r--drivers/staging/rts_pstor/rtsx.c1
-rw-r--r--drivers/staging/tidspbridge/core/dsp-clock.c15
-rw-r--r--drivers/staging/tidspbridge/rmgr/drv_interface.c4
-rw-r--r--drivers/staging/usbip/vhci_rx.c10
-rw-r--r--drivers/target/iscsi/iscsi_target.c26
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.c6
-rw-r--r--drivers/target/iscsi/iscsi_target_core.h3
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c3
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c13
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c3
-rw-r--r--drivers/target/loopback/tcm_loop.c41
-rw-r--r--drivers/target/target_core_alua.c27
-rw-r--r--drivers/target/target_core_cdb.c20
-rw-r--r--drivers/target/target_core_configfs.c11
-rw-r--r--drivers/target/target_core_device.c30
-rw-r--r--drivers/target/target_core_file.c20
-rw-r--r--drivers/target/target_core_iblock.c16
-rw-r--r--drivers/target/target_core_pr.c240
-rw-r--r--drivers/target/target_core_pscsi.c28
-rw-r--r--drivers/target/target_core_rd.c258
-rw-r--r--drivers/target/target_core_tmr.c4
-rw-r--r--drivers/target/target_core_transport.c260
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c2
-rw-r--r--drivers/target/tcm_fc/tfc_conf.c3
-rw-r--r--drivers/usb/class/cdc-acm.c10
-rw-r--r--drivers/usb/gadget/amd5536udc.c2
-rw-r--r--drivers/usb/gadget/f_mass_storage.c1
-rw-r--r--drivers/usb/gadget/f_serial.c4
-rw-r--r--drivers/usb/gadget/fsl_mxc_udc.c3
-rw-r--r--drivers/usb/gadget/fsl_qe_udc.c3
-rw-r--r--drivers/usb/gadget/fsl_udc_core.c75
-rw-r--r--drivers/usb/gadget/fsl_usb2_udc.h10
-rw-r--r--drivers/usb/gadget/m66592-udc.c2
-rw-r--r--drivers/usb/gadget/net2280.c2
-rw-r--r--drivers/usb/gadget/r8a66597-udc.c2
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c4
-rw-r--r--drivers/usb/gadget/s3c-hsudc.c3
-rw-r--r--drivers/usb/host/ehci-sched.c9
-rw-r--r--drivers/usb/host/whci/qset.c2
-rw-r--r--drivers/usb/host/xhci.c5
-rw-r--r--drivers/usb/musb/musb_core.c6
-rw-r--r--drivers/usb/musb/musb_gadget.c2
-rw-r--r--drivers/usb/renesas_usbhs/mod.c2
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c47
-rw-r--r--drivers/usb/renesas_usbhs/mod_host.c1
-rw-r--r--drivers/usb/serial/ftdi_sio.c1
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h1
-rw-r--r--drivers/usb/serial/option.c9
-rw-r--r--drivers/usb/storage/unusual_devs.h7
-rw-r--r--drivers/video/da8xx-fb.c15
-rw-r--r--drivers/video/omap/dispc.c1
-rw-r--r--drivers/video/omap2/dss/dispc.c11
-rw-r--r--drivers/video/omap2/dss/hdmi.c2
-rw-r--r--drivers/video/via/share.h4
-rw-r--r--drivers/virtio/Kconfig2
-rw-r--r--drivers/virtio/virtio_mmio.c2
-rw-r--r--drivers/virtio/virtio_pci.c18
-rw-r--r--drivers/xen/swiotlb-xen.c4
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c13
-rw-r--r--fs/btrfs/async-thread.c117
-rw-r--r--fs/btrfs/async-thread.h4
-rw-r--r--fs/btrfs/ctree.h6
-rw-r--r--fs/btrfs/delayed-inode.c4
-rw-r--r--fs/btrfs/disk-io.c34
-rw-r--r--fs/btrfs/extent-tree.c171
-rw-r--r--fs/btrfs/extent_io.c51
-rw-r--r--fs/btrfs/file.c8
-rw-r--r--fs/btrfs/free-space-cache.c2
-rw-r--r--fs/btrfs/inode.c182
-rw-r--r--fs/btrfs/ioctl.c8
-rw-r--r--fs/btrfs/relocation.c2
-rw-r--r--fs/btrfs/scrub.c13
-rw-r--r--fs/btrfs/super.c38
-rw-r--r--fs/btrfs/volumes.c10
-rw-r--r--fs/ceph/addr.c8
-rw-r--r--fs/ceph/caps.c187
-rw-r--r--fs/ceph/dir.c24
-rw-r--r--fs/ceph/file.c23
-rw-r--r--fs/ceph/inode.c53
-rw-r--r--fs/ceph/ioctl.c4
-rw-r--r--fs/ceph/mds_client.c33
-rw-r--r--fs/ceph/mds_client.h2
-rw-r--r--fs/ceph/snap.c16
-rw-r--r--fs/ceph/super.c2
-rw-r--r--fs/ceph/super.h31
-rw-r--r--fs/ceph/xattr.c42
-rw-r--r--fs/cifs/connect.c2
-rw-r--r--fs/cifs/file.c26
-rw-r--r--fs/cifs/readdir.c10
-rw-r--r--fs/cifs/smbencrypt.c6
-rw-r--r--fs/configfs/inode.c2
-rw-r--r--fs/configfs/mount.c36
-rw-r--r--fs/dcache.c71
-rw-r--r--fs/ecryptfs/crypto.c26
-rw-r--r--fs/ecryptfs/ecryptfs_kernel.h5
-rw-r--r--fs/ecryptfs/file.c23
-rw-r--r--fs/ecryptfs/inode.c52
-rw-r--r--fs/ext4/extents.c3
-rw-r--r--fs/ext4/inode.c54
-rw-r--r--fs/ext4/page-io.c12
-rw-r--r--fs/ext4/super.c17
-rw-r--r--fs/fs-writeback.c5
-rw-r--r--fs/fuse/dev.c3
-rw-r--r--fs/fuse/file.c6
-rw-r--r--fs/fuse/inode.c24
-rw-r--r--fs/namespace.c20
-rw-r--r--fs/ncpfs/inode.c8
-rw-r--r--fs/nfs/file.c2
-rw-r--r--fs/nfs/nfs4proc.c24
-rw-r--r--fs/nfs/nfs4state.c33
-rw-r--r--fs/nilfs2/ioctl.c16
-rw-r--r--fs/ocfs2/alloc.c2
-rw-r--r--fs/ocfs2/aops.c69
-rw-r--r--fs/ocfs2/aops.h14
-rw-r--r--fs/ocfs2/cluster/heartbeat.c194
-rw-r--r--fs/ocfs2/cluster/netdebug.c102
-rw-r--r--fs/ocfs2/cluster/tcp.c138
-rw-r--r--fs/ocfs2/cluster/tcp.h2
-rw-r--r--fs/ocfs2/dir.c3
-rw-r--r--fs/ocfs2/dlm/dlmcommon.h56
-rw-r--r--fs/ocfs2/dlm/dlmdomain.c44
-rw-r--r--fs/ocfs2/dlm/dlmlock.c54
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c175
-rw-r--r--fs/ocfs2/dlm/dlmrecovery.c164
-rw-r--r--fs/ocfs2/dlm/dlmthread.c16
-rw-r--r--fs/ocfs2/dlmglue.c21
-rw-r--r--fs/ocfs2/extent_map.c96
-rw-r--r--fs/ocfs2/extent_map.h2
-rw-r--r--fs/ocfs2/file.c96
-rw-r--r--fs/ocfs2/inode.c2
-rw-r--r--fs/ocfs2/inode.h3
-rw-r--r--fs/ocfs2/ioctl.c11
-rw-r--r--fs/ocfs2/journal.c23
-rw-r--r--fs/ocfs2/journal.h5
-rw-r--r--fs/ocfs2/mmap.c53
-rw-r--r--fs/ocfs2/move_extents.c2
-rw-r--r--fs/ocfs2/ocfs2.h51
-rw-r--r--fs/ocfs2/quota_local.c23
-rw-r--r--fs/ocfs2/slot_map.c4
-rw-r--r--fs/ocfs2/stack_o2cb.c71
-rw-r--r--fs/ocfs2/super.c25
-rw-r--r--fs/ocfs2/xattr.c10
-rw-r--r--fs/proc/meminfo.c7
-rw-r--r--fs/proc/root.c8
-rw-r--r--fs/proc/stat.c4
-rw-r--r--fs/pstore/platform.c13
-rw-r--r--fs/seq_file.c6
-rw-r--r--fs/ubifs/super.c18
-rw-r--r--fs/xfs/xfs_acl.c2
-rw-r--r--fs/xfs/xfs_attr_leaf.c64
-rw-r--r--fs/xfs/xfs_bmap.c20
-rw-r--r--fs/xfs/xfs_export.c8
-rw-r--r--fs/xfs/xfs_inode.c21
-rw-r--r--fs/xfs/xfs_inode.h1
-rw-r--r--fs/xfs/xfs_log.c348
-rw-r--r--fs/xfs/xfs_sync.c11
-rw-r--r--fs/xfs/xfs_trace.h12
-rw-r--r--include/asm-generic/unistd.h8
-rw-r--r--include/drm/drm_pciids.h18
-rw-r--r--include/drm/exynos_drm.h9
-rw-r--r--include/linux/blkdev.h3
-rw-r--r--include/linux/clocksource.h12
-rw-r--r--include/linux/compat.h9
-rw-r--r--include/linux/dcache.h3
-rw-r--r--include/linux/dma_remapping.h2
-rw-r--r--include/linux/fs.h3
-rw-r--r--include/linux/ftrace_event.h2
-rw-r--r--include/linux/init_task.h4
-rw-r--r--include/linux/log2.h1
-rw-r--r--include/linux/mm.h1
-rw-r--r--include/linux/mmc/card.h6
-rw-r--r--include/linux/netdevice.h2
-rw-r--r--include/linux/pci-ats.h6
-rw-r--r--include/linux/pci.h2
-rw-r--r--include/linux/pci_ids.h4
-rw-r--r--include/linux/perf_event.h1
-rw-r--r--include/linux/pkt_sched.h6
-rw-r--r--include/linux/pm.h229
-rw-r--r--include/linux/pstore.h4
-rw-r--r--include/linux/shrinker.h2
-rw-r--r--include/linux/sigma.h13
-rw-r--r--include/linux/virtio_config.h2
-rw-r--r--include/linux/virtio_mmio.h2
-rw-r--r--include/media/soc_camera.h7
-rw-r--r--include/net/dst.h7
-rw-r--r--include/net/dst_ops.h2
-rw-r--r--include/net/inet_sock.h2
-rw-r--r--include/net/inetpeer.h1
-rw-r--r--include/net/netfilter/nf_conntrack_ecache.h19
-rw-r--r--include/net/netns/conntrack.h2
-rw-r--r--include/net/red.h15
-rw-r--r--include/net/route.h4
-rw-r--r--include/scsi/libfcoe.h3
-rw-r--r--include/target/target_core_base.h46
-rw-r--r--include/target/target_core_transport.h24
-rw-r--r--include/video/omapdss.h7
-rw-r--r--include/xen/interface/io/xs_wire.h3
-rw-r--r--ipc/mqueue.c8
-rw-r--r--ipc/msgutil.c5
-rw-r--r--kernel/cgroup.c5
-rw-r--r--kernel/cgroup_freezer.c11
-rw-r--r--kernel/cpuset.c29
-rw-r--r--kernel/events/core.c95
-rw-r--r--kernel/events/internal.h3
-rw-r--r--kernel/events/ring_buffer.c3
-rw-r--r--kernel/hrtimer.c6
-rw-r--r--kernel/irq/manage.c7
-rw-r--r--kernel/irq/spurious.c4
-rw-r--r--kernel/jump_label.c3
-rw-r--r--kernel/lockdep.c8
-rw-r--r--kernel/printk.c3
-rw-r--r--kernel/sched.c17
-rw-r--r--kernel/sched_fair.c159
-rw-r--r--kernel/sched_features.h1
-rw-r--r--kernel/sched_rt.c3
-rw-r--r--kernel/sysctl_binary.c2
-rw-r--r--kernel/time/alarmtimer.c2
-rw-r--r--kernel/time/clockevents.c1
-rw-r--r--kernel/time/clocksource.c74
-rw-r--r--kernel/time/tick-broadcast.c2
-rw-r--r--kernel/time/timekeeping.c92
-rw-r--r--kernel/timer.c2
-rw-r--r--kernel/trace/ftrace.c5
-rw-r--r--kernel/trace/trace_events.c1
-rw-r--r--kernel/trace/trace_events_filter.c13
-rw-r--r--lib/dma-debug.c2
-rw-r--r--mm/filemap.c6
-rw-r--r--mm/huge_memory.c16
-rw-r--r--mm/hugetlb.c1
-rw-r--r--mm/memcontrol.c3
-rw-r--r--mm/migrate.c2
-rw-r--r--mm/oom_kill.c2
-rw-r--r--mm/page-writeback.c32
-rw-r--r--mm/page_alloc.c10
-rw-r--r--mm/percpu-vm.c17
-rw-r--r--mm/percpu.c68
-rw-r--r--mm/slab.c5
-rw-r--r--mm/slub.c42
-rw-r--r--mm/vmalloc.c4
-rw-r--r--mm/vmscan.c26
-rw-r--r--net/batman-adv/translation-table.c27
-rw-r--r--net/bluetooth/bnep/core.c8
-rw-r--r--net/bluetooth/cmtp/core.c5
-rw-r--r--net/bluetooth/hci_event.c2
-rw-r--r--net/bridge/br_netlink.c6
-rw-r--r--net/bridge/br_stp.c29
-rw-r--r--net/caif/cffrml.c11
-rw-r--r--net/ceph/crush/mapper.c35
-rw-r--r--net/core/dev.c9
-rw-r--r--net/core/dev_addr_lists.c3
-rw-r--r--net/core/neighbour.c5
-rw-r--r--net/core/request_sock.c7
-rw-r--r--net/core/secure_seq.c2
-rw-r--r--net/core/skbuff.c2
-rw-r--r--net/dccp/ipv4.c1
-rw-r--r--net/decnet/dn_route.c10
-rw-r--r--net/decnet/dn_timer.c17
-rw-r--r--net/ipv4/devinet.c5
-rw-r--r--net/ipv4/igmp.c3
-rw-r--r--net/ipv4/inet_diag.c14
-rw-r--r--net/ipv4/ip_forward.c2
-rw-r--r--net/ipv4/ip_options.c5
-rw-r--r--net/ipv4/ipip.c7
-rw-r--r--net/ipv4/netfilter.c3
-rw-r--r--net/ipv4/netfilter/Kconfig1
-rw-r--r--net/ipv4/route.c94
-rw-r--r--net/ipv4/udp.c15
-rw-r--r--net/ipv6/addrconf.c3
-rw-r--r--net/ipv6/inet6_connection_sock.c2
-rw-r--r--net/ipv6/ipv6_sockglue.c2
-rw-r--r--net/ipv6/ndisc.c2
-rw-r--r--net/ipv6/netfilter/Kconfig1
-rw-r--r--net/ipv6/route.c23
-rw-r--r--net/ipv6/sit.c7
-rw-r--r--net/ipv6/tcp_ipv6.c13
-rw-r--r--net/ipv6/udp.c15
-rw-r--r--net/l2tp/l2tp_core.c2
-rw-r--r--net/mac80211/agg-tx.c128
-rw-r--r--net/mac80211/debugfs_sta.c4
-rw-r--r--net/mac80211/main.c6
-rw-r--r--net/mac80211/status.c8
-rw-r--r--net/mac80211/util.c1
-rw-r--r--net/netfilter/Kconfig2
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipport.c2
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipportip.c2
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipportnet.c2
-rw-r--r--net/netfilter/nf_conntrack_ecache.c37
-rw-r--r--net/netfilter/nf_conntrack_netlink.c73
-rw-r--r--net/netlabel/netlabel_kapi.c26
-rw-r--r--net/sched/sch_gred.c2
-rw-r--r--net/sched/sch_red.c4
-rw-r--r--net/sched/sch_teql.c31
-rw-r--r--net/sctp/auth.c2
-rw-r--r--net/sunrpc/sched.c30
-rw-r--r--net/sunrpc/xprt.c10
-rw-r--r--net/sunrpc/xprtsock.c3
-rw-r--r--net/unix/af_unix.c4
-rw-r--r--net/wireless/nl80211.c4
-rw-r--r--net/wireless/reg.c49
-rw-r--r--net/xfrm/xfrm_policy.c10
-rw-r--r--security/apparmor/path.c65
-rw-r--r--security/integrity/evm/evm_crypto.c19
-rw-r--r--security/selinux/netport.c4
-rw-r--r--security/tomoyo/realpath.c13
-rw-r--r--sound/pci/cs5535audio/cs5535audio_pcm.c2
-rw-r--r--sound/pci/hda/hda_codec.c6
-rw-r--r--sound/pci/hda/hda_eld.c28
-rw-r--r--sound/pci/hda/hda_intel.c5
-rw-r--r--sound/pci/hda/patch_cirrus.c32
-rw-r--r--sound/pci/hda/patch_hdmi.c16
-rw-r--r--sound/pci/hda/patch_realtek.c99
-rw-r--r--sound/pci/hda/patch_sigmatel.c75
-rw-r--r--sound/pci/hda/patch_via.c76
-rw-r--r--sound/pci/lx6464es/lx_core.c23
-rw-r--r--sound/pci/lx6464es/lx_core.h3
-rw-r--r--sound/pci/rme9652/hdspm.c2
-rw-r--r--sound/pci/sis7019.c64
-rw-r--r--sound/soc/atmel/Kconfig21
-rw-r--r--sound/soc/atmel/Makefile4
-rw-r--r--sound/soc/atmel/playpaq_wm8510.c473
-rw-r--r--sound/soc/codecs/Kconfig2
-rw-r--r--sound/soc/codecs/ad1836.h2
-rw-r--r--sound/soc/codecs/adau1373.c2
-rw-r--r--sound/soc/codecs/cs4270.c10
-rw-r--r--sound/soc/codecs/cs4271.c8
-rw-r--r--sound/soc/codecs/cs42l51.c2
-rw-r--r--sound/soc/codecs/jz4740.c1
-rw-r--r--sound/soc/codecs/max9877.c10
-rw-r--r--sound/soc/codecs/rt5631.c2
-rw-r--r--sound/soc/codecs/sgtl5000.c2
-rw-r--r--sound/soc/codecs/sta32x.c63
-rw-r--r--sound/soc/codecs/sta32x.h1
-rw-r--r--sound/soc/codecs/uda1380.c4
-rw-r--r--sound/soc/codecs/wm8731.c1
-rw-r--r--sound/soc/codecs/wm8753.c3
-rw-r--r--sound/soc/codecs/wm8958-dsp2.c2
-rw-r--r--sound/soc/codecs/wm8962.c4
-rw-r--r--sound/soc/codecs/wm8993.c2
-rw-r--r--sound/soc/codecs/wm8994.c19
-rw-r--r--sound/soc/codecs/wm8996.c1
-rw-r--r--sound/soc/codecs/wm9081.c10
-rw-r--r--sound/soc/codecs/wm9090.c6
-rw-r--r--sound/soc/codecs/wm_hubs.c2
-rw-r--r--sound/soc/fsl/fsl_ssi.c1
-rw-r--r--sound/soc/fsl/mpc8610_hpcd.c24
-rw-r--r--sound/soc/imx/Kconfig2
-rw-r--r--sound/soc/kirkwood/Kconfig3
-rw-r--r--sound/soc/mxs/mxs-pcm.c3
-rw-r--r--sound/soc/mxs/mxs-sgtl5000.c1
-rw-r--r--sound/soc/nuc900/nuc900-ac97.c3
-rw-r--r--sound/soc/pxa/Kconfig3
-rw-r--r--sound/soc/pxa/hx4700.c5
-rw-r--r--sound/soc/samsung/jive_wm8750.c3
-rw-r--r--sound/soc/samsung/smdk2443_wm9710.c1
-rw-r--r--sound/soc/samsung/smdk_wm8994.c1
-rw-r--r--sound/soc/samsung/speyside.c2
-rw-r--r--sound/soc/soc-core.c6
-rw-r--r--sound/soc/soc-utils.c31
-rw-r--r--sound/usb/quirks-table.h31
-rw-r--r--tools/perf/builtin-stat.c3
-rw-r--r--tools/perf/util/evsel.c10
-rw-r--r--tools/perf/util/header.c2
-rw-r--r--tools/perf/util/hist.c10
-rw-r--r--tools/perf/util/hist.h2
-rw-r--r--tools/perf/util/session.c4
-rw-r--r--tools/perf/util/trace-event-parse.c2
858 files changed, 11357 insertions, 6683 deletions
diff --git a/CREDITS b/CREDITS
index 07e32a87d95..44fce988eaa 100644
--- a/CREDITS
+++ b/CREDITS
@@ -688,10 +688,13 @@ S: Oxfordshire, UK.
N: Kees Cook
E: kees@outflux.net
-W: http://outflux.net/
-P: 1024D/17063E6D 9FA3 C49C 23C9 D1BC 2E30 1975 1FFF 4BA9 1706 3E6D
-D: Minor updates to SCSI types, added /proc/pid/maps protection
+E: kees@ubuntu.com
+E: keescook@chromium.org
+W: http://outflux.net/blog/
+P: 4096R/DC6DC026 A5C3 F68F 229D D60F 723E 6E13 8972 F4DF DC6D C026
+D: Various security things, bug fixes, and documentation.
S: (ask for current address)
+S: Portland, Oregon
S: USA
N: Robin Cornelius
diff --git a/Documentation/ABI/testing/sysfs-bus-rbd b/Documentation/ABI/testing/sysfs-bus-rbd
index fa72ccb2282..dbedafb095e 100644
--- a/Documentation/ABI/testing/sysfs-bus-rbd
+++ b/Documentation/ABI/testing/sysfs-bus-rbd
@@ -57,13 +57,6 @@ create_snap
$ echo <snap-name> > /sys/bus/rbd/devices/<dev-id>/snap_create
-rollback_snap
-
- Rolls back data to the specified snapshot. This goes over the entire
- list of rados blocks and sends a rollback command to each.
-
- $ echo <snap-name> > /sys/bus/rbd/devices/<dev-id>/snap_rollback
-
snap_*
A directory per each snapshot
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index e8552782b44..874921e9780 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -33,6 +33,7 @@ qcom Qualcomm, Inc.
ramtron Ramtron International
samsung Samsung Semiconductor
schindler Schindler
+sil Silicon Image
simtek
sirf SiRF Technology, Inc.
stericsson ST-Ericsson
diff --git a/Documentation/filesystems/btrfs.txt b/Documentation/filesystems/btrfs.txt
index 64087c34327..7671352216f 100644
--- a/Documentation/filesystems/btrfs.txt
+++ b/Documentation/filesystems/btrfs.txt
@@ -63,8 +63,8 @@ IRC network.
Userspace tools for creating and manipulating Btrfs file systems are
available from the git repository at the following location:
- http://git.kernel.org/?p=linux/kernel/git/mason/btrfs-progs-unstable.git
- git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-progs-unstable.git
+ http://git.kernel.org/?p=linux/kernel/git/mason/btrfs-progs.git
+ git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-progs.git
These include the following tools:
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index a0c5c5f4fce..81c287fad79 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -315,12 +315,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
CPU-intensive style benchmark, and it can vary highly in
a microbenchmark depending on workload and compiler.
- 1: only for 32-bit processes
- 2: only for 64-bit processes
+ 32: only for 32-bit processes
+ 64: only for 64-bit processes
on: enable for both 32- and 64-bit processes
off: disable for both 32- and 64-bit processes
- amd_iommu= [HW,X86-84]
+ amd_iommu= [HW,X86-64]
Pass parameters to the AMD IOMMU driver in the system.
Possible values are:
fullflush - enable flushing of IO/TLB entries when
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index f049a1ca186..589f2da5d54 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -282,11 +282,11 @@ tcp_max_ssthresh - INTEGER
Default: 0 (off)
tcp_max_syn_backlog - INTEGER
- Maximal number of remembered connection requests, which are
- still did not receive an acknowledgment from connecting client.
- Default value is 1024 for systems with more than 128Mb of memory,
- and 128 for low memory machines. If server suffers of overload,
- try to increase this number.
+ Maximal number of remembered connection requests, which have not
+ received an acknowledgment from connecting client.
+ The minimal value is 128 for low memory machines, and it will
+ increase in proportion to the memory of machine.
+ If server suffers from overload, try increasing this number.
tcp_max_tw_buckets - INTEGER
Maximal number of timewait sockets held by system simultaneously.
diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
index 646a89e0c07..3139fb505dc 100644
--- a/Documentation/power/devices.txt
+++ b/Documentation/power/devices.txt
@@ -123,9 +123,10 @@ please refer directly to the source code for more information about it.
Subsystem-Level Methods
-----------------------
The core methods to suspend and resume devices reside in struct dev_pm_ops
-pointed to by the pm member of struct bus_type, struct device_type and
-struct class. They are mostly of interest to the people writing infrastructure
-for buses, like PCI or USB, or device type and device class drivers.
+pointed to by the ops member of struct dev_pm_domain, or by the pm member of
+struct bus_type, struct device_type and struct class. They are mostly of
+interest to the people writing infrastructure for platforms and buses, like PCI
+or USB, or device type and device class drivers.
Bus drivers implement these methods as appropriate for the hardware and the
drivers using it; PCI works differently from USB, and so on. Not many people
@@ -139,41 +140,57 @@ sequencing in the driver model tree.
/sys/devices/.../power/wakeup files
-----------------------------------
-All devices in the driver model have two flags to control handling of wakeup
-events (hardware signals that can force the device and/or system out of a low
-power state). These flags are initialized by bus or device driver code using
+All device objects in the driver model contain fields that control the handling
+of system wakeup events (hardware signals that can force the system out of a
+sleep state). These fields are initialized by bus or device driver code using
device_set_wakeup_capable() and device_set_wakeup_enable(), defined in
include/linux/pm_wakeup.h.
-The "can_wakeup" flag just records whether the device (and its driver) can
+The "power.can_wakeup" flag just records whether the device (and its driver) can
physically support wakeup events. The device_set_wakeup_capable() routine
-affects this flag. The "should_wakeup" flag controls whether the device should
-try to use its wakeup mechanism. device_set_wakeup_enable() affects this flag;
-for the most part drivers should not change its value. The initial value of
-should_wakeup is supposed to be false for the majority of devices; the major
-exceptions are power buttons, keyboards, and Ethernet adapters whose WoL
-(wake-on-LAN) feature has been set up with ethtool. It should also default
-to true for devices that don't generate wakeup requests on their own but merely
-forward wakeup requests from one bus to another (like PCI bridges).
+affects this flag. The "power.wakeup" field is a pointer to an object of type
+struct wakeup_source used for controlling whether or not the device should use
+its system wakeup mechanism and for notifying the PM core of system wakeup
+events signaled by the device. This object is only present for wakeup-capable
+devices (i.e. devices whose "can_wakeup" flags are set) and is created (or
+removed) by device_set_wakeup_capable().
Whether or not a device is capable of issuing wakeup events is a hardware
matter, and the kernel is responsible for keeping track of it. By contrast,
whether or not a wakeup-capable device should issue wakeup events is a policy
decision, and it is managed by user space through a sysfs attribute: the
-power/wakeup file. User space can write the strings "enabled" or "disabled" to
-set or clear the "should_wakeup" flag, respectively. This file is only present
-for wakeup-capable devices (i.e. devices whose "can_wakeup" flags are set)
-and is created (or removed) by device_set_wakeup_capable(). Reads from the
-file will return the corresponding string.
-
-The device_may_wakeup() routine returns true only if both flags are set.
+"power/wakeup" file. User space can write the strings "enabled" or "disabled"
+to it to indicate whether or not, respectively, the device is supposed to signal
+system wakeup. This file is only present if the "power.wakeup" object exists
+for the given device and is created (or removed) along with that object, by
+device_set_wakeup_capable(). Reads from the file will return the corresponding
+string.
+
+The "power/wakeup" file is supposed to contain the "disabled" string initially
+for the majority of devices; the major exceptions are power buttons, keyboards,
+and Ethernet adapters whose WoL (wake-on-LAN) feature has been set up with
+ethtool. It should also default to "enabled" for devices that don't generate
+wakeup requests on their own but merely forward wakeup requests from one bus to
+another (like PCI Express ports).
+
+The device_may_wakeup() routine returns true only if the "power.wakeup" object
+exists and the corresponding "power/wakeup" file contains the string "enabled".
This information is used by subsystems, like the PCI bus type code, to see
whether or not to enable the devices' wakeup mechanisms. If device wakeup
mechanisms are enabled or disabled directly by drivers, they also should use
device_may_wakeup() to decide what to do during a system sleep transition.
-However for runtime power management, wakeup events should be enabled whenever
-the device and driver both support them, regardless of the should_wakeup flag.
-
+Device drivers, however, are not supposed to call device_set_wakeup_enable()
+directly in any case.
+
+It ought to be noted that system wakeup is conceptually different from "remote
+wakeup" used by runtime power management, although it may be supported by the
+same physical mechanism. Remote wakeup is a feature allowing devices in
+low-power states to trigger specific interrupts to signal conditions in which
+they should be put into the full-power state. Those interrupts may or may not
+be used to signal system wakeup events, depending on the hardware design. On
+some systems it is impossible to trigger them from system sleep states. In any
+case, remote wakeup should always be enabled for runtime power management for
+all devices and drivers that support it.
/sys/devices/.../power/control files
------------------------------------
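The wakeup policy spelled out in the hunk above boils down to a single driver-side check. A minimal sketch, assuming a hypothetical driver (the foo_* names and the wake_irq field are illustrative, not part of this patch) that arms a dedicated wakeup interrupt only when device_may_wakeup() allows it:

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

/* Hypothetical per-device data; the wakeup IRQ number is an assumption. */
struct foo_data {
	int wake_irq;
};

static int foo_suspend(struct device *dev)
{
	struct foo_data *foo = dev_get_drvdata(dev);

	/*
	 * device_may_wakeup() is true only if the device is wakeup-capable
	 * (power.can_wakeup set, power.wakeup object present) and user space
	 * has written "enabled" to its power/wakeup file.
	 */
	if (device_may_wakeup(dev))
		enable_irq_wake(foo->wake_irq);

	/* quiesce the device for system sleep here */
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct foo_data *foo = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		disable_irq_wake(foo->wake_irq);
	return 0;
}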
@@ -249,20 +266,31 @@ for every device before the next phase begins. Not all busses or classes
support all these callbacks and not all drivers use all the callbacks. The
various phases always run after tasks have been frozen and before they are
unfrozen. Furthermore, the *_noirq phases run at a time when IRQ handlers have
-been disabled (except for those marked with the IRQ_WAKEUP flag).
-
-All phases use bus, type, or class callbacks (that is, methods defined in
-dev->bus->pm, dev->type->pm, or dev->class->pm). These callbacks are mutually
-exclusive, so if the device type provides a struct dev_pm_ops object pointed to
-by its pm field (i.e. both dev->type and dev->type->pm are defined), the
-callbacks included in that object (i.e. dev->type->pm) will be used. Otherwise,
-if the class provides a struct dev_pm_ops object pointed to by its pm field
-(i.e. both dev->class and dev->class->pm are defined), the PM core will use the
-callbacks from that object (i.e. dev->class->pm). Finally, if the pm fields of
-both the device type and class objects are NULL (or those objects do not exist),
-the callbacks provided by the bus (that is, the callbacks from dev->bus->pm)
-will be used (this allows device types to override callbacks provided by bus
-types or classes if necessary).
+been disabled (except for those marked with the IRQF_NO_SUSPEND flag).
+
+All phases use PM domain, bus, type, or class callbacks (that is, methods
+defined in dev->pm_domain->ops, dev->bus->pm, dev->type->pm, or dev->class->pm).
+These callbacks are regarded by the PM core as mutually exclusive. Moreover,
+PM domain callbacks always take precedence over bus, type and class callbacks,
+while type callbacks take precedence over bus and class callbacks, and class
+callbacks take precedence over bus callbacks. To be precise, the following
+rules are used to determine which callback to execute in the given phase:
+
+ 1. If dev->pm_domain is present, the PM core will attempt to execute the
+ callback included in dev->pm_domain->ops. If that callback is not
+ present, no action will be carried out for the given device.
+
+ 2. Otherwise, if both dev->type and dev->type->pm are present, the callback
+ included in dev->type->pm will be executed.
+
+ 3. Otherwise, if both dev->class and dev->class->pm are present, the
+ callback included in dev->class->pm will be executed.
+
+ 4. Otherwise, if both dev->bus and dev->bus->pm are present, the callback
+ included in dev->bus->pm will be executed.
+
+This allows PM domains and device types to override callbacks provided by bus
+types or device classes if necessary.
These callbacks may in turn invoke device- or driver-specific methods stored in
dev->driver->pm, but they don't have to.
@@ -283,9 +311,8 @@ When the system goes into the standby or memory sleep state, the phases are:
After the prepare callback method returns, no new children may be
registered below the device. The method may also prepare the device or
- driver in some way for the upcoming system power transition (for
- example, by allocating additional memory required for this purpose), but
- it should not put the device into a low-power state.
+ driver in some way for the upcoming system power transition, but it
+ should not put the device into a low-power state.
2. The suspend methods should quiesce the device to stop it from performing
I/O. They also may save the device registers and put it into the
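The four numbered rules added to devices.txt above amount to a simple precedence lookup. The following is only an illustrative sketch of that ordering, not the PM core's actual code (which lives under drivers/base/power/):

#include <linux/device.h>
#include <linux/pm.h>

/*
 * Illustrative only: return the struct dev_pm_ops the PM core would consult
 * for a device, following the precedence described above.
 */
static const struct dev_pm_ops *example_subsystem_pm_ops(struct device *dev)
{
	if (dev->pm_domain)
		return &dev->pm_domain->ops;	/* 1. PM domain, if present */
	if (dev->type && dev->type->pm)
		return dev->type->pm;		/* 2. device type */
	if (dev->class && dev->class->pm)
		return dev->class->pm;		/* 3. device class */
	if (dev->bus && dev->bus->pm)
		return dev->bus->pm;		/* 4. bus type */
	return NULL;				/* no subsystem-level callbacks */
}

Per rule 1, the PM domain is selected whenever dev->pm_domain exists, even if it lacks the callback for the current phase; in that case nothing is executed for the device.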
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index 5336149f831..c2ae8bf77d4 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -44,25 +44,33 @@ struct dev_pm_ops {
};
The ->runtime_suspend(), ->runtime_resume() and ->runtime_idle() callbacks
-are executed by the PM core for either the power domain, or the device type
-(if the device power domain's struct dev_pm_ops does not exist), or the class
-(if the device power domain's and type's struct dev_pm_ops object does not
-exist), or the bus type (if the device power domain's, type's and class'
-struct dev_pm_ops objects do not exist) of the given device, so the priority
-order of callbacks from high to low is that power domain callbacks, device
-type callbacks, class callbacks and bus type callbacks, and the high priority
-one will take precedence over low priority one. The bus type, device type and
-class callbacks are referred to as subsystem-level callbacks in what follows,
-and generally speaking, the power domain callbacks are used for representing
-power domains within a SoC.
+are executed by the PM core for the device's subsystem that may be either of
+the following:
+
+ 1. PM domain of the device, if the device's PM domain object, dev->pm_domain,
+ is present.
+
+ 2. Device type of the device, if both dev->type and dev->type->pm are present.
+
+ 3. Device class of the device, if both dev->class and dev->class->pm are
+ present.
+
+ 4. Bus type of the device, if both dev->bus and dev->bus->pm are present.
+
+The PM core always checks which callback to use in the order given above, so the
+priority order of callbacks from high to low is: PM domain, device type, class
+and bus type. Moreover, the high-priority one will always take precedence over
+a low-priority one. The PM domain, bus type, device type and class callbacks
+are referred to as subsystem-level callbacks in what follows.
By default, the callbacks are always invoked in process context with interrupts
enabled. However, subsystems can use the pm_runtime_irq_safe() helper function
-to tell the PM core that a device's ->runtime_suspend() and ->runtime_resume()
-callbacks should be invoked in atomic context with interrupts disabled.
-This implies that these callback routines must not block or sleep, but it also
-means that the synchronous helper functions listed at the end of Section 4 can
-be used within an interrupt handler or in an atomic context.
+to tell the PM core that their ->runtime_suspend(), ->runtime_resume() and
+->runtime_idle() callbacks may be invoked in atomic context with interrupts
+disabled for a given device. This implies that the callback routines in
+question must not block or sleep, but it also means that the synchronous helper
+functions listed at the end of Section 4 may be used for that device within an
+interrupt handler or generally in an atomic context.
The subsystem-level suspend callback is _entirely_ _responsible_ for handling
the suspend of the device as appropriate, which may, but need not include
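A hedged example of the pm_runtime_irq_safe() usage described in the runtime_pm.txt text above, for a hypothetical platform driver (every foo_* identifier is made up for illustration):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev)
{
	/* May run in atomic context once pm_runtime_irq_safe() is set,
	 * so this must not sleep: gate clocks, stop the hardware, etc. */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* Likewise non-sleeping: ungate clocks and restore the hardware. */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};

static int foo_probe(struct platform_device *pdev)
{
	/* Declare the runtime callbacks safe to invoke with IRQs disabled. */
	pm_runtime_irq_safe(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	return 0;
}

static struct platform_driver foo_driver = {
	.probe = foo_probe,
	.driver = {
		.name = "foo",
		.pm = &foo_pm_ops,
	},
};

Because the callbacks are declared IRQ-safe, the synchronous pm_runtime_* helpers listed at the end of Section 4 may then be called for this device from atomic context.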
diff --git a/Documentation/sound/alsa/soc/machine.txt b/Documentation/sound/alsa/soc/machine.txt
index 3e2ec9cbf39..d50c14df341 100644
--- a/Documentation/sound/alsa/soc/machine.txt
+++ b/Documentation/sound/alsa/soc/machine.txt
@@ -50,8 +50,7 @@ Machine DAI Configuration
The machine DAI configuration glues all the codec and CPU DAIs together. It can
also be used to set up the DAI system clock and for any machine related DAI
initialisation e.g. the machine audio map can be connected to the codec audio
-map, unconnected codec pins can be set as such. Please see corgi.c, spitz.c
-for examples.
+map, unconnected codec pins can be set as such.
struct snd_soc_dai_link is used to set up each DAI in your machine. e.g.
@@ -83,8 +82,7 @@ Machine Power Map
The machine driver can optionally extend the codec power map and to become an
audio power map of the audio subsystem. This allows for automatic power up/down
of speaker/HP amplifiers, etc. Codec pins can be connected to the machines jack
-sockets in the machine init function. See soc/pxa/spitz.c and dapm.txt for
-details.
+sockets in the machine init function.
Machine Controls
diff --git a/Documentation/usb/linux-cdc-acm.inf b/Documentation/usb/linux-cdc-acm.inf
index 37a02ce5484..f0ffc27d4c0 100644
--- a/Documentation/usb/linux-cdc-acm.inf
+++ b/Documentation/usb/linux-cdc-acm.inf
@@ -90,10 +90,10 @@ ServiceBinary=%12%\USBSER.sys
[SourceDisksFiles]
[SourceDisksNames]
[DeviceList]
-%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_1D6B&PID_0104&MI_02
+%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_1D6B&PID_0104&MI_02, USB\VID_1D6B&PID_0106&MI_00
[DeviceList.NTamd64]
-%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_1D6B&PID_0104&MI_02
+%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_1D6B&PID_0104&MI_02, USB\VID_1D6B&PID_0106&MI_00
;------------------------------------------------------------------------------
diff --git a/MAINTAINERS b/MAINTAINERS
index 3523ab000f1..6afba60c390 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -511,8 +511,8 @@ M: Joerg Roedel <joerg.roedel@amd.com>
L: iommu@lists.linux-foundation.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/linux-2.6-iommu.git
S: Supported
-F: arch/x86/kernel/amd_iommu*.c
-F: arch/x86/include/asm/amd_iommu*.h
+F: drivers/iommu/amd_iommu*.[ch]
+F: include/linux/amd-iommu.h
AMD MICROCODE UPDATE SUPPORT
M: Andreas Herrmann <andreas.herrmann3@amd.com>
@@ -789,6 +789,7 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
T: git git://git.pengutronix.de/git/imx/linux-2.6.git
F: arch/arm/mach-mx*/
+F: arch/arm/mach-imx/
F: arch/arm/plat-mxc/
ARM/FREESCALE IMX51
@@ -804,6 +805,13 @@ S: Maintained
T: git git://git.linaro.org/people/shawnguo/linux-2.6.git
F: arch/arm/mach-imx/*imx6*
+ARM/FREESCALE MXS ARM ARCHITECTURE
+M: Shawn Guo <shawn.guo@linaro.org>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+T: git git://git.linaro.org/people/shawnguo/linux-2.6.git
+F: arch/arm/mach-mxs/
+
ARM/GLOMATION GESBC9312SX MACHINE SUPPORT
M: Lennert Buytenhek <kernel@wantstofly.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1046,35 +1054,18 @@ ARM/SAMSUNG ARM ARCHITECTURES
M: Ben Dooks <ben-linux@fluff.org>
M: Kukjin Kim <kgene.kim@samsung.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
W: http://www.fluff.org/ben/linux/
S: Maintained
F: arch/arm/plat-samsung/
F: arch/arm/plat-s3c24xx/
F: arch/arm/plat-s5p/
+F: arch/arm/mach-s3c24*/
+F: arch/arm/mach-s3c64xx/
F: drivers/*/*s3c2410*
F: drivers/*/*/*s3c2410*
-
-ARM/S3C2410 ARM ARCHITECTURE
-M: Ben Dooks <ben-linux@fluff.org>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W: http://www.fluff.org/ben/linux/
-S: Maintained
-F: arch/arm/mach-s3c2410/
-
-ARM/S3C244x ARM ARCHITECTURE
-M: Ben Dooks <ben-linux@fluff.org>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W: http://www.fluff.org/ben/linux/
-S: Maintained
-F: arch/arm/mach-s3c2440/
-F: arch/arm/mach-s3c2443/
-
-ARM/S3C64xx ARM ARCHITECTURE
-M: Ben Dooks <ben-linux@fluff.org>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W: http://www.fluff.org/ben/linux/
-S: Maintained
-F: arch/arm/mach-s3c64xx/
+F: drivers/spi/spi-s3c*
+F: sound/soc/samsung/*
ARM/S5P EXYNOS ARM ARCHITECTURES
M: Kukjin Kim <kgene.kim@samsung.com>
@@ -3110,6 +3101,7 @@ F: include/linux/hid*
HIGH-RESOLUTION TIMERS, CLOCKEVENTS, DYNTICKS
M: Thomas Gleixner <tglx@linutronix.de>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
S: Maintained
F: Documentation/timers/
F: kernel/hrtimer.c
@@ -3619,7 +3611,7 @@ F: net/irda/
IRQ SUBSYSTEM
M: Thomas Gleixner <tglx@linutronix.de>
S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git irq/core
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
F: kernel/irq/
ISAPNP
@@ -4107,7 +4099,7 @@ F: drivers/hwmon/lm90.c
LOCKDEP AND LOCKSTAT
M: Peter Zijlstra <peterz@infradead.org>
M: Ingo Molnar <mingo@redhat.com>
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/peterz/linux-2.6-lockdep.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git core/locking
S: Maintained
F: Documentation/lockdep*.txt
F: Documentation/lockstat.txt
@@ -4289,7 +4281,9 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
S: Maintained
F: Documentation/dvb/
F: Documentation/video4linux/
+F: Documentation/DocBook/media/
F: drivers/media/
+F: drivers/staging/media/
F: include/media/
F: include/linux/dvb/
F: include/linux/videodev*.h
@@ -4311,8 +4305,9 @@ F: include/linux/mm.h
F: mm/
MEMORY RESOURCE CONTROLLER
+M: Johannes Weiner <hannes@cmpxchg.org>
+M: Michal Hocko <mhocko@suse.cz>
M: Balbir Singh <bsingharora@gmail.com>
-M: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
M: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
L: cgroups@vger.kernel.org
L: linux-mm@kvack.org
@@ -5094,6 +5089,7 @@ M: Peter Zijlstra <a.p.zijlstra@chello.nl>
M: Paul Mackerras <paulus@samba.org>
M: Ingo Molnar <mingo@elte.hu>
M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
S: Supported
F: kernel/events/*
F: include/linux/perf_event.h
@@ -5173,6 +5169,7 @@ F: drivers/scsi/pm8001/
POSIX CLOCKS and TIMERS
M: Thomas Gleixner <tglx@linutronix.de>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
S: Supported
F: fs/timerfd.c
F: include/linux/timer*
@@ -5667,7 +5664,6 @@ F: drivers/media/video/*7146*
F: include/media/*7146*
SAMSUNG AUDIO (ASoC) DRIVERS
-M: Jassi Brar <jassisinghbrar@gmail.com>
M: Sangbeom Kim <sbkim73@samsung.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Supported
@@ -5689,6 +5685,7 @@ F: drivers/dma/dw_dmac.c
TIMEKEEPING, NTP
M: John Stultz <johnstul@us.ibm.com>
M: Thomas Gleixner <tglx@linutronix.de>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
S: Supported
F: include/linux/clocksource.h
F: include/linux/time.h
@@ -5713,6 +5710,7 @@ F: drivers/watchdog/sc1200wdt.c
SCHEDULER
M: Ingo Molnar <mingo@elte.hu>
M: Peter Zijlstra <peterz@infradead.org>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git sched/core
S: Maintained
F: kernel/sched*
F: include/linux/sched.h
@@ -6640,7 +6638,7 @@ TRACING
M: Steven Rostedt <rostedt@goodmis.org>
M: Frederic Weisbecker <fweisbec@gmail.com>
M: Ingo Molnar <mingo@redhat.com>
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git perf/core
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
S: Maintained
F: Documentation/trace/ftrace.txt
F: arch/*/*/*/ftrace.h
@@ -7390,7 +7388,7 @@ M: Thomas Gleixner <tglx@linutronix.de>
M: Ingo Molnar <mingo@redhat.com>
M: "H. Peter Anvin" <hpa@zytor.com>
M: x86@kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core
S: Maintained
F: Documentation/x86/
F: arch/x86/
diff --git a/Makefile b/Makefile
index dab8610c4d6..a43733df397 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 2
SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc6
NAME = Saber-toothed Squirrel
# *DOCUMENTATION*
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 44789eff983..776d76b8cb6 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -220,8 +220,9 @@ config NEED_MACH_MEMORY_H
be avoided when possible.
config PHYS_OFFSET
- hex "Physical address of main memory"
+ hex "Physical address of main memory" if MMU
depends on !ARM_PATCH_PHYS_VIRT && !NEED_MACH_MEMORY_H
+ default DRAM_BASE if !MMU
help
Please provide the physical address corresponding to the
location of main memory in your system.
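For example (the value is hypothetical and board-specific), a platform whose main memory starts at physical address 0x80000000 would end up with:

	CONFIG_PHYS_OFFSET=0x80000000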
@@ -1231,7 +1232,7 @@ config ARM_ERRATA_742231
capabilities of the processor.
config PL310_ERRATA_588369
- bool "Clean & Invalidate maintenance operations do not invalidate clean lines"
+ bool "PL310 errata: Clean & Invalidate maintenance operations do not invalidate clean lines"
depends on CACHE_L2X0
help
The PL310 L2 cache controller implements three types of Clean &
@@ -1256,7 +1257,7 @@ config ARM_ERRATA_720789
entries regardless of the ASID.
config PL310_ERRATA_727915
- bool "Background Clean & Invalidate by Way operation can cause data corruption"
+ bool "PL310 errata: Background Clean & Invalidate by Way operation can cause data corruption"
depends on CACHE_L2X0
help
PL310 implements the Clean & Invalidate by Way L2 cache maintenance
@@ -1289,8 +1290,8 @@ config ARM_ERRATA_751472
operation is received by a CPU before the ICIALLUIS has completed,
potentially leading to corrupted entries in the cache or TLB.
-config ARM_ERRATA_753970
- bool "ARM errata: cache sync operation may be faulty"
+config PL310_ERRATA_753970
+ bool "PL310 errata: cache sync operation may be faulty"
depends on CACHE_PL310
help
This option enables the workaround for the 753970 PL310 (r3p0) erratum.
@@ -1352,6 +1353,18 @@ config ARM_ERRATA_764369
relevant cache maintenance functions and sets a specific bit
in the diagnostic control register of the SCU.
+config PL310_ERRATA_769419
+ bool "PL310 errata: no automatic Store Buffer drain"
+ depends on CACHE_L2X0
+ help
+ On revisions of the PL310 prior to r3p2, the Store Buffer does
+ not automatically drain. This can cause normal, non-cacheable
+ writes to be retained when the memory system is idle, leading
+ to suboptimal I/O performance for drivers using coherent DMA.
+ This option adds a write barrier to the cpu_idle loop so that,
+ on systems with an outer cache, the store buffer is drained
+ explicitly.
+
endmenu
source "arch/arm/common/Kconfig"
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 0e6ae470c94..410a546060a 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -526,7 +526,8 @@ static void __init gic_pm_init(struct gic_chip_data *gic)
sizeof(u32));
BUG_ON(!gic->saved_ppi_conf);
- cpu_pm_register_notifier(&gic_notifier_block);
+ if (gic == &gic_data[0])
+ cpu_pm_register_notifier(&gic_notifier_block);
}
#else
static void __init gic_pm_init(struct gic_chip_data *gic)
@@ -581,13 +582,16 @@ void __init gic_init(unsigned int gic_nr, int irq_start,
* For primary GICs, skip over SGIs.
* For secondary GICs, skip over PPIs, too.
*/
+ domain->hwirq_base = 32;
if (gic_nr == 0) {
gic_cpu_base_addr = cpu_base;
- domain->hwirq_base = 16;
- if (irq_start > 0)
- irq_start = (irq_start & ~31) + 16;
- } else
- domain->hwirq_base = 32;
+
+ if ((irq_start & 31) > 0) {
+ domain->hwirq_base = 16;
+ if (irq_start != -1)
+ irq_start = (irq_start & ~31) + 16;
+ }
+ }
/*
* Find out how many interrupts are supported.
diff --git a/arch/arm/common/pl330.c b/arch/arm/common/pl330.c
index 7129cfbdacd..f407a6b35d3 100644
--- a/arch/arm/common/pl330.c
+++ b/arch/arm/common/pl330.c
@@ -1211,8 +1211,8 @@ static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc)
ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT);
ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT);
- ccr |= (rqc->dcctl << CC_SRCCCTRL_SHFT);
- ccr |= (rqc->scctl << CC_DSTCCTRL_SHFT);
+ ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT);
+ ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT);
ccr |= (rqc->swap << CC_SWAP_SHFT);
@@ -1623,6 +1623,11 @@ static inline int _alloc_event(struct pl330_thread *thrd)
return -1;
}
+static bool _chan_ns(const struct pl330_info *pi, int i)
+{
+ return pi->pcfg.irq_ns & (1 << i);
+}
+
/* Upon success, returns IdentityToken for the
* allocated channel, NULL otherwise.
*/
@@ -1647,7 +1652,8 @@ void *pl330_request_channel(const struct pl330_info *pi)
for (i = 0; i < chans; i++) {
thrd = &pl330->channels[i];
- if (thrd->free) {
+ if ((thrd->free) && (!_manager_ns(thrd) ||
+ _chan_ns(pi, i))) {
thrd->ev = _alloc_event(thrd);
if (thrd->ev >= 0) {
thrd->free = false;
diff --git a/arch/arm/configs/at91cap9adk_defconfig b/arch/arm/configs/at91cap9_defconfig
index ffb1edd9336..8826eb218e7 100644
--- a/arch/arm/configs/at91cap9adk_defconfig
+++ b/arch/arm/configs/at91cap9_defconfig
@@ -38,7 +38,6 @@ CONFIG_IP_PNP_RARP=y
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
@@ -52,16 +51,12 @@ CONFIG_MTD_NAND_ATMEL=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_ATMEL_SSC=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_MULTI_LUN=y
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
CONFIG_MII=y
CONFIG_MACB=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_KEYBOARD is not set
@@ -81,7 +76,6 @@ CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_NOWAYOUT=y
CONFIG_FB=y
CONFIG_FB_ATMEL=y
-# CONFIG_VGA_CONSOLE is not set
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_CLUT224 is not set
@@ -99,7 +93,6 @@ CONFIG_MMC_AT91=m
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_AT91SAM9=y
CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
diff --git a/arch/arm/configs/at91rm9200_defconfig b/arch/arm/configs/at91rm9200_defconfig
index 38cb7c98542..bbe4e1a1f5d 100644
--- a/arch/arm/configs/at91rm9200_defconfig
+++ b/arch/arm/configs/at91rm9200_defconfig
@@ -5,7 +5,6 @@ CONFIG_SYSVIPC=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_MODULES=y
CONFIG_MODULE_FORCE_LOAD=y
@@ -56,7 +55,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_NET_IPIP=m
-CONFIG_NET_IPGRE=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
@@ -75,18 +73,8 @@ CONFIG_IPV6_TUNNEL=m
CONFIG_BRIDGE=m
CONFIG_VLAN_8021Q=m
CONFIG_BT=m
-CONFIG_BT_L2CAP=m
-CONFIG_BT_SCO=m
-CONFIG_BT_RFCOMM=m
-CONFIG_BT_RFCOMM_TTY=y
-CONFIG_BT_BNEP=m
-CONFIG_BT_BNEP_MC_FILTER=y
-CONFIG_BT_BNEP_PROTO_FILTER=y
-CONFIG_BT_HIDP=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_AFS_PARTS=y
CONFIG_MTD_CHAR=y
@@ -108,8 +96,6 @@ CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_ATMEL_TCLIB=y
-CONFIG_EEPROM_LEGACY=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=m
@@ -119,14 +105,23 @@ CONFIG_SCSI_MULTI_LUN=y
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_NETDEVICES=y
CONFIG_TUN=m
+CONFIG_ARM_AT91_ETHER=y
CONFIG_PHYLIB=y
CONFIG_DAVICOM_PHY=y
CONFIG_SMSC_PHY=y
CONFIG_MICREL_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_ARM_AT91_ETHER=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=y
+CONFIG_SLIP=m
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+CONFIG_SLIP_MODE_SLIP6=y
CONFIG_USB_CATC=m
CONFIG_USB_KAWETH=m
CONFIG_USB_PEGASUS=m
@@ -139,18 +134,6 @@ CONFIG_USB_NET_RNDIS_HOST=m
CONFIG_USB_ALI_M5632=y
CONFIG_USB_AN2720=y
CONFIG_USB_EPSON2888=y
-CONFIG_PPP=y
-CONFIG_PPP_MULTILINK=y
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=y
-CONFIG_PPP_DEFLATE=y
-CONFIG_PPP_BSDCOMP=y
-CONFIG_PPP_MPPE=m
-CONFIG_PPPOE=m
-CONFIG_SLIP=m
-CONFIG_SLIP_COMPRESSED=y
-CONFIG_SLIP_SMART=y
-CONFIG_SLIP_MODE_SLIP6=y
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_MOUSEDEV_SCREEN_X=640
CONFIG_INPUT_MOUSEDEV_SCREEN_Y=480
@@ -158,9 +141,9 @@ CONFIG_INPUT_EVDEV=y
CONFIG_KEYBOARD_GPIO=y
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_LEGACY_PTY_COUNT=32
CONFIG_SERIAL_ATMEL=y
CONFIG_SERIAL_ATMEL_CONSOLE=y
-CONFIG_LEGACY_PTY_COUNT=32
CONFIG_HW_RANDOM=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
@@ -290,7 +273,6 @@ CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=y
-CONFIG_SMB_FS=m
CONFIG_CIFS=m
CONFIG_PARTITION_ADVANCED=y
CONFIG_MAC_PARTITION=y
@@ -335,7 +317,6 @@ CONFIG_NLS_UTF8=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_FTRACE is not set
CONFIG_CRYPTO_PCBC=y
CONFIG_CRYPTO_SHA1=y
diff --git a/arch/arm/configs/at91sam9260ek_defconfig b/arch/arm/configs/at91sam9260_defconfig
index f8a9226413b..505b3765f87 100644
--- a/arch/arm/configs/at91sam9260ek_defconfig
+++ b/arch/arm/configs/at91sam9260_defconfig
@@ -12,11 +12,23 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_AT91=y
CONFIG_ARCH_AT91SAM9260=y
+CONFIG_ARCH_AT91SAM9260_SAM9XE=y
CONFIG_MACH_AT91SAM9260EK=y
+CONFIG_MACH_CAM60=y
+CONFIG_MACH_SAM9_L9260=y
+CONFIG_MACH_AFEB9260=y
+CONFIG_MACH_USB_A9260=y
+CONFIG_MACH_QIL_A9260=y
+CONFIG_MACH_CPU9260=y
+CONFIG_MACH_FLEXIBITY=y
+CONFIG_MACH_SNAPPER_9260=y
+CONFIG_MACH_AT91SAM_DT=y
CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
# CONFIG_ARM_THUMB is not set
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_ARM_APPENDED_DTB=y
+CONFIG_ARM_ATAG_DTB_COMPAT=y
CONFIG_CMDLINE="mem=64M console=ttyS0,115200 initrd=0x21100000,3145728 root=/dev/ram0 rw"
CONFIG_FPE_NWFPE=y
CONFIG_NET=y
@@ -33,12 +45,10 @@ CONFIG_IP_PNP_BOOTP=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_ATMEL_SSC=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_MULTI_LUN=y
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
CONFIG_MII=y
CONFIG_MACB=y
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
@@ -55,7 +65,6 @@ CONFIG_I2C_GPIO=y
CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_NOWAYOUT=y
CONFIG_AT91SAM9X_WATCHDOG=y
-# CONFIG_VGA_CONSOLE is not set
# CONFIG_USB_HID is not set
CONFIG_USB=y
CONFIG_USB_DEVICEFS=y
@@ -71,7 +80,6 @@ CONFIG_USB_G_SERIAL=m
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_AT91SAM9=y
CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_CRAMFS=y
diff --git a/arch/arm/configs/at91sam9g20ek_defconfig b/arch/arm/configs/at91sam9g20_defconfig
index 9e90e6d7929..9123568d9a8 100644
--- a/arch/arm/configs/at91sam9g20ek_defconfig
+++ b/arch/arm/configs/at91sam9g20_defconfig
@@ -14,6 +14,15 @@ CONFIG_ARCH_AT91=y
CONFIG_ARCH_AT91SAM9G20=y
CONFIG_MACH_AT91SAM9G20EK=y
CONFIG_MACH_AT91SAM9G20EK_2MMC=y
+CONFIG_MACH_CPU9G20=y
+CONFIG_MACH_ACMENETUSFOXG20=y
+CONFIG_MACH_PORTUXG20=y
+CONFIG_MACH_STAMP9G20=y
+CONFIG_MACH_PCONTROL_G20=y
+CONFIG_MACH_GSIA18S=y
+CONFIG_MACH_USB_A9G20=y
+CONFIG_MACH_SNAPPER_9260=y
+CONFIG_MACH_AT91SAM_DT=y
CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
# CONFIG_ARM_THUMB is not set
CONFIG_AEABI=y
@@ -21,9 +30,10 @@ CONFIG_LEDS=y
CONFIG_LEDS_CPU=y
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_ARM_APPENDED_DTB=y
+CONFIG_ARM_ATAG_DTB_COMPAT=y
CONFIG_CMDLINE="mem=64M console=ttyS0,115200 initrd=0x21100000,3145728 root=/dev/ram0 rw"
CONFIG_FPE_NWFPE=y
-CONFIG_PM=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -37,8 +47,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
@@ -48,17 +56,13 @@ CONFIG_MTD_NAND_ATMEL=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_ATMEL_SSC=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_MULTI_LUN=y
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
CONFIG_MII=y
CONFIG_MACB=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_MOUSEDEV_SCREEN_X=320
CONFIG_INPUT_MOUSEDEV_SCREEN_Y=240
@@ -66,15 +70,14 @@ CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_ATKBD is not set
CONFIG_KEYBOARD_GPIO=y
# CONFIG_INPUT_MOUSE is not set
+CONFIG_LEGACY_PTY_COUNT=16
CONFIG_SERIAL_ATMEL=y
CONFIG_SERIAL_ATMEL_CONSOLE=y
-CONFIG_LEGACY_PTY_COUNT=16
CONFIG_HW_RANDOM=y
CONFIG_SPI=y
CONFIG_SPI_ATMEL=y
CONFIG_SPI_SPIDEV=y
# CONFIG_HWMON is not set
-# CONFIG_VGA_CONSOLE is not set
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_SEQUENCER=y
@@ -82,7 +85,6 @@ CONFIG_SND_MIXER_OSS=y
CONFIG_SND_PCM_OSS=y
CONFIG_SND_SEQUENCER_OSS=y
# CONFIG_SND_VERBOSE_PROCFS is not set
-CONFIG_SND_AT73C213=y
CONFIG_USB=y
CONFIG_USB_DEVICEFS=y
# CONFIG_USB_DEVICE_CLASS is not set
@@ -105,7 +107,6 @@ CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_AT91SAM9=y
CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
diff --git a/arch/arm/configs/at91sam9g45_defconfig b/arch/arm/configs/at91sam9g45_defconfig
index c5876d244f4..606d48f3b8f 100644
--- a/arch/arm/configs/at91sam9g45_defconfig
+++ b/arch/arm/configs/at91sam9g45_defconfig
@@ -18,6 +18,7 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_ARCH_AT91=y
CONFIG_ARCH_AT91SAM9G45=y
CONFIG_MACH_AT91SAM9M10G45EK=y
+CONFIG_MACH_AT91SAM_DT=y
CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
CONFIG_AT91_SLOW_CLOCK=y
CONFIG_AEABI=y
@@ -73,11 +74,8 @@ CONFIG_SCSI_MULTI_LUN=y
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_NETDEVICES=y
CONFIG_MII=y
-CONFIG_DAVICOM_PHY=y
-CONFIG_NET_ETHERNET=y
CONFIG_MACB=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+CONFIG_DAVICOM_PHY=y
CONFIG_LIBERTAS_THINFIRM=m
CONFIG_LIBERTAS_THINFIRM_USB=m
CONFIG_AT76C50X_USB=m
@@ -131,7 +129,6 @@ CONFIG_I2C_GPIO=y
CONFIG_SPI=y
CONFIG_SPI_ATMEL=y
# CONFIG_HWMON is not set
-# CONFIG_MFD_SUPPORT is not set
CONFIG_FB=y
CONFIG_FB_ATMEL=y
CONFIG_FB_UDL=m
diff --git a/arch/arm/configs/at91sam9rlek_defconfig b/arch/arm/configs/at91sam9rl_defconfig
index 75621e4d03f..ad562ee6420 100644
--- a/arch/arm/configs/at91sam9rlek_defconfig
+++ b/arch/arm/configs/at91sam9rl_defconfig
@@ -23,8 +23,6 @@ CONFIG_NET=y
CONFIG_UNIX=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
@@ -35,7 +33,6 @@ CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=4
CONFIG_BLK_DEV_RAM_SIZE=24576
-CONFIG_ATMEL_SSC=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_MULTI_LUN=y
@@ -62,13 +59,11 @@ CONFIG_WATCHDOG_NOWAYOUT=y
CONFIG_AT91SAM9X_WATCHDOG=y
CONFIG_FB=y
CONFIG_FB_ATMEL=y
-# CONFIG_VGA_CONSOLE is not set
CONFIG_MMC=y
CONFIG_MMC_AT91=m
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_AT91SAM9=y
CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
diff --git a/arch/arm/configs/ezx_defconfig b/arch/arm/configs/ezx_defconfig
index 227a477346e..d95763d5f0d 100644
--- a/arch/arm/configs/ezx_defconfig
+++ b/arch/arm/configs/ezx_defconfig
@@ -287,7 +287,7 @@ CONFIG_USB=y
# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_GADGET=y
-CONFIG_USB_GADGET_PXA27X=y
+CONFIG_USB_PXA27X=y
CONFIG_USB_ETH=m
# CONFIG_USB_ETH_RNDIS is not set
CONFIG_MMC=y
diff --git a/arch/arm/configs/imote2_defconfig b/arch/arm/configs/imote2_defconfig
index 176ec22af03..fd996bb1302 100644
--- a/arch/arm/configs/imote2_defconfig
+++ b/arch/arm/configs/imote2_defconfig
@@ -263,7 +263,7 @@ CONFIG_USB=y
# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_GADGET=y
-CONFIG_USB_GADGET_PXA27X=y
+CONFIG_USB_PXA27X=y
CONFIG_USB_ETH=m
# CONFIG_USB_ETH_RNDIS is not set
CONFIG_MMC=y
diff --git a/arch/arm/configs/magician_defconfig b/arch/arm/configs/magician_defconfig
index a88e64d4e9a..443675d317e 100644
--- a/arch/arm/configs/magician_defconfig
+++ b/arch/arm/configs/magician_defconfig
@@ -132,7 +132,7 @@ CONFIG_USB_MON=m
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_GADGET=y
CONFIG_USB_GADGET_VBUS_DRAW=500
-CONFIG_USB_GADGET_PXA27X=y
+CONFIG_USB_PXA27X=y
CONFIG_USB_ETH=m
# CONFIG_USB_ETH_RNDIS is not set
CONFIG_USB_GADGETFS=m
diff --git a/arch/arm/configs/omap1_defconfig b/arch/arm/configs/omap1_defconfig
index 7b63462b349..945a34f2a34 100644
--- a/arch/arm/configs/omap1_defconfig
+++ b/arch/arm/configs/omap1_defconfig
@@ -48,13 +48,7 @@ CONFIG_MACH_SX1=y
CONFIG_MACH_NOKIA770=y
CONFIG_MACH_AMS_DELTA=y
CONFIG_MACH_OMAP_GENERIC=y
-CONFIG_OMAP_CLOCKS_SET_BY_BOOTLOADER=y
-CONFIG_OMAP_ARM_216MHZ=y
-CONFIG_OMAP_ARM_195MHZ=y
-CONFIG_OMAP_ARM_192MHZ=y
CONFIG_OMAP_ARM_182MHZ=y
-CONFIG_OMAP_ARM_168MHZ=y
-# CONFIG_OMAP_ARM_60MHZ is not set
# CONFIG_ARM_THUMB is not set
CONFIG_PCCARD=y
CONFIG_OMAP_CF=y
diff --git a/arch/arm/configs/u300_defconfig b/arch/arm/configs/u300_defconfig
index 4a5a12681be..374000ec4e4 100644
--- a/arch/arm/configs/u300_defconfig
+++ b/arch/arm/configs/u300_defconfig
@@ -14,8 +14,6 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_ARCH_U300=y
CONFIG_MACH_U300=y
CONFIG_MACH_U300_BS335=y
-CONFIG_MACH_U300_DUAL_RAM=y
-CONFIG_U300_DEBUG=y
CONFIG_MACH_U300_SPIDUMMY=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
@@ -26,19 +24,21 @@ CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_CMDLINE="root=/dev/ram0 rw rootfstype=rootfs console=ttyAMA0,115200n8 lpj=515072"
CONFIG_CPU_IDLE=y
CONFIG_FPE_NWFPE=y
-CONFIG_PM=y
# CONFIG_SUSPEND is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-# CONFIG_MISC_DEVICES is not set
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_FSMC=y
# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_ATKBD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
+CONFIG_LEGACY_PTY_COUNT=16
CONFIG_SERIAL_AMBA_PL011=y
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
-CONFIG_LEGACY_PTY_COUNT=16
# CONFIG_HW_RANDOM is not set
CONFIG_I2C=y
# CONFIG_HWMON is not set
@@ -51,6 +51,7 @@ CONFIG_BACKLIGHT_CLASS_DEVICE=y
# CONFIG_HID_SUPPORT is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_MMC=y
+CONFIG_MMC_CLKGATE=y
CONFIG_MMC_ARMMMCI=y
CONFIG_RTC_CLASS=y
# CONFIG_RTC_HCTOSYS is not set
@@ -65,10 +66,8 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_TIMER_STATS=y
# CONFIG_DEBUG_PREEMPT is not set
CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_CRC32 is not set
diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig
index 97d31a4663d..2d7b6e7b727 100644
--- a/arch/arm/configs/u8500_defconfig
+++ b/arch/arm/configs/u8500_defconfig
@@ -10,7 +10,7 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_ARCH_U8500=y
CONFIG_UX500_SOC_DB5500=y
CONFIG_UX500_SOC_DB8500=y
-CONFIG_MACH_U8500=y
+CONFIG_MACH_HREFV60=y
CONFIG_MACH_SNOWBALL=y
CONFIG_MACH_U5500=y
CONFIG_NO_HZ=y
@@ -24,6 +24,7 @@ CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_VFP=y
CONFIG_NEON=y
+CONFIG_PM_RUNTIME=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -41,11 +42,8 @@ CONFIG_MISC_DEVICES=y
CONFIG_AB8500_PWM=y
CONFIG_SENSORS_BH1780=y
CONFIG_NETDEVICES=y
-CONFIG_SMSC_PHY=y
-CONFIG_NET_ETHERNET=y
CONFIG_SMSC911X=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+CONFIG_SMSC_PHY=y
# CONFIG_WLAN is not set
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_EVDEV=y
@@ -72,15 +70,12 @@ CONFIG_SPI=y
CONFIG_SPI_PL022=y
CONFIG_GPIO_STMPE=y
CONFIG_GPIO_TC3589X=y
-# CONFIG_HWMON is not set
CONFIG_MFD_STMPE=y
CONFIG_MFD_TC3589X=y
+CONFIG_AB5500_CORE=y
CONFIG_AB8500_CORE=y
CONFIG_REGULATOR_AB8500=y
# CONFIG_HID_SUPPORT is not set
-CONFIG_USB_MUSB_HDRC=y
-CONFIG_USB_GADGET_MUSB_HDRC=y
-CONFIG_MUSB_PIO_ONLY=y
CONFIG_USB_GADGET=y
CONFIG_AB8500_USB=y
CONFIG_MMC=y
@@ -97,6 +92,7 @@ CONFIG_DMADEVICES=y
CONFIG_STE_DMA40=y
CONFIG_STAGING=y
CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4=y
+CONFIG_HSEM_U8500=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
diff --git a/arch/arm/configs/zeus_defconfig b/arch/arm/configs/zeus_defconfig
index 59577ad3f4e..547a3c1e59d 100644
--- a/arch/arm/configs/zeus_defconfig
+++ b/arch/arm/configs/zeus_defconfig
@@ -140,7 +140,7 @@ CONFIG_USB_SERIAL=m
CONFIG_USB_SERIAL_GENERIC=y
CONFIG_USB_SERIAL_MCT_U232=m
CONFIG_USB_GADGET=m
-CONFIG_USB_GADGET_PXA27X=y
+CONFIG_USB_PXA27X=y
CONFIG_USB_ETH=m
CONFIG_USB_GADGETFS=m
CONFIG_USB_FILE_STORAGE=m
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index 71d99b83cdb..0bda22c094a 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -55,16 +55,6 @@ reserve_pmu(enum arm_pmu_type type);
extern void
release_pmu(enum arm_pmu_type type);
-/**
- * init_pmu() - Initialise the PMU.
- *
- * Initialise the system ready for PMU enabling. This should typically set the
- * IRQ affinity and nothing else. The users (oprofile/perf events etc) will do
- * the actual hardware initialisation.
- */
-extern int
-init_pmu(enum arm_pmu_type type);
-
#else /* CONFIG_CPU_HAS_PMU */
#include <linux/err.h>
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
index a7e457ed27c..58b8b84adcd 100644
--- a/arch/arm/include/asm/topology.h
+++ b/arch/arm/include/asm/topology.h
@@ -25,7 +25,7 @@ extern struct cputopo_arm cpu_topology[NR_CPUS];
void init_cpu_topology(void);
void store_cpu_topology(unsigned int cpuid);
-const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
+const struct cpumask *cpu_coregroup_mask(int cpu);
#else
diff --git a/arch/arm/include/asm/unwind.h b/arch/arm/include/asm/unwind.h
index a5edf421005..d1c3f3a71c9 100644
--- a/arch/arm/include/asm/unwind.h
+++ b/arch/arm/include/asm/unwind.h
@@ -30,14 +30,15 @@ enum unwind_reason_code {
};
struct unwind_idx {
- unsigned long addr;
+ unsigned long addr_offset;
unsigned long insn;
};
struct unwind_table {
struct list_head list;
- struct unwind_idx *start;
- struct unwind_idx *stop;
+ const struct unwind_idx *start;
+ const struct unwind_idx *origin;
+ const struct unwind_idx *stop;
unsigned long begin_addr;
unsigned long end_addr;
};
@@ -49,15 +50,6 @@ extern struct unwind_table *unwind_table_add(unsigned long start,
extern void unwind_table_del(struct unwind_table *tab);
extern void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk);
-#ifdef CONFIG_ARM_UNWIND
-extern int __init unwind_init(void);
-#else
-static inline int __init unwind_init(void)
-{
- return 0;
-}
-#endif
-
#endif /* !__ASSEMBLY__ */
#ifdef CONFIG_ARM_UNWIND
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 9ad50c4208a..b145f16c91b 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -497,7 +497,7 @@ ENDPROC(__und_usr)
.popsection
.pushsection __ex_table,"a"
.long 1b, 4b
-#if __LINUX_ARM_ARCH__ >= 7
+#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
.long 2b, 4b
.long 3b, 4b
#endif
diff --git a/arch/arm/kernel/kprobes-arm.c b/arch/arm/kernel/kprobes-arm.c
index 9fe8910308a..8a30c89da70 100644
--- a/arch/arm/kernel/kprobes-arm.c
+++ b/arch/arm/kernel/kprobes-arm.c
@@ -519,10 +519,12 @@ static const union decode_item arm_cccc_0000_____1001_table[] = {
static const union decode_item arm_cccc_0001_____1001_table[] = {
/* Synchronization primitives */
+#if __LINUX_ARM_ARCH__ < 6
+ /* Deprecated on ARMv6 and may be UNDEFINED on v7 */
/* SMP/SWPB cccc 0001 0x00 xxxx xxxx xxxx 1001 xxxx */
DECODE_EMULATEX (0x0fb000f0, 0x01000090, emulate_rd12rn16rm0_rwflags_nopc,
REGS(NOPC, NOPC, 0, 0, NOPC)),
-
+#endif
/* LDREX/STREX{,D,B,H} cccc 0001 1xxx xxxx xxxx xxxx 1001 xxxx */
/* And unallocated instructions... */
DECODE_END
diff --git a/arch/arm/kernel/kprobes-test-arm.c b/arch/arm/kernel/kprobes-test-arm.c
index fc82de8bdcc..ba32b393b3f 100644
--- a/arch/arm/kernel/kprobes-test-arm.c
+++ b/arch/arm/kernel/kprobes-test-arm.c
@@ -427,18 +427,25 @@ void kprobe_arm_test_cases(void)
TEST_GROUP("Synchronization primitives")
- /*
- * Use hard coded constants for SWP instructions to avoid warnings
- * about deprecated instructions.
- */
- TEST_RP( ".word 0xe108e097 @ swp lr, r",7,VAL2,", [r",8,0,"]")
- TEST_R( ".word 0x610d0091 @ swpvs r0, r",1,VAL1,", [sp]")
- TEST_RP( ".word 0xe10cd09e @ swp sp, r",14,VAL2,", [r",12,13*4,"]")
+#if __LINUX_ARM_ARCH__ < 6
+ TEST_RP("swp lr, r",7,VAL2,", [r",8,0,"]")
+ TEST_R( "swpvs r0, r",1,VAL1,", [sp]")
+ TEST_RP("swp sp, r",14,VAL2,", [r",12,13*4,"]")
+#else
+ TEST_UNSUPPORTED(".word 0xe108e097 @ swp lr, r7, [r8]")
+ TEST_UNSUPPORTED(".word 0x610d0091 @ swpvs r0, r1, [sp]")
+ TEST_UNSUPPORTED(".word 0xe10cd09e @ swp sp, r14 [r12]")
+#endif
TEST_UNSUPPORTED(".word 0xe102f091 @ swp pc, r1, [r2]")
TEST_UNSUPPORTED(".word 0xe102009f @ swp r0, pc, [r2]")
TEST_UNSUPPORTED(".word 0xe10f0091 @ swp r0, r1, [pc]")
- TEST_RP( ".word 0xe148e097 @ swpb lr, r",7,VAL2,", [r",8,0,"]")
- TEST_R( ".word 0x614d0091 @ swpvsb r0, r",1,VAL1,", [sp]")
+#if __LINUX_ARM_ARCH__ < 6
+ TEST_RP("swpb lr, r",7,VAL2,", [r",8,0,"]")
+ TEST_R( "swpvsb r0, r",1,VAL1,", [sp]")
+#else
+ TEST_UNSUPPORTED(".word 0xe148e097 @ swpb lr, r7, [r8]")
+ TEST_UNSUPPORTED(".word 0x614d0091 @ swpvsb r0, r1, [sp]")
+#endif
TEST_UNSUPPORTED(".word 0xe142f091 @ swpb pc, r1, [r2]")
TEST_UNSUPPORTED(".word 0xe1100090") /* Unallocated space */
@@ -550,7 +557,7 @@ void kprobe_arm_test_cases(void)
TEST_RPR( "strccd r",8, VAL2,", [r",13,0, ", r",12,48,"]")
TEST_RPR( "strd r",4, VAL1,", [r",2, 24,", r",3, 48,"]!")
TEST_RPR( "strcsd r",12,VAL2,", [r",11,48,", -r",10,24,"]!")
- TEST_RPR( "strd r",2, VAL1,", [r",3, 24,"], r",4,48,"")
+ TEST_RPR( "strd r",2, VAL1,", [r",5, 24,"], r",4,48,"")
TEST_RPR( "strd r",10,VAL2,", [r",9, 48,"], -r",7,24,"")
TEST_UNSUPPORTED(".word 0xe1afc0fa @ strd r12, [pc, r10]!")
diff --git a/arch/arm/kernel/kprobes-test-thumb.c b/arch/arm/kernel/kprobes-test-thumb.c
index 5e726c31c45..5d8b8579222 100644
--- a/arch/arm/kernel/kprobes-test-thumb.c
+++ b/arch/arm/kernel/kprobes-test-thumb.c
@@ -222,8 +222,8 @@ void kprobe_thumb16_test_cases(void)
DONT_TEST_IN_ITBLOCK(
TEST_BF_R( "cbnz r",0,0, ", 2f")
TEST_BF_R( "cbz r",2,-1,", 2f")
- TEST_BF_RX( "cbnz r",4,1, ", 2f",0x20)
- TEST_BF_RX( "cbz r",7,0, ", 2f",0x40)
+ TEST_BF_RX( "cbnz r",4,1, ", 2f", SPACE_0x20)
+ TEST_BF_RX( "cbz r",7,0, ", 2f", SPACE_0x40)
)
TEST_R("sxth r0, r",7, HH1,"")
TEST_R("sxth r7, r",0, HH2,"")
@@ -246,7 +246,7 @@ DONT_TEST_IN_ITBLOCK(
TESTCASE_START(code) \
TEST_ARG_PTR(13, offset) \
TEST_ARG_END("") \
- TEST_BRANCH_F(code,0) \
+ TEST_BRANCH_F(code) \
TESTCASE_END
TEST("push {r0}")
@@ -319,8 +319,8 @@ CONDITION_INSTRUCTIONS(8,
TEST_BF( "b 2f")
TEST_BB( "b 2b")
- TEST_BF_X("b 2f", 0x400)
- TEST_BB_X("b 2b", 0x400)
+ TEST_BF_X("b 2f", SPACE_0x400)
+ TEST_BB_X("b 2b", SPACE_0x400)
TEST_GROUP("Testing instructions in IT blocks")
@@ -746,7 +746,7 @@ CONDITION_INSTRUCTIONS(22,
TEST_BB("bne.w 2b")
TEST_BF("bgt.w 2f")
TEST_BB("blt.w 2b")
- TEST_BF_X("bpl.w 2f",0x1000)
+ TEST_BF_X("bpl.w 2f", SPACE_0x1000)
)
TEST_UNSUPPORTED("msr cpsr, r0")
@@ -786,11 +786,11 @@ CONDITION_INSTRUCTIONS(22,
TEST_BF( "b.w 2f")
TEST_BB( "b.w 2b")
- TEST_BF_X("b.w 2f", 0x1000)
+ TEST_BF_X("b.w 2f", SPACE_0x1000)
TEST_BF( "bl.w 2f")
TEST_BB( "bl.w 2b")
- TEST_BB_X("bl.w 2b", 0x1000)
+ TEST_BB_X("bl.w 2b", SPACE_0x1000)
TEST_X( "blx __dummy_arm_subroutine",
".arm \n\t"
diff --git a/arch/arm/kernel/kprobes-test.h b/arch/arm/kernel/kprobes-test.h
index 0dc5d77b935..e28a869b1ae 100644
--- a/arch/arm/kernel/kprobes-test.h
+++ b/arch/arm/kernel/kprobes-test.h
@@ -149,23 +149,31 @@ struct test_arg_end {
"1: "instruction" \n\t" \
" nop \n\t"
-#define TEST_BRANCH_F(instruction, xtra_dist) \
+#define TEST_BRANCH_F(instruction) \
TEST_INSTRUCTION(instruction) \
- ".if "#xtra_dist" \n\t" \
" b 99f \n\t" \
- ".space "#xtra_dist" \n\t" \
- ".endif \n\t" \
+ "2: nop \n\t"
+
+#define TEST_BRANCH_B(instruction) \
+ " b 50f \n\t" \
+ " b 99f \n\t" \
+ "2: nop \n\t" \
+ " b 99f \n\t" \
+ TEST_INSTRUCTION(instruction)
+
+#define TEST_BRANCH_FX(instruction, codex) \
+ TEST_INSTRUCTION(instruction) \
+ " b 99f \n\t" \
+ codex" \n\t" \
" b 99f \n\t" \
"2: nop \n\t"
-#define TEST_BRANCH_B(instruction, xtra_dist) \
+#define TEST_BRANCH_BX(instruction, codex) \
" b 50f \n\t" \
" b 99f \n\t" \
"2: nop \n\t" \
" b 99f \n\t" \
- ".if "#xtra_dist" \n\t" \
- ".space "#xtra_dist" \n\t" \
- ".endif \n\t" \
+ codex" \n\t" \
TEST_INSTRUCTION(instruction)
#define TESTCASE_END \
@@ -301,47 +309,60 @@ struct test_arg_end {
TESTCASE_START(code1 #reg1 code2) \
TEST_ARG_PTR(reg1, val1) \
TEST_ARG_END("") \
- TEST_BRANCH_F(code1 #reg1 code2, 0) \
+ TEST_BRANCH_F(code1 #reg1 code2) \
TESTCASE_END
-#define TEST_BF_X(code, xtra_dist) \
+#define TEST_BF(code) \
TESTCASE_START(code) \
TEST_ARG_END("") \
- TEST_BRANCH_F(code, xtra_dist) \
+ TEST_BRANCH_F(code) \
TESTCASE_END
-#define TEST_BB_X(code, xtra_dist) \
+#define TEST_BB(code) \
TESTCASE_START(code) \
TEST_ARG_END("") \
- TEST_BRANCH_B(code, xtra_dist) \
+ TEST_BRANCH_B(code) \
TESTCASE_END
-#define TEST_BF_RX(code1, reg, val, code2, xtra_dist) \
- TESTCASE_START(code1 #reg code2) \
- TEST_ARG_REG(reg, val) \
- TEST_ARG_END("") \
- TEST_BRANCH_F(code1 #reg code2, xtra_dist) \
+#define TEST_BF_R(code1, reg, val, code2) \
+ TESTCASE_START(code1 #reg code2) \
+ TEST_ARG_REG(reg, val) \
+ TEST_ARG_END("") \
+ TEST_BRANCH_F(code1 #reg code2) \
TESTCASE_END
-#define TEST_BB_RX(code1, reg, val, code2, xtra_dist) \
- TESTCASE_START(code1 #reg code2) \
- TEST_ARG_REG(reg, val) \
- TEST_ARG_END("") \
- TEST_BRANCH_B(code1 #reg code2, xtra_dist) \
+#define TEST_BB_R(code1, reg, val, code2) \
+ TESTCASE_START(code1 #reg code2) \
+ TEST_ARG_REG(reg, val) \
+ TEST_ARG_END("") \
+ TEST_BRANCH_B(code1 #reg code2) \
TESTCASE_END
-#define TEST_BF(code) TEST_BF_X(code, 0)
-#define TEST_BB(code) TEST_BB_X(code, 0)
-
-#define TEST_BF_R(code1, reg, val, code2) TEST_BF_RX(code1, reg, val, code2, 0)
-#define TEST_BB_R(code1, reg, val, code2) TEST_BB_RX(code1, reg, val, code2, 0)
-
#define TEST_BF_RR(code1, reg1, val1, code2, reg2, val2, code3) \
TESTCASE_START(code1 #reg1 code2 #reg2 code3) \
TEST_ARG_REG(reg1, val1) \
TEST_ARG_REG(reg2, val2) \
TEST_ARG_END("") \
- TEST_BRANCH_F(code1 #reg1 code2 #reg2 code3, 0) \
+ TEST_BRANCH_F(code1 #reg1 code2 #reg2 code3) \
+ TESTCASE_END
+
+#define TEST_BF_X(code, codex) \
+ TESTCASE_START(code) \
+ TEST_ARG_END("") \
+ TEST_BRANCH_FX(code, codex) \
+ TESTCASE_END
+
+#define TEST_BB_X(code, codex) \
+ TESTCASE_START(code) \
+ TEST_ARG_END("") \
+ TEST_BRANCH_BX(code, codex) \
+ TESTCASE_END
+
+#define TEST_BF_RX(code1, reg, val, code2, codex) \
+ TESTCASE_START(code1 #reg code2) \
+ TEST_ARG_REG(reg, val) \
+ TEST_ARG_END("") \
+ TEST_BRANCH_FX(code1 #reg code2, codex) \
TESTCASE_END
#define TEST_X(code, codex) \
@@ -372,6 +393,25 @@ struct test_arg_end {
TESTCASE_END
+/*
+ * Macros for defining space directives spread over multiple lines.
+ * These are required so that the compiler makes a better estimate of the
+ * length of the inline asm code and spills the literal pool early enough to
+ * avoid generating PC-relative loads with out-of-range offsets.
+ */
+#define TWICE(x) x x
+#define SPACE_0x8 TWICE(".space 4\n\t")
+#define SPACE_0x10 TWICE(SPACE_0x8)
+#define SPACE_0x20 TWICE(SPACE_0x10)
+#define SPACE_0x40 TWICE(SPACE_0x20)
+#define SPACE_0x80 TWICE(SPACE_0x40)
+#define SPACE_0x100 TWICE(SPACE_0x80)
+#define SPACE_0x200 TWICE(SPACE_0x100)
+#define SPACE_0x400 TWICE(SPACE_0x200)
+#define SPACE_0x800 TWICE(SPACE_0x400)
+#define SPACE_0x1000 TWICE(SPACE_0x800)
+
+
/* Various values used in test cases... */
#define N(val) (val ^ 0xffffffff)
#define VAL1 0x12345678
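To make the doubling scheme above concrete: SPACE_0x8 is two ".space 4" directives (8 bytes), so SPACE_0x20 expands to eight of them (0x20 bytes of padding), and so on up to SPACE_0x1000. A test case then requests that padding explicitly, as in the thumb test update earlier in this diff:

	/* 0x20 bytes of inline padding between the branch and its target */
	TEST_BF_RX("cbnz	r", 4, 1, ", 2f", SPACE_0x20)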
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 24e2347be6b..88b0941ce51 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -343,19 +343,25 @@ validate_group(struct perf_event *event)
{
struct perf_event *sibling, *leader = event->group_leader;
struct pmu_hw_events fake_pmu;
+ DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS);
- memset(&fake_pmu, 0, sizeof(fake_pmu));
+ /*
+ * Initialise the fake PMU. We only need to populate the
+ * used_mask for the purposes of validation.
+ */
+ memset(fake_used_mask, 0, sizeof(fake_used_mask));
+ fake_pmu.used_mask = fake_used_mask;
if (!validate_event(&fake_pmu, leader))
- return -ENOSPC;
+ return -EINVAL;
list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
if (!validate_event(&fake_pmu, sibling))
- return -ENOSPC;
+ return -EINVAL;
}
if (!validate_event(&fake_pmu, event))
- return -ENOSPC;
+ return -EINVAL;
return 0;
}
@@ -396,6 +402,9 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
int i, err, irq, irqs;
struct platform_device *pmu_device = armpmu->plat_device;
+ if (!pmu_device)
+ return -ENODEV;
+
err = reserve_pmu(armpmu->type);
if (err) {
pr_warning("unable to reserve pmu\n");
@@ -631,6 +640,9 @@ static struct platform_device_id armpmu_plat_device_ids[] = {
static int __devinit armpmu_device_probe(struct platform_device *pdev)
{
+ if (!cpu_pmu)
+ return -ENODEV;
+
cpu_pmu->plat_device = pdev;
return 0;
}
diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c
index 2c3407ee857..2334bf8a650 100644
--- a/arch/arm/kernel/pmu.c
+++ b/arch/arm/kernel/pmu.c
@@ -33,3 +33,4 @@ release_pmu(enum arm_pmu_type type)
{
clear_bit_unlock(type, pmu_lock);
}
+EXPORT_SYMBOL_GPL(release_pmu);
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 75316f0dd02..3d0c6fb74ae 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -192,6 +192,9 @@ void cpu_idle(void)
#endif
local_irq_disable();
+#ifdef CONFIG_PL310_ERRATA_769419
+ wmb();
+#endif
if (hlt_counter) {
local_irq_enable();
cpu_relax();
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 3448a3f9cc8..8fc2c8fcbdc 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -895,8 +895,6 @@ void __init setup_arch(char **cmdline_p)
{
struct machine_desc *mdesc;
- unwind_init();
-
setup_processor();
mdesc = setup_machine_fdt(__atags_pointer);
if (!mdesc)
@@ -904,6 +902,12 @@ void __init setup_arch(char **cmdline_p)
machine_desc = mdesc;
machine_name = mdesc->name;
+#ifdef CONFIG_ZONE_DMA
+ if (mdesc->dma_zone_size) {
+ extern unsigned long arm_dma_zone_size;
+ arm_dma_zone_size = mdesc->dma_zone_size;
+ }
+#endif
if (mdesc->soft_reboot)
reboot_setup("s");
@@ -934,12 +938,6 @@ void __init setup_arch(char **cmdline_p)
tcm_init();
-#ifdef CONFIG_ZONE_DMA
- if (mdesc->dma_zone_size) {
- extern unsigned long arm_dma_zone_size;
- arm_dma_zone_size = mdesc->dma_zone_size;
- }
-#endif
#ifdef CONFIG_MULTI_IRQ_HANDLER
handle_arch_irq = mdesc->handle_irq;
#endif
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 1040c00405d..8200deaa14f 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -43,7 +43,7 @@
struct cputopo_arm cpu_topology[NR_CPUS];
-const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
+const struct cpumask *cpu_coregroup_mask(int cpu)
{
return &cpu_topology[cpu].core_sibling;
}
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index e7e8365795c..00df012c467 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__aeabi_unwind_cpp_pr2);
struct unwind_ctrl_block {
unsigned long vrs[16]; /* virtual register set */
- unsigned long *insn; /* pointer to the current instructions word */
+ const unsigned long *insn; /* pointer to the current instructions word */
int entries; /* number of entries left to interpret */
int byte; /* current byte number in the instructions word */
};
@@ -83,8 +83,9 @@ enum regs {
PC = 15
};
-extern struct unwind_idx __start_unwind_idx[];
-extern struct unwind_idx __stop_unwind_idx[];
+extern const struct unwind_idx __start_unwind_idx[];
+static const struct unwind_idx *__origin_unwind_idx;
+extern const struct unwind_idx __stop_unwind_idx[];
static DEFINE_SPINLOCK(unwind_lock);
static LIST_HEAD(unwind_tables);
@@ -98,45 +99,99 @@ static LIST_HEAD(unwind_tables);
})
/*
- * Binary search in the unwind index. The entries entries are
+ * Binary search in the unwind index. The entries are
* guaranteed to be sorted in ascending order by the linker.
+ *
+ * start = first entry
+ * origin = first entry with positive offset (or stop if there is no such entry)
+ * stop - 1 = last entry
*/
-static struct unwind_idx *search_index(unsigned long addr,
- struct unwind_idx *first,
- struct unwind_idx *last)
+static const struct unwind_idx *search_index(unsigned long addr,
+ const struct unwind_idx *start,
+ const struct unwind_idx *origin,
+ const struct unwind_idx *stop)
{
- pr_debug("%s(%08lx, %p, %p)\n", __func__, addr, first, last);
+ unsigned long addr_prel31;
+
+ pr_debug("%s(%08lx, %p, %p, %p)\n",
+ __func__, addr, start, origin, stop);
+
+ /*
+ * only search in the section with the matching sign. This way the
+ * prel31 numbers can be compared as unsigned longs.
+ */
+ if (addr < (unsigned long)start)
+ /* negative offsets: [start; origin) */
+ stop = origin;
+ else
+ /* positive offsets: [origin; stop) */
+ start = origin;
+
+	/* prel31 for address relative to start */
+ addr_prel31 = (addr - (unsigned long)start) & 0x7fffffff;
- if (addr < first->addr) {
+ while (start < stop - 1) {
+ const struct unwind_idx *mid = start + ((stop - start) >> 1);
+
+ /*
+ * As addr_prel31 is relative to start an offset is needed to
+ * make it relative to mid.
+ */
+ if (addr_prel31 - ((unsigned long)mid - (unsigned long)start) <
+ mid->addr_offset)
+ stop = mid;
+ else {
+ /* keep addr_prel31 relative to start */
+ addr_prel31 -= ((unsigned long)mid -
+ (unsigned long)start);
+ start = mid;
+ }
+ }
+
+ if (likely(start->addr_offset <= addr_prel31))
+ return start;
+ else {
pr_warning("unwind: Unknown symbol address %08lx\n", addr);
return NULL;
- } else if (addr >= last->addr)
- return last;
+ }
+}
- while (first < last - 1) {
- struct unwind_idx *mid = first + ((last - first + 1) >> 1);
+static const struct unwind_idx *unwind_find_origin(
+ const struct unwind_idx *start, const struct unwind_idx *stop)
+{
+ pr_debug("%s(%p, %p)\n", __func__, start, stop);
+ while (start < stop) {
+ const struct unwind_idx *mid = start + ((stop - start) >> 1);
- if (addr < mid->addr)
- last = mid;
+ if (mid->addr_offset >= 0x40000000)
+ /* negative offset */
+ start = mid + 1;
else
- first = mid;
+ /* positive offset */
+ stop = mid;
}
-
- return first;
+ pr_debug("%s -> %p\n", __func__, stop);
+ return stop;
}
-static struct unwind_idx *unwind_find_idx(unsigned long addr)
+static const struct unwind_idx *unwind_find_idx(unsigned long addr)
{
- struct unwind_idx *idx = NULL;
+ const struct unwind_idx *idx = NULL;
unsigned long flags;
pr_debug("%s(%08lx)\n", __func__, addr);
- if (core_kernel_text(addr))
+ if (core_kernel_text(addr)) {
+ if (unlikely(!__origin_unwind_idx))
+ __origin_unwind_idx =
+ unwind_find_origin(__start_unwind_idx,
+ __stop_unwind_idx);
+
/* main unwind table */
idx = search_index(addr, __start_unwind_idx,
- __stop_unwind_idx - 1);
- else {
+ __origin_unwind_idx,
+ __stop_unwind_idx);
+ } else {
/* module unwind tables */
struct unwind_table *table;
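The comparisons in the reworked search rely on each index entry holding a prel31 value: a 31-bit, place-relative, sign-extended offset rather than an absolute address. As a plain-C sketch of the decoding step (mirroring the prel31_to_addr() macro used elsewhere in unwind.c):

	/* Sketch only: recover an absolute address from a prel31 word by
	 * sign-extending bit 30 and adding the word's own address. */
	static unsigned long prel31_to_addr_sketch(const unsigned long *ptr)
	{
		/* sign-extend from 31 bits to 32 bits */
		long offset = (((long)*ptr) << 1) >> 1;

		return (unsigned long)ptr + offset;
	}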
@@ -145,7 +200,8 @@ static struct unwind_idx *unwind_find_idx(unsigned long addr)
if (addr >= table->begin_addr &&
addr < table->end_addr) {
idx = search_index(addr, table->start,
- table->stop - 1);
+ table->origin,
+ table->stop);
/* Move-to-front to exploit common traces */
list_move(&table->list, &unwind_tables);
break;
@@ -274,7 +330,7 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
int unwind_frame(struct stackframe *frame)
{
unsigned long high, low;
- struct unwind_idx *idx;
+ const struct unwind_idx *idx;
struct unwind_ctrl_block ctrl;
/* only go to a higher address on the stack */
@@ -399,7 +455,6 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
unsigned long text_size)
{
unsigned long flags;
- struct unwind_idx *idx;
struct unwind_table *tab = kmalloc(sizeof(*tab), GFP_KERNEL);
pr_debug("%s(%08lx, %08lx, %08lx, %08lx)\n", __func__, start, size,
@@ -408,15 +463,12 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
if (!tab)
return tab;
- tab->start = (struct unwind_idx *)start;
- tab->stop = (struct unwind_idx *)(start + size);
+ tab->start = (const struct unwind_idx *)start;
+ tab->stop = (const struct unwind_idx *)(start + size);
+ tab->origin = unwind_find_origin(tab->start, tab->stop);
tab->begin_addr = text_addr;
tab->end_addr = text_addr + text_size;
- /* Convert the symbol addresses to absolute values */
- for (idx = tab->start; idx < tab->stop; idx++)
- idx->addr = prel31_to_addr(&idx->addr);
-
spin_lock_irqsave(&unwind_lock, flags);
list_add_tail(&tab->list, &unwind_tables);
spin_unlock_irqrestore(&unwind_lock, flags);
@@ -437,16 +489,3 @@ void unwind_table_del(struct unwind_table *tab)
kfree(tab);
}
-
-int __init unwind_init(void)
-{
- struct unwind_idx *idx;
-
- /* Convert the symbol addresses to absolute values */
- for (idx = __start_unwind_idx; idx < __stop_unwind_idx; idx++)
- idx->addr = prel31_to_addr(&idx->addr);
-
- pr_debug("unwind: ARM stack unwinding initialised\n");
-
- return 0;
-}
diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h
index 10d868a5a48..d6408d1ee54 100644
--- a/arch/arm/lib/bitops.h
+++ b/arch/arm/lib/bitops.h
@@ -1,5 +1,9 @@
+#include <asm/unwind.h>
+
#if __LINUX_ARM_ARCH__ >= 6
- .macro bitop, instr
+ .macro bitop, name, instr
+ENTRY( \name )
+UNWIND( .fnstart )
ands ip, r1, #3
strneb r1, [ip] @ assert word-aligned
mov r2, #1
@@ -13,9 +17,13 @@
cmp r0, #0
bne 1b
bx lr
+UNWIND( .fnend )
+ENDPROC(\name )
.endm
- .macro testop, instr, store
+ .macro testop, name, instr, store
+ENTRY( \name )
+UNWIND( .fnstart )
ands ip, r1, #3
strneb r1, [ip] @ assert word-aligned
mov r2, #1
@@ -34,9 +42,13 @@
cmp r0, #0
movne r0, #1
2: bx lr
+UNWIND( .fnend )
+ENDPROC(\name )
.endm
#else
- .macro bitop, instr
+ .macro bitop, name, instr
+ENTRY( \name )
+UNWIND( .fnstart )
ands ip, r1, #3
strneb r1, [ip] @ assert word-aligned
and r2, r0, #31
@@ -49,6 +61,8 @@
str r2, [r1, r0, lsl #2]
restore_irqs ip
mov pc, lr
+UNWIND( .fnend )
+ENDPROC(\name )
.endm
/**
@@ -59,7 +73,9 @@
* Note: we can trivially conditionalise the store instruction
* to avoid dirtying the data cache.
*/
- .macro testop, instr, store
+ .macro testop, name, instr, store
+ENTRY( \name )
+UNWIND( .fnstart )
ands ip, r1, #3
strneb r1, [ip] @ assert word-aligned
and r3, r0, #31
@@ -73,5 +89,7 @@
moveq r0, #0
restore_irqs ip
mov pc, lr
+UNWIND( .fnend )
+ENDPROC(\name )
.endm
#endif
diff --git a/arch/arm/lib/changebit.S b/arch/arm/lib/changebit.S
index 68ed5b62e83..f4027862172 100644
--- a/arch/arm/lib/changebit.S
+++ b/arch/arm/lib/changebit.S
@@ -12,6 +12,4 @@
#include "bitops.h"
.text
-ENTRY(_change_bit)
- bitop eor
-ENDPROC(_change_bit)
+bitop _change_bit, eor
diff --git a/arch/arm/lib/clearbit.S b/arch/arm/lib/clearbit.S
index 4c04c3b51ee..f6b75fb64d3 100644
--- a/arch/arm/lib/clearbit.S
+++ b/arch/arm/lib/clearbit.S
@@ -12,6 +12,4 @@
#include "bitops.h"
.text
-ENTRY(_clear_bit)
- bitop bic
-ENDPROC(_clear_bit)
+bitop _clear_bit, bic
diff --git a/arch/arm/lib/setbit.S b/arch/arm/lib/setbit.S
index bbee5c66a23..618fedae4b3 100644
--- a/arch/arm/lib/setbit.S
+++ b/arch/arm/lib/setbit.S
@@ -12,6 +12,4 @@
#include "bitops.h"
.text
-ENTRY(_set_bit)
- bitop orr
-ENDPROC(_set_bit)
+bitop _set_bit, orr
diff --git a/arch/arm/lib/testchangebit.S b/arch/arm/lib/testchangebit.S
index 15a4d431f22..4becdc3a59c 100644
--- a/arch/arm/lib/testchangebit.S
+++ b/arch/arm/lib/testchangebit.S
@@ -12,6 +12,4 @@
#include "bitops.h"
.text
-ENTRY(_test_and_change_bit)
- testop eor, str
-ENDPROC(_test_and_change_bit)
+testop _test_and_change_bit, eor, str
diff --git a/arch/arm/lib/testclearbit.S b/arch/arm/lib/testclearbit.S
index 521b66b5b95..918841dcce7 100644
--- a/arch/arm/lib/testclearbit.S
+++ b/arch/arm/lib/testclearbit.S
@@ -12,6 +12,4 @@
#include "bitops.h"
.text
-ENTRY(_test_and_clear_bit)
- testop bicne, strne
-ENDPROC(_test_and_clear_bit)
+testop _test_and_clear_bit, bicne, strne
diff --git a/arch/arm/lib/testsetbit.S b/arch/arm/lib/testsetbit.S
index 1c98cc2185b..8d1b2fe9e48 100644
--- a/arch/arm/lib/testsetbit.S
+++ b/arch/arm/lib/testsetbit.S
@@ -12,6 +12,4 @@
#include "bitops.h"
.text
-ENTRY(_test_and_set_bit)
- testop orreq, streq
-ENDPROC(_test_and_set_bit)
+testop _test_and_set_bit, orreq, streq
diff --git a/arch/arm/mach-at91/at91rm9200_devices.c b/arch/arm/mach-at91/at91rm9200_devices.c
index 66591fa53e0..ad930688358 100644
--- a/arch/arm/mach-at91/at91rm9200_devices.c
+++ b/arch/arm/mach-at91/at91rm9200_devices.c
@@ -83,7 +83,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
* USB Device (Gadget)
* -------------------------------------------------------------------- */
-#ifdef CONFIG_USB_GADGET_AT91
+#ifdef CONFIG_USB_AT91
static struct at91_udc_data udc_data;
static struct resource udc_resources[] = {
diff --git a/arch/arm/mach-at91/at91sam9260.c b/arch/arm/mach-at91/at91sam9260.c
index b84a9f642f5..0d20677fbef 100644
--- a/arch/arm/mach-at91/at91sam9260.c
+++ b/arch/arm/mach-at91/at91sam9260.c
@@ -195,9 +195,9 @@ static struct clk_lookup periph_clocks_lookups[] = {
CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk),
CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk),
CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk),
- CLKDEV_CON_DEV_ID("t3_clk", "atmel_tcb.1", &tc3_clk),
- CLKDEV_CON_DEV_ID("t4_clk", "atmel_tcb.1", &tc4_clk),
- CLKDEV_CON_DEV_ID("t5_clk", "atmel_tcb.1", &tc5_clk),
+ CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.1", &tc3_clk),
+ CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.1", &tc4_clk),
+ CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.1", &tc5_clk),
CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc_clk),
/* more usart lookup table for DT entries */
CLKDEV_CON_DEV_ID("usart", "fffff200.serial", &mck),
diff --git a/arch/arm/mach-at91/at91sam9260_devices.c b/arch/arm/mach-at91/at91sam9260_devices.c
index 25e3464fb07..629fa977497 100644
--- a/arch/arm/mach-at91/at91sam9260_devices.c
+++ b/arch/arm/mach-at91/at91sam9260_devices.c
@@ -84,7 +84,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
* USB Device (Gadget)
* -------------------------------------------------------------------- */
-#ifdef CONFIG_USB_GADGET_AT91
+#ifdef CONFIG_USB_AT91
static struct at91_udc_data udc_data;
static struct resource udc_resources[] = {
diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c
index ae78f4d03b7..a178b58b0b9 100644
--- a/arch/arm/mach-at91/at91sam9261_devices.c
+++ b/arch/arm/mach-at91/at91sam9261_devices.c
@@ -87,7 +87,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
* USB Device (Gadget)
* -------------------------------------------------------------------- */
-#ifdef CONFIG_USB_GADGET_AT91
+#ifdef CONFIG_USB_AT91
static struct at91_udc_data udc_data;
static struct resource udc_resources[] = {
diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c
index ad017eb1f8d..d5fbac9ff4f 100644
--- a/arch/arm/mach-at91/at91sam9263_devices.c
+++ b/arch/arm/mach-at91/at91sam9263_devices.c
@@ -92,7 +92,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
* USB Device (Gadget)
* -------------------------------------------------------------------- */
-#ifdef CONFIG_USB_GADGET_AT91
+#ifdef CONFIG_USB_AT91
static struct at91_udc_data udc_data;
static struct resource udc_resources[] = {
diff --git a/arch/arm/mach-at91/include/mach/system_rev.h b/arch/arm/mach-at91/include/mach/system_rev.h
index 8f4866045b4..ec164a4124c 100644
--- a/arch/arm/mach-at91/include/mach/system_rev.h
+++ b/arch/arm/mach-at91/include/mach/system_rev.h
@@ -19,7 +19,7 @@
#define BOARD_HAVE_NAND_16BIT (1 << 31)
static inline int board_have_nand_16bit(void)
{
- return system_rev & BOARD_HAVE_NAND_16BIT;
+ return (system_rev & BOARD_HAVE_NAND_16BIT) ? 1 : 0;
}
#endif /* __ARCH_SYSTEM_REV_H__ */
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index 1d7d2499522..6659a90dbca 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -753,7 +753,7 @@ static struct snd_platform_data da850_evm_snd_data = {
.num_serializer = ARRAY_SIZE(da850_iis_serializer_direction),
.tdm_slots = 2,
.serial_dir = da850_iis_serializer_direction,
- .asp_chan_q = EVENTQ_1,
+ .asp_chan_q = EVENTQ_0,
.version = MCASP_VERSION_2,
.txnumevt = 1,
.rxnumevt = 1,
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index 1918ae71142..46e1f4173b9 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -107,7 +107,7 @@ static struct mtd_partition davinci_nand_partitions[] = {
/* UBL (a few copies) plus U-Boot */
.name = "bootloader",
.offset = 0,
- .size = 28 * NAND_BLOCK_SIZE,
+ .size = 30 * NAND_BLOCK_SIZE,
.mask_flags = MTD_WRITEABLE, /* force read-only */
}, {
/* U-Boot environment */
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
index e574d7f837a..635bf774015 100644
--- a/arch/arm/mach-davinci/board-dm646x-evm.c
+++ b/arch/arm/mach-davinci/board-dm646x-evm.c
@@ -564,7 +564,7 @@ static int setup_vpif_input_channel_mode(int mux_mode)
int val;
u32 value;
- if (!vpif_vsclkdis_reg || !cpld_client)
+ if (!vpif_vidclkctl_reg || !cpld_client)
return -ENXIO;
val = i2c_smbus_read_byte(cpld_client);
@@ -572,7 +572,7 @@ static int setup_vpif_input_channel_mode(int mux_mode)
return val;
spin_lock_irqsave(&vpif_reg_lock, flags);
- value = __raw_readl(vpif_vsclkdis_reg);
+ value = __raw_readl(vpif_vidclkctl_reg);
if (mux_mode) {
val &= VPIF_INPUT_TWO_CHANNEL;
value |= VIDCH1CLK;
@@ -580,7 +580,7 @@ static int setup_vpif_input_channel_mode(int mux_mode)
val |= VPIF_INPUT_ONE_CHANNEL;
value &= ~VIDCH1CLK;
}
- __raw_writel(value, vpif_vsclkdis_reg);
+ __raw_writel(value, vpif_vidclkctl_reg);
spin_unlock_irqrestore(&vpif_reg_lock, flags);
err = i2c_smbus_write_byte(cpld_client, val);
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
index 0b68ed534f8..af27c130595 100644
--- a/arch/arm/mach-davinci/dm646x.c
+++ b/arch/arm/mach-davinci/dm646x.c
@@ -161,7 +161,6 @@ static struct clk dsp_clk = {
.name = "dsp",
.parent = &pll1_sysclk1,
.lpsc = DM646X_LPSC_C64X_CPU,
- .flags = PSC_DSP,
.usecount = 1, /* REVISIT how to disable? */
};
diff --git a/arch/arm/mach-davinci/include/mach/psc.h b/arch/arm/mach-davinci/include/mach/psc.h
index fa59c097223..8bc3fc25617 100644
--- a/arch/arm/mach-davinci/include/mach/psc.h
+++ b/arch/arm/mach-davinci/include/mach/psc.h
@@ -233,7 +233,7 @@
#define PTCMD 0x120
#define PTSTAT 0x128
#define PDSTAT 0x200
-#define PDCTL1 0x304
+#define PDCTL 0x300
#define MDSTAT 0x800
#define MDCTL 0xA00
@@ -244,7 +244,10 @@
#define PSC_STATE_ENABLE 3
#define MDSTAT_STATE_MASK 0x3f
+#define PDSTAT_STATE_MASK 0x1f
#define MDCTL_FORCE BIT(31)
+#define PDCTL_NEXT BIT(1)
+#define PDCTL_EPCGOOD BIT(8)
#ifndef __ASSEMBLER__
diff --git a/arch/arm/mach-davinci/psc.c b/arch/arm/mach-davinci/psc.c
index 1fb6bdff38c..d7e210f4b55 100644
--- a/arch/arm/mach-davinci/psc.c
+++ b/arch/arm/mach-davinci/psc.c
@@ -52,7 +52,7 @@ int __init davinci_psc_is_clk_active(unsigned int ctlr, unsigned int id)
void davinci_psc_config(unsigned int domain, unsigned int ctlr,
unsigned int id, bool enable, u32 flags)
{
- u32 epcpr, ptcmd, ptstat, pdstat, pdctl1, mdstat, mdctl;
+ u32 epcpr, ptcmd, ptstat, pdstat, pdctl, mdstat, mdctl;
void __iomem *psc_base;
struct davinci_soc_info *soc_info = &davinci_soc_info;
u32 next_state = PSC_STATE_ENABLE;
@@ -79,11 +79,11 @@ void davinci_psc_config(unsigned int domain, unsigned int ctlr,
mdctl |= MDCTL_FORCE;
__raw_writel(mdctl, psc_base + MDCTL + 4 * id);
- pdstat = __raw_readl(psc_base + PDSTAT);
- if ((pdstat & 0x00000001) == 0) {
- pdctl1 = __raw_readl(psc_base + PDCTL1);
- pdctl1 |= 0x1;
- __raw_writel(pdctl1, psc_base + PDCTL1);
+ pdstat = __raw_readl(psc_base + PDSTAT + 4 * domain);
+ if ((pdstat & PDSTAT_STATE_MASK) == 0) {
+ pdctl = __raw_readl(psc_base + PDCTL + 4 * domain);
+ pdctl |= PDCTL_NEXT;
+ __raw_writel(pdctl, psc_base + PDCTL + 4 * domain);
ptcmd = 1 << domain;
__raw_writel(ptcmd, psc_base + PTCMD);
@@ -92,9 +92,9 @@ void davinci_psc_config(unsigned int domain, unsigned int ctlr,
epcpr = __raw_readl(psc_base + EPCPR);
} while ((((epcpr >> domain) & 1) == 0));
- pdctl1 = __raw_readl(psc_base + PDCTL1);
- pdctl1 |= 0x100;
- __raw_writel(pdctl1, psc_base + PDCTL1);
+ pdctl = __raw_readl(psc_base + PDCTL + 4 * domain);
+ pdctl |= PDCTL_EPCGOOD;
+ __raw_writel(pdctl, psc_base + PDCTL + 4 * domain);
} else {
ptcmd = 1 << domain;
__raw_writel(ptcmd, psc_base + PTCMD);
diff --git a/arch/arm/mach-exynos/cpuidle.c b/arch/arm/mach-exynos/cpuidle.c
index 35f6502144a..4ebb382c597 100644
--- a/arch/arm/mach-exynos/cpuidle.c
+++ b/arch/arm/mach-exynos/cpuidle.c
@@ -12,6 +12,8 @@
#include <linux/init.h>
#include <linux/cpuidle.h>
#include <linux/io.h>
+#include <linux/export.h>
+#include <linux/time.h>
#include <asm/proc-fns.h>
diff --git a/arch/arm/mach-exynos/mct.c b/arch/arm/mach-exynos/mct.c
index 97343df8f13..85b5527d091 100644
--- a/arch/arm/mach-exynos/mct.c
+++ b/arch/arm/mach-exynos/mct.c
@@ -44,8 +44,6 @@ struct mct_clock_event_device {
char name[10];
};
-static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);
-
static void exynos4_mct_write(unsigned int value, void *addr)
{
void __iomem *stat_addr;
@@ -264,6 +262,9 @@ static void exynos4_clockevent_init(void)
}
#ifdef CONFIG_LOCAL_TIMERS
+
+static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);
+
/* Clock event handling */
static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt)
{
@@ -428,9 +429,13 @@ int __cpuinit local_timer_setup(struct clock_event_device *evt)
void local_timer_stop(struct clock_event_device *evt)
{
+ unsigned int cpu = smp_processor_id();
evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
if (mct_int_type == MCT_INT_SPI)
- disable_irq(evt->irq);
+ if (cpu == 0)
+ remove_irq(evt->irq, &mct_tick0_event_irq);
+ else
+ remove_irq(evt->irq, &mct_tick1_event_irq);
else
disable_percpu_irq(IRQ_MCT_LOCALTIMER);
}
@@ -443,6 +448,7 @@ static void __init exynos4_timer_resources(void)
clk_rate = clk_get_rate(mct_clk);
+#ifdef CONFIG_LOCAL_TIMERS
if (mct_int_type == MCT_INT_PPI) {
int err;
@@ -452,6 +458,7 @@ static void __init exynos4_timer_resources(void)
WARN(err, "MCT: can't request IRQ %d (%d)\n",
IRQ_MCT_LOCALTIMER, err);
}
+#endif /* CONFIG_LOCAL_TIMERS */
}
static void __init exynos4_timer_init(void)
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index b82dcf08e74..88660d500f5 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -22,6 +22,7 @@
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
+#include <linux/smp.h>
#include <asm/cacheflush.h>
#include <asm/unified.h>
@@ -72,6 +73,9 @@ static void __init highbank_map_io(void)
void highbank_set_cpu_jump(int cpu, void *jump_addr)
{
+#ifdef CONFIG_SMP
+ cpu = cpu_logical_map(cpu);
+#endif
writel(BSYM(virt_to_phys(jump_addr)), HB_JUMP_TABLE_VIRT(cpu));
__cpuc_flush_dcache_area(HB_JUMP_TABLE_VIRT(cpu), 16);
outer_clean_range(HB_JUMP_TABLE_PHYS(cpu),
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index 5f7f9c2a34a..c44aa974e79 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -10,11 +10,6 @@ config HAVE_IMX_MMDC
config HAVE_IMX_SRC
bool
-#
-# ARCH_MX31 and ARCH_MX35 are left for compatibility
-# Some usages assume that having one of them implies not having (e.g.) ARCH_MX2.
-# To easily distinguish good and reviewed from unreviewed usages new (and IMHO
-# more sensible) names are used: SOC_IMX31 and SOC_IMX35
config ARCH_MX1
bool
@@ -27,12 +22,6 @@ config ARCH_MX25
config MACH_MX27
bool
-config ARCH_MX31
- bool
-
-config ARCH_MX35
- bool
-
config SOC_IMX1
bool
select ARCH_MX1
@@ -72,7 +61,6 @@ config SOC_IMX31
select CPU_V6
select IMX_HAVE_PLATFORM_MXC_RNGA
select ARCH_MXC_AUDMUX_V2
- select ARCH_MX31
select MXC_AVIC
select SMP_ON_UP if SMP
@@ -82,7 +70,6 @@ config SOC_IMX35
select ARCH_MXC_IOMUX_V3
select ARCH_MXC_AUDMUX_V2
select HAVE_EPIT
- select ARCH_MX35
select MXC_AVIC
select SMP_ON_UP if SMP
diff --git a/arch/arm/mach-imx/clock-imx6q.c b/arch/arm/mach-imx/clock-imx6q.c
index 613a1b993bf..039a7abb165 100644
--- a/arch/arm/mach-imx/clock-imx6q.c
+++ b/arch/arm/mach-imx/clock-imx6q.c
@@ -1953,14 +1953,17 @@ static struct map_desc imx6q_clock_desc[] = {
imx_map_entry(MX6Q, ANATOP, MT_DEVICE),
};
+void __init imx6q_clock_map_io(void)
+{
+ iotable_init(imx6q_clock_desc, ARRAY_SIZE(imx6q_clock_desc));
+}
+
int __init mx6q_clocks_init(void)
{
struct device_node *np;
void __iomem *base;
int i, irq;
- iotable_init(imx6q_clock_desc, ARRAY_SIZE(imx6q_clock_desc));
-
/* retrieve the frequency of fixed clocks from device tree */
for_each_compatible_node(np, NULL, "fixed-clock") {
u32 rate;
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index 8bf5fa34948..8deb012189b 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -34,16 +34,18 @@ static void __init imx6q_map_io(void)
{
imx_lluart_map_io();
imx_scu_map_io();
+ imx6q_clock_map_io();
}
-static void __init imx6q_gpio_add_irq_domain(struct device_node *np,
+static int __init imx6q_gpio_add_irq_domain(struct device_node *np,
struct device_node *interrupt_parent)
{
- static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS -
- 32 * 7; /* imx6q gets 7 gpio ports */
+ static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
+ gpio_irq_base -= 32;
irq_domain_add_simple(np, gpio_irq_base);
- gpio_irq_base += 32;
+
+ return 0;
}
static const struct of_device_id imx6q_irq_match[] __initconst = {
diff --git a/arch/arm/mach-imx/mm-imx3.c b/arch/arm/mach-imx/mm-imx3.c
index 9f0e82ec339..31807d2a8b7 100644
--- a/arch/arm/mach-imx/mm-imx3.c
+++ b/arch/arm/mach-imx/mm-imx3.c
@@ -33,29 +33,32 @@
static void imx3_idle(void)
{
unsigned long reg = 0;
- __asm__ __volatile__(
- /* disable I and D cache */
- "mrc p15, 0, %0, c1, c0, 0\n"
- "bic %0, %0, #0x00001000\n"
- "bic %0, %0, #0x00000004\n"
- "mcr p15, 0, %0, c1, c0, 0\n"
- /* invalidate I cache */
- "mov %0, #0\n"
- "mcr p15, 0, %0, c7, c5, 0\n"
- /* clear and invalidate D cache */
- "mov %0, #0\n"
- "mcr p15, 0, %0, c7, c14, 0\n"
- /* WFI */
- "mov %0, #0\n"
- "mcr p15, 0, %0, c7, c0, 4\n"
- "nop\n" "nop\n" "nop\n" "nop\n"
- "nop\n" "nop\n" "nop\n"
- /* enable I and D cache */
- "mrc p15, 0, %0, c1, c0, 0\n"
- "orr %0, %0, #0x00001000\n"
- "orr %0, %0, #0x00000004\n"
- "mcr p15, 0, %0, c1, c0, 0\n"
- : "=r" (reg));
+
+ if (!need_resched())
+ __asm__ __volatile__(
+ /* disable I and D cache */
+ "mrc p15, 0, %0, c1, c0, 0\n"
+ "bic %0, %0, #0x00001000\n"
+ "bic %0, %0, #0x00000004\n"
+ "mcr p15, 0, %0, c1, c0, 0\n"
+ /* invalidate I cache */
+ "mov %0, #0\n"
+ "mcr p15, 0, %0, c7, c5, 0\n"
+ /* clear and invalidate D cache */
+ "mov %0, #0\n"
+ "mcr p15, 0, %0, c7, c14, 0\n"
+ /* WFI */
+ "mov %0, #0\n"
+ "mcr p15, 0, %0, c7, c0, 4\n"
+ "nop\n" "nop\n" "nop\n" "nop\n"
+ "nop\n" "nop\n" "nop\n"
+ /* enable I and D cache */
+ "mrc p15, 0, %0, c1, c0, 0\n"
+ "orr %0, %0, #0x00001000\n"
+ "orr %0, %0, #0x00000004\n"
+ "mcr p15, 0, %0, c1, c0, 0\n"
+ : "=r" (reg));
+ local_irq_enable();
}
static void __iomem *imx3_ioremap(unsigned long phys_addr, size_t size,
@@ -108,6 +111,7 @@ void imx3_init_l2x0(void)
l2x0_init(l2x0_base, 0x00030024, 0x00000000);
}
+#ifdef CONFIG_SOC_IMX31
static struct map_desc mx31_io_desc[] __initdata = {
imx_map_entry(MX31, X_MEMC, MT_DEVICE),
imx_map_entry(MX31, AVIC, MT_DEVICE_NONSHARED),
@@ -126,33 +130,11 @@ void __init mx31_map_io(void)
iotable_init(mx31_io_desc, ARRAY_SIZE(mx31_io_desc));
}
-static struct map_desc mx35_io_desc[] __initdata = {
- imx_map_entry(MX35, X_MEMC, MT_DEVICE),
- imx_map_entry(MX35, AVIC, MT_DEVICE_NONSHARED),
- imx_map_entry(MX35, AIPS1, MT_DEVICE_NONSHARED),
- imx_map_entry(MX35, AIPS2, MT_DEVICE_NONSHARED),
- imx_map_entry(MX35, SPBA0, MT_DEVICE_NONSHARED),
-};
-
-void __init mx35_map_io(void)
-{
- iotable_init(mx35_io_desc, ARRAY_SIZE(mx35_io_desc));
-}
-
void __init imx31_init_early(void)
{
mxc_set_cpu_type(MXC_CPU_MX31);
mxc_arch_reset_init(MX31_IO_ADDRESS(MX31_WDOG_BASE_ADDR));
- imx_idle = imx3_idle;
- imx_ioremap = imx3_ioremap;
-}
-
-void __init imx35_init_early(void)
-{
- mxc_set_cpu_type(MXC_CPU_MX35);
- mxc_iomux_v3_init(MX35_IO_ADDRESS(MX35_IOMUXC_BASE_ADDR));
- mxc_arch_reset_init(MX35_IO_ADDRESS(MX35_WDOG_BASE_ADDR));
- imx_idle = imx3_idle;
+ pm_idle = imx3_idle;
imx_ioremap = imx3_ioremap;
}
@@ -161,11 +143,6 @@ void __init mx31_init_irq(void)
mxc_init_irq(MX31_IO_ADDRESS(MX31_AVIC_BASE_ADDR));
}
-void __init mx35_init_irq(void)
-{
- mxc_init_irq(MX35_IO_ADDRESS(MX35_AVIC_BASE_ADDR));
-}
-
static struct sdma_script_start_addrs imx31_to1_sdma_script __initdata = {
.per_2_per_addr = 1677,
};
@@ -199,6 +176,35 @@ void __init imx31_soc_init(void)
imx_add_imx_sdma("imx31-sdma", MX31_SDMA_BASE_ADDR, MX31_INT_SDMA, &imx31_sdma_pdata);
}
+#endif /* ifdef CONFIG_SOC_IMX31 */
+
+#ifdef CONFIG_SOC_IMX35
+static struct map_desc mx35_io_desc[] __initdata = {
+ imx_map_entry(MX35, X_MEMC, MT_DEVICE),
+ imx_map_entry(MX35, AVIC, MT_DEVICE_NONSHARED),
+ imx_map_entry(MX35, AIPS1, MT_DEVICE_NONSHARED),
+ imx_map_entry(MX35, AIPS2, MT_DEVICE_NONSHARED),
+ imx_map_entry(MX35, SPBA0, MT_DEVICE_NONSHARED),
+};
+
+void __init mx35_map_io(void)
+{
+ iotable_init(mx35_io_desc, ARRAY_SIZE(mx35_io_desc));
+}
+
+void __init imx35_init_early(void)
+{
+ mxc_set_cpu_type(MXC_CPU_MX35);
+ mxc_iomux_v3_init(MX35_IO_ADDRESS(MX35_IOMUXC_BASE_ADDR));
+ mxc_arch_reset_init(MX35_IO_ADDRESS(MX35_WDOG_BASE_ADDR));
+ pm_idle = imx3_idle;
+ imx_ioremap = imx3_ioremap;
+}
+
+void __init mx35_init_irq(void)
+{
+ mxc_init_irq(MX35_IO_ADDRESS(MX35_AVIC_BASE_ADDR));
+}
static struct sdma_script_start_addrs imx35_to1_sdma_script __initdata = {
.ap_2_ap_addr = 642,
@@ -254,3 +260,4 @@ void __init imx35_soc_init(void)
imx_add_imx_sdma("imx35-sdma", MX35_SDMA_BASE_ADDR, MX35_INT_SDMA, &imx35_sdma_pdata);
}
+#endif /* ifdef CONFIG_SOC_IMX35 */
diff --git a/arch/arm/mach-imx/src.c b/arch/arm/mach-imx/src.c
index 36cacbd0dcc..a8e33681b73 100644
--- a/arch/arm/mach-imx/src.c
+++ b/arch/arm/mach-imx/src.c
@@ -14,6 +14,7 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/smp.h>
#include <asm/unified.h>
#define SRC_SCR 0x000
@@ -23,10 +24,15 @@
static void __iomem *src_base;
+#ifndef CONFIG_SMP
+#define cpu_logical_map(cpu) 0
+#endif
+
void imx_enable_cpu(int cpu, bool enable)
{
u32 mask, val;
+ cpu = cpu_logical_map(cpu);
mask = 1 << (BP_SRC_SCR_CORE1_ENABLE + cpu - 1);
val = readl_relaxed(src_base + SRC_SCR);
val = enable ? val | mask : val & ~mask;
@@ -35,6 +41,7 @@ void imx_enable_cpu(int cpu, bool enable)
void imx_set_cpu_jump(int cpu, void *jump_addr)
{
+ cpu = cpu_logical_map(cpu);
writel_relaxed(BSYM(virt_to_phys(jump_addr)),
src_base + SRC_GPR1 + cpu * 8);
}
diff --git a/arch/arm/mach-mmp/gplugd.c b/arch/arm/mach-mmp/gplugd.c
index 69156568bc4..4665767a4f7 100644
--- a/arch/arm/mach-mmp/gplugd.c
+++ b/arch/arm/mach-mmp/gplugd.c
@@ -182,7 +182,7 @@ static void __init gplugd_init(void)
/* on-chip devices */
pxa168_add_uart(3);
- pxa168_add_ssp(0);
+ pxa168_add_ssp(1);
pxa168_add_twsi(0, NULL, ARRAY_AND_SIZE(gplugd_i2c_board_info));
pxa168_add_eth(&gplugd_eth_platform_data);
diff --git a/arch/arm/mach-mmp/include/mach/gpio-pxa.h b/arch/arm/mach-mmp/include/mach/gpio-pxa.h
index d14eeaf1632..99b4ce1b656 100644
--- a/arch/arm/mach-mmp/include/mach/gpio-pxa.h
+++ b/arch/arm/mach-mmp/include/mach/gpio-pxa.h
@@ -7,7 +7,7 @@
#define GPIO_REGS_VIRT (APB_VIRT_BASE + 0x19000)
#define BANK_OFF(n) (((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2))
-#define GPIO_REG(x) (GPIO_REGS_VIRT + (x))
+#define GPIO_REG(x) (*(volatile u32 *)(GPIO_REGS_VIRT + (x)))
#define NR_BUILTIN_GPIO IRQ_GPIO_NUM
diff --git a/arch/arm/mach-msm/devices-iommu.c b/arch/arm/mach-msm/devices-iommu.c
index 24030d0da6e..0fb7a17df39 100644
--- a/arch/arm/mach-msm/devices-iommu.c
+++ b/arch/arm/mach-msm/devices-iommu.c
@@ -18,6 +18,7 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/bootmem.h>
+#include <linux/module.h>
#include <mach/irqs.h>
#include <mach/iommu.h>
diff --git a/arch/arm/mach-mx5/board-mx51_babbage.c b/arch/arm/mach-mx5/board-mx51_babbage.c
index 5c837603ff0..24994bb5214 100644
--- a/arch/arm/mach-mx5/board-mx51_babbage.c
+++ b/arch/arm/mach-mx5/board-mx51_babbage.c
@@ -362,7 +362,7 @@ static void __init mx51_babbage_init(void)
{
iomux_v3_cfg_t usbh1stp = MX51_PAD_USBH1_STP__USBH1_STP;
iomux_v3_cfg_t power_key = NEW_PAD_CTRL(MX51_PAD_EIM_A27__GPIO2_21,
- PAD_CTL_SRE_FAST | PAD_CTL_DSE_HIGH | PAD_CTL_PUS_100K_UP);
+ PAD_CTL_SRE_FAST | PAD_CTL_DSE_HIGH);
imx51_soc_init();
diff --git a/arch/arm/mach-mx5/board-mx53_evk.c b/arch/arm/mach-mx5/board-mx53_evk.c
index 6bea31ab8f8..64bbfcea6f3 100644
--- a/arch/arm/mach-mx5/board-mx53_evk.c
+++ b/arch/arm/mach-mx5/board-mx53_evk.c
@@ -106,7 +106,7 @@ static inline void mx53_evk_fec_reset(void)
gpio_set_value(MX53_EVK_FEC_PHY_RST, 1);
}
-static struct fec_platform_data mx53_evk_fec_pdata = {
+static const struct fec_platform_data mx53_evk_fec_pdata __initconst = {
.phy = PHY_INTERFACE_MODE_RMII,
};
diff --git a/arch/arm/mach-mx5/board-mx53_loco.c b/arch/arm/mach-mx5/board-mx53_loco.c
index 7678f7734db..237bdecd933 100644
--- a/arch/arm/mach-mx5/board-mx53_loco.c
+++ b/arch/arm/mach-mx5/board-mx53_loco.c
@@ -242,7 +242,7 @@ static inline void mx53_loco_fec_reset(void)
gpio_set_value(LOCO_FEC_PHY_RST, 1);
}
-static struct fec_platform_data mx53_loco_fec_data = {
+static const struct fec_platform_data mx53_loco_fec_data __initconst = {
.phy = PHY_INTERFACE_MODE_RMII,
};
diff --git a/arch/arm/mach-mx5/board-mx53_smd.c b/arch/arm/mach-mx5/board-mx53_smd.c
index 59c0845eb4a..d42132a80e8 100644
--- a/arch/arm/mach-mx5/board-mx53_smd.c
+++ b/arch/arm/mach-mx5/board-mx53_smd.c
@@ -104,7 +104,7 @@ static inline void mx53_smd_fec_reset(void)
gpio_set_value(SMD_FEC_PHY_RST, 1);
}
-static struct fec_platform_data mx53_smd_fec_data = {
+static const struct fec_platform_data mx53_smd_fec_data __initconst = {
.phy = PHY_INTERFACE_MODE_RMII,
};
diff --git a/arch/arm/mach-mx5/cpu.c b/arch/arm/mach-mx5/cpu.c
index 5c5328257dc..5e2e7a84386 100644
--- a/arch/arm/mach-mx5/cpu.c
+++ b/arch/arm/mach-mx5/cpu.c
@@ -16,7 +16,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <mach/hardware.h>
-#include <asm/io.h>
+#include <linux/io.h>
static int mx5_cpu_rev = -1;
@@ -67,7 +67,8 @@ static int __init mx51_neon_fixup(void)
if (!cpu_is_mx51())
return 0;
- if (mx51_revision() < IMX_CHIP_REVISION_3_0 && (elf_hwcap & HWCAP_NEON)) {
+ if (mx51_revision() < IMX_CHIP_REVISION_3_0 &&
+ (elf_hwcap & HWCAP_NEON)) {
elf_hwcap &= ~HWCAP_NEON;
pr_info("Turning off NEON support, detected broken NEON implementation\n");
}
diff --git a/arch/arm/mach-mx5/imx51-dt.c b/arch/arm/mach-mx5/imx51-dt.c
index ccc61585659..596edd967db 100644
--- a/arch/arm/mach-mx5/imx51-dt.c
+++ b/arch/arm/mach-mx5/imx51-dt.c
@@ -44,20 +44,22 @@ static const struct of_dev_auxdata imx51_auxdata_lookup[] __initconst = {
{ /* sentinel */ }
};
-static void __init imx51_tzic_add_irq_domain(struct device_node *np,
+static int __init imx51_tzic_add_irq_domain(struct device_node *np,
struct device_node *interrupt_parent)
{
irq_domain_add_simple(np, 0);
+ return 0;
}
-static void __init imx51_gpio_add_irq_domain(struct device_node *np,
+static int __init imx51_gpio_add_irq_domain(struct device_node *np,
struct device_node *interrupt_parent)
{
- static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS -
- 32 * 4; /* imx51 gets 4 gpio ports */
+ static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
+ gpio_irq_base -= 32;
irq_domain_add_simple(np, gpio_irq_base);
- gpio_irq_base += 32;
+
+ return 0;
}
static const struct of_device_id imx51_irq_match[] __initconst = {
diff --git a/arch/arm/mach-mx5/imx53-dt.c b/arch/arm/mach-mx5/imx53-dt.c
index ccaa0b81b76..85bfd5ff21b 100644
--- a/arch/arm/mach-mx5/imx53-dt.c
+++ b/arch/arm/mach-mx5/imx53-dt.c
@@ -48,20 +48,22 @@ static const struct of_dev_auxdata imx53_auxdata_lookup[] __initconst = {
{ /* sentinel */ }
};
-static void __init imx53_tzic_add_irq_domain(struct device_node *np,
+static int __init imx53_tzic_add_irq_domain(struct device_node *np,
struct device_node *interrupt_parent)
{
irq_domain_add_simple(np, 0);
+ return 0;
}
-static void __init imx53_gpio_add_irq_domain(struct device_node *np,
+static int __init imx53_gpio_add_irq_domain(struct device_node *np,
struct device_node *interrupt_parent)
{
- static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS -
- 32 * 7; /* imx53 gets 7 gpio ports */
+ static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
+ gpio_irq_base -= 32;
irq_domain_add_simple(np, gpio_irq_base);
- gpio_irq_base += 32;
+
+ return 0;
}
static const struct of_device_id imx53_irq_match[] __initconst = {
diff --git a/arch/arm/mach-mx5/mm.c b/arch/arm/mach-mx5/mm.c
index 26eacc9d0d9..df4a508f240 100644
--- a/arch/arm/mach-mx5/mm.c
+++ b/arch/arm/mach-mx5/mm.c
@@ -23,7 +23,9 @@
static void imx5_idle(void)
{
- mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF);
+ if (!need_resched())
+ mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF);
+ local_irq_enable();
}
/*
@@ -89,7 +91,7 @@ void __init imx51_init_early(void)
mxc_set_cpu_type(MXC_CPU_MX51);
mxc_iomux_v3_init(MX51_IO_ADDRESS(MX51_IOMUXC_BASE_ADDR));
mxc_arch_reset_init(MX51_IO_ADDRESS(MX51_WDOG1_BASE_ADDR));
- imx_idle = imx5_idle;
+ pm_idle = imx5_idle;
}
void __init imx53_init_early(void)
diff --git a/arch/arm/mach-mxs/clock-mx28.c b/arch/arm/mach-mxs/clock-mx28.c
index 229ae349421..da6e4aad177 100644
--- a/arch/arm/mach-mxs/clock-mx28.c
+++ b/arch/arm/mach-mxs/clock-mx28.c
@@ -404,7 +404,7 @@ static int name##_set_rate(struct clk *clk, unsigned long rate) \
reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr); \
reg &= ~BM_CLKCTRL_##dr##_DIV; \
reg |= div << BP_CLKCTRL_##dr##_DIV; \
- if (reg | (1 << clk->enable_shift)) { \
+ if (reg & (1 << clk->enable_shift)) { \
pr_err("%s: clock is gated\n", __func__); \
return -EINVAL; \
} \
diff --git a/arch/arm/mach-mxs/include/mach/mx28.h b/arch/arm/mach-mxs/include/mach/mx28.h
index 75d86118b76..30c7990f3c0 100644
--- a/arch/arm/mach-mxs/include/mach/mx28.h
+++ b/arch/arm/mach-mxs/include/mach/mx28.h
@@ -104,8 +104,8 @@
#define MX28_INT_CAN1 9
#define MX28_INT_LRADC_TOUCH 10
#define MX28_INT_HSADC 13
-#define MX28_INT_IRADC_THRESH0 14
-#define MX28_INT_IRADC_THRESH1 15
+#define MX28_INT_LRADC_THRESH0 14
+#define MX28_INT_LRADC_THRESH1 15
#define MX28_INT_LRADC_CH0 16
#define MX28_INT_LRADC_CH1 17
#define MX28_INT_LRADC_CH2 18
diff --git a/arch/arm/mach-mxs/include/mach/mxs.h b/arch/arm/mach-mxs/include/mach/mxs.h
index 0d2d2b47099..bde5f663474 100644
--- a/arch/arm/mach-mxs/include/mach/mxs.h
+++ b/arch/arm/mach-mxs/include/mach/mxs.h
@@ -30,6 +30,7 @@
*/
#define cpu_is_mx23() ( \
machine_is_mx23evk() || \
+ machine_is_stmp378x() || \
0)
#define cpu_is_mx28() ( \
machine_is_mx28evk() || \
diff --git a/arch/arm/mach-mxs/mach-m28evk.c b/arch/arm/mach-mxs/mach-m28evk.c
index 3b1681e4f49..6b00577b702 100644
--- a/arch/arm/mach-mxs/mach-m28evk.c
+++ b/arch/arm/mach-mxs/mach-m28evk.c
@@ -361,6 +361,6 @@ static struct sys_timer m28evk_timer = {
MACHINE_START(M28EVK, "DENX M28 EVK")
.map_io = mx28_map_io,
.init_irq = mx28_init_irq,
- .init_machine = m28evk_init,
.timer = &m28evk_timer,
+ .init_machine = m28evk_init,
MACHINE_END
diff --git a/arch/arm/mach-mxs/mach-stmp378x_devb.c b/arch/arm/mach-mxs/mach-stmp378x_devb.c
index 177e53123a0..6834dea38c0 100644
--- a/arch/arm/mach-mxs/mach-stmp378x_devb.c
+++ b/arch/arm/mach-mxs/mach-stmp378x_devb.c
@@ -115,6 +115,6 @@ static struct sys_timer stmp378x_dvb_timer = {
MACHINE_START(STMP378X, "STMP378X")
.map_io = mx23_map_io,
.init_irq = mx23_init_irq,
- .init_machine = stmp378x_dvb_init,
.timer = &stmp378x_dvb_timer,
+ .init_machine = stmp378x_dvb_init,
MACHINE_END
diff --git a/arch/arm/mach-mxs/module-tx28.c b/arch/arm/mach-mxs/module-tx28.c
index 0fcff47009c..9a7b08b2a92 100644
--- a/arch/arm/mach-mxs/module-tx28.c
+++ b/arch/arm/mach-mxs/module-tx28.c
@@ -66,11 +66,11 @@ static const iomux_cfg_t tx28_fec1_pads[] __initconst = {
MX28_PAD_ENET0_CRS__ENET1_RX_EN,
};
-static struct fec_platform_data tx28_fec0_data = {
+static const struct fec_platform_data tx28_fec0_data __initconst = {
.phy = PHY_INTERFACE_MODE_RMII,
};
-static struct fec_platform_data tx28_fec1_data = {
+static const struct fec_platform_data tx28_fec1_data __initconst = {
.phy = PHY_INTERFACE_MODE_RMII,
};
diff --git a/arch/arm/mach-omap1/Kconfig b/arch/arm/mach-omap1/Kconfig
index e0a028161dd..73f287d6429 100644
--- a/arch/arm/mach-omap1/Kconfig
+++ b/arch/arm/mach-omap1/Kconfig
@@ -171,14 +171,6 @@ config MACH_OMAP_GENERIC
comment "OMAP CPU Speed"
depends on ARCH_OMAP1
-config OMAP_CLOCKS_SET_BY_BOOTLOADER
- bool "OMAP clocks set by bootloader"
- depends on ARCH_OMAP1
- help
- Enable this option to prevent the kernel from overriding the clock
- frequencies programmed by bootloader for MPU, DSP, MMUs, TC,
- internal LCD controller and MPU peripherals.
-
config OMAP_ARM_216MHZ
bool "OMAP ARM 216 MHz CPU (1710 only)"
depends on ARCH_OMAP1 && ARCH_OMAP16XX
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index 51bae31cf36..b0f15d234a1 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -302,8 +302,6 @@ static void __init ams_delta_init(void)
omap_cfg_reg(J19_1610_CAM_D6);
omap_cfg_reg(J18_1610_CAM_D7);
- iotable_init(ams_delta_io_desc, ARRAY_SIZE(ams_delta_io_desc));
-
omap_board_config = ams_delta_config;
omap_board_config_size = ARRAY_SIZE(ams_delta_config);
omap_serial_init();
@@ -373,10 +371,16 @@ static int __init ams_delta_modem_init(void)
}
arch_initcall(ams_delta_modem_init);
+static void __init ams_delta_map_io(void)
+{
+ omap15xx_map_io();
+ iotable_init(ams_delta_io_desc, ARRAY_SIZE(ams_delta_io_desc));
+}
+
MACHINE_START(AMS_DELTA, "Amstrad E3 (Delta)")
/* Maintainer: Jonathan McDowell <noodles@earth.li> */
.atag_offset = 0x100,
- .map_io = omap15xx_map_io,
+ .map_io = ams_delta_map_io,
.init_early = omap1_init_early,
.reserve = omap_reserve,
.init_irq = omap1_init_irq,
diff --git a/arch/arm/mach-omap1/clock.h b/arch/arm/mach-omap1/clock.h
index eaf09efb91c..16b1423b454 100644
--- a/arch/arm/mach-omap1/clock.h
+++ b/arch/arm/mach-omap1/clock.h
@@ -17,7 +17,8 @@
#include <plat/clock.h>
-extern int __init omap1_clk_init(void);
+int omap1_clk_init(void);
+void omap1_clk_late_init(void);
extern int omap1_clk_enable(struct clk *clk);
extern void omap1_clk_disable(struct clk *clk);
extern long omap1_clk_round_rate(struct clk *clk, unsigned long rate);
diff --git a/arch/arm/mach-omap1/clock_data.c b/arch/arm/mach-omap1/clock_data.c
index 92400b9eb69..9ff90a744a2 100644
--- a/arch/arm/mach-omap1/clock_data.c
+++ b/arch/arm/mach-omap1/clock_data.c
@@ -16,6 +16,8 @@
#include <linux/kernel.h>
#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
#include <linux/io.h>
#include <asm/mach-types.h> /* for machine_is_* */
@@ -767,6 +769,15 @@ static struct clk_functions omap1_clk_functions = {
.clk_disable_unused = omap1_clk_disable_unused,
};
+static void __init omap1_show_rates(void)
+{
+ pr_notice("Clocking rate (xtal/DPLL1/MPU): "
+ "%ld.%01ld/%ld.%01ld/%ld.%01ld MHz\n",
+ ck_ref.rate / 1000000, (ck_ref.rate / 100000) % 10,
+ ck_dpll1.rate / 1000000, (ck_dpll1.rate / 100000) % 10,
+ arm_ck.rate / 1000000, (arm_ck.rate / 100000) % 10);
+}
+
int __init omap1_clk_init(void)
{
struct omap_clk *c;
@@ -835,9 +846,12 @@ int __init omap1_clk_init(void)
/* We want to be in synchronous scalable mode */
omap_writew(0x1000, ARM_SYSST);
-#ifdef CONFIG_OMAP_CLOCKS_SET_BY_BOOTLOADER
- /* Use values set by bootloader. Determine PLL rate and recalculate
- * dependent clocks as if kernel had changed PLL or divisors.
+
+ /*
+ * Initially use the values set by bootloader. Determine PLL rate and
+ * recalculate dependent clocks as if kernel had changed PLL or
+ * divisors. See also omap1_clk_late_init() that can reprogram dpll1
+ * after the SRAM is initialized.
*/
{
unsigned pll_ctl_val = omap_readw(DPLL_CTL);
@@ -862,25 +876,10 @@ int __init omap1_clk_init(void)
}
}
}
-#else
- /* Find the highest supported frequency and enable it */
- if (omap1_select_table_rate(&virtual_ck_mpu, ~0)) {
- printk(KERN_ERR "System frequencies not set. Check your config.\n");
- /* Guess sane values (60MHz) */
- omap_writew(0x2290, DPLL_CTL);
- omap_writew(cpu_is_omap7xx() ? 0x3005 : 0x1005, ARM_CKCTL);
- ck_dpll1.rate = 60000000;
- }
-#endif
propagate_rate(&ck_dpll1);
/* Cache rates for clocks connected to ck_ref (not dpll1) */
propagate_rate(&ck_ref);
- printk(KERN_INFO "Clocking rate (xtal/DPLL1/MPU): "
- "%ld.%01ld/%ld.%01ld/%ld.%01ld MHz\n",
- ck_ref.rate / 1000000, (ck_ref.rate / 100000) % 10,
- ck_dpll1.rate / 1000000, (ck_dpll1.rate / 100000) % 10,
- arm_ck.rate / 1000000, (arm_ck.rate / 100000) % 10);
-
+ omap1_show_rates();
if (machine_is_omap_perseus2() || machine_is_omap_fsample()) {
/* Select slicer output as OMAP input clock */
omap_writew(omap_readw(OMAP7XX_PCC_UPLD_CTRL) & ~0x1,
@@ -925,3 +924,27 @@ int __init omap1_clk_init(void)
return 0;
}
+
+#define OMAP1_DPLL1_SANE_VALUE 60000000
+
+void __init omap1_clk_late_init(void)
+{
+ unsigned long rate = ck_dpll1.rate;
+
+ if (rate >= OMAP1_DPLL1_SANE_VALUE)
+ return;
+
+ /* System booting at unusable rate, force reprogramming of DPLL1 */
+ ck_dpll1_p->rate = 0;
+
+ /* Find the highest supported frequency and enable it */
+ if (omap1_select_table_rate(&virtual_ck_mpu, ~0)) {
+ pr_err("System frequencies not set, using default. Check your config.\n");
+ omap_writew(0x2290, DPLL_CTL);
+ omap_writew(cpu_is_omap7xx() ? 0x2005 : 0x0005, ARM_CKCTL);
+ ck_dpll1.rate = OMAP1_DPLL1_SANE_VALUE;
+ }
+ propagate_rate(&ck_dpll1);
+ omap1_show_rates();
+ loops_per_jiffy = cpufreq_scale(loops_per_jiffy, rate, ck_dpll1.rate);
+}
diff --git a/arch/arm/mach-omap1/devices.c b/arch/arm/mach-omap1/devices.c
index 48ef9888e82..475cb2f50d8 100644
--- a/arch/arm/mach-omap1/devices.c
+++ b/arch/arm/mach-omap1/devices.c
@@ -30,6 +30,8 @@
#include <plat/omap7xx.h>
#include <plat/mcbsp.h>
+#include "clock.h"
+
/*-------------------------------------------------------------------------*/
#if defined(CONFIG_RTC_DRV_OMAP) || defined(CONFIG_RTC_DRV_OMAP_MODULE)
@@ -293,6 +295,7 @@ static int __init omap1_init_devices(void)
return -ENODEV;
omap_sram_init();
+ omap1_clk_late_init();
/* please keep these calls, and their implementations above,
* in alphabetical order so they're easier to sort through.
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 50341471890..e1293aa513d 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -334,6 +334,7 @@ config MACH_OMAP4_PANDA
config OMAP3_EMU
bool "OMAP3 debugging peripherals"
depends on ARCH_OMAP3
+ select ARM_AMBA
select OC_ETM
help
Say Y here to enable debugging hardware of omap3
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 69ab1c06913..b009f17dee5 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -4,7 +4,7 @@
# Common support
obj-y := id.o io.o control.o mux.o devices.o serial.o gpmc.o timer.o pm.o \
- common.o gpio.o dma.o wd_timer.o
+ common.o gpio.o dma.o wd_timer.o display.o
omap-2-3-common = irq.o sdrc.o
hwmod-common = omap_hwmod.o \
@@ -264,7 +264,4 @@ smsc911x-$(CONFIG_SMSC911X) := gpmc-smsc911x.o
obj-y += $(smsc911x-m) $(smsc911x-y)
obj-$(CONFIG_ARCH_OMAP4) += hwspinlock.o
-disp-$(CONFIG_OMAP2_DSS) := display.o
-obj-y += $(disp-m) $(disp-y)
-
obj-y += common-board-devices.o twl-common.o
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index ba1aa07bdb2..c15c5c9c908 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -193,7 +193,7 @@ static struct platform_device rx51_charger_device = {
static void __init rx51_charger_init(void)
{
WARN_ON(gpio_request_one(RX51_USB_TRANSCEIVER_RST_GPIO,
- GPIOF_OUT_INIT_LOW, "isp1704_reset"));
+ GPIOF_OUT_INIT_HIGH, "isp1704_reset"));
platform_device_register(&rx51_charger_device);
}
diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
index 1fe35c24fba..942bb4f19f9 100644
--- a/arch/arm/mach-omap2/cpuidle34xx.c
+++ b/arch/arm/mach-omap2/cpuidle34xx.c
@@ -24,6 +24,7 @@
#include <linux/sched.h>
#include <linux/cpuidle.h>
+#include <linux/export.h>
#include <plat/prcm.h>
#include <plat/irqs.h>
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index adb2756e242..dce9905d64b 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -27,8 +27,35 @@
#include <plat/omap_hwmod.h>
#include <plat/omap_device.h>
#include <plat/omap-pm.h>
+#include <plat/common.h>
#include "control.h"
+#include "display.h"
+
+#define DISPC_CONTROL 0x0040
+#define DISPC_CONTROL2 0x0238
+#define DISPC_IRQSTATUS 0x0018
+
+#define DSS_SYSCONFIG 0x10
+#define DSS_SYSSTATUS 0x14
+#define DSS_CONTROL 0x40
+#define DSS_SDI_CONTROL 0x44
+#define DSS_PLL_CONTROL 0x48
+
+#define LCD_EN_MASK (0x1 << 0)
+#define DIGIT_EN_MASK (0x1 << 1)
+
+#define FRAMEDONE_IRQ_SHIFT 0
+#define EVSYNC_EVEN_IRQ_SHIFT 2
+#define EVSYNC_ODD_IRQ_SHIFT 3
+#define FRAMEDONE2_IRQ_SHIFT 22
+#define FRAMEDONETV_IRQ_SHIFT 24
+
+/*
+ * FRAMEDONE_IRQ_TIMEOUT: how long (in milliseconds) to wait during DISPC
+ * reset before deciding that something has gone wrong
+ */
+#define FRAMEDONE_IRQ_TIMEOUT 100
static struct platform_device omap_display_device = {
.name = "omapdss",
@@ -172,3 +199,135 @@ int __init omap_display_init(struct omap_dss_board_info *board_data)
return r;
}
+
+static void dispc_disable_outputs(void)
+{
+ u32 v, irq_mask = 0;
+ bool lcd_en, digit_en, lcd2_en = false;
+ int i;
+ struct omap_dss_dispc_dev_attr *da;
+ struct omap_hwmod *oh;
+
+ oh = omap_hwmod_lookup("dss_dispc");
+ if (!oh) {
+ WARN(1, "display: could not disable outputs during reset - could not find dss_dispc hwmod\n");
+ return;
+ }
+
+ if (!oh->dev_attr) {
+ pr_err("display: could not disable outputs during reset due to missing dev_attr\n");
+ return;
+ }
+
+ da = (struct omap_dss_dispc_dev_attr *)oh->dev_attr;
+
+ /* store value of LCDENABLE and DIGITENABLE bits */
+ v = omap_hwmod_read(oh, DISPC_CONTROL);
+ lcd_en = v & LCD_EN_MASK;
+ digit_en = v & DIGIT_EN_MASK;
+
+ /* store value of LCDENABLE for LCD2 */
+ if (da->manager_count > 2) {
+ v = omap_hwmod_read(oh, DISPC_CONTROL2);
+ lcd2_en = v & LCD_EN_MASK;
+ }
+
+ if (!(lcd_en | digit_en | lcd2_en))
+ return; /* no managers currently enabled */
+
+ /*
+ * If any manager was enabled, we need to disable it before
+ * DSS clocks are disabled or DISPC module is reset
+ */
+ if (lcd_en)
+ irq_mask |= 1 << FRAMEDONE_IRQ_SHIFT;
+
+ if (digit_en) {
+ if (da->has_framedonetv_irq) {
+ irq_mask |= 1 << FRAMEDONETV_IRQ_SHIFT;
+ } else {
+ irq_mask |= 1 << EVSYNC_EVEN_IRQ_SHIFT |
+ 1 << EVSYNC_ODD_IRQ_SHIFT;
+ }
+ }
+
+ if (lcd2_en)
+ irq_mask |= 1 << FRAMEDONE2_IRQ_SHIFT;
+
+ /*
+ * clear any previous FRAMEDONE, FRAMEDONETV,
+ * EVSYNC_EVEN/ODD or FRAMEDONE2 interrupts
+ */
+ omap_hwmod_write(irq_mask, oh, DISPC_IRQSTATUS);
+
+ /* disable LCD and TV managers */
+ v = omap_hwmod_read(oh, DISPC_CONTROL);
+ v &= ~(LCD_EN_MASK | DIGIT_EN_MASK);
+ omap_hwmod_write(v, oh, DISPC_CONTROL);
+
+ /* disable LCD2 manager */
+ if (da->manager_count > 2) {
+ v = omap_hwmod_read(oh, DISPC_CONTROL2);
+ v &= ~LCD_EN_MASK;
+ omap_hwmod_write(v, oh, DISPC_CONTROL2);
+ }
+
+ i = 0;
+ while ((omap_hwmod_read(oh, DISPC_IRQSTATUS) & irq_mask) !=
+ irq_mask) {
+ i++;
+ if (i > FRAMEDONE_IRQ_TIMEOUT) {
+ pr_err("didn't get FRAMEDONE1/2 or TV interrupt\n");
+ break;
+ }
+ mdelay(1);
+ }
+}
+
+#define MAX_MODULE_SOFTRESET_WAIT 10000
+int omap_dss_reset(struct omap_hwmod *oh)
+{
+ struct omap_hwmod_opt_clk *oc;
+ int c = 0;
+ int i, r;
+
+ if (!(oh->class->sysc->sysc_flags & SYSS_HAS_RESET_STATUS)) {
+ pr_err("dss_core: hwmod data doesn't contain reset data\n");
+ return -EINVAL;
+ }
+
+ for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
+ if (oc->_clk)
+ clk_enable(oc->_clk);
+
+ dispc_disable_outputs();
+
+ /* clear SDI registers */
+ if (cpu_is_omap3430()) {
+ omap_hwmod_write(0x0, oh, DSS_SDI_CONTROL);
+ omap_hwmod_write(0x0, oh, DSS_PLL_CONTROL);
+ }
+
+ /*
+ * clear DSS_CONTROL register to switch DSS clock sources to
+ * PRCM clock, if any
+ */
+ omap_hwmod_write(0x0, oh, DSS_CONTROL);
+
+ omap_test_timeout((omap_hwmod_read(oh, oh->class->sysc->syss_offs)
+ & SYSS_RESETDONE_MASK),
+ MAX_MODULE_SOFTRESET_WAIT, c);
+
+ if (c == MAX_MODULE_SOFTRESET_WAIT)
+ pr_warning("dss_core: waiting for reset to finish failed\n");
+ else
+ pr_debug("dss_core: softreset done\n");
+
+ for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
+ if (oc->_clk)
+ clk_disable(oc->_clk);
+
+ r = (c == MAX_MODULE_SOFTRESET_WAIT) ? -ETIMEDOUT : 0;
+
+ return r;
+}
diff --git a/arch/arm/mach-omap2/display.h b/arch/arm/mach-omap2/display.h
new file mode 100644
index 00000000000..b871b017b35
--- /dev/null
+++ b/arch/arm/mach-omap2/display.h
@@ -0,0 +1,29 @@
+/*
+ * display.h - OMAP2+ integration-specific DSS header
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2_DISPLAY_H
+#define __ARCH_ARM_MACH_OMAP2_DISPLAY_H
+
+#include <linux/kernel.h>
+
+struct omap_dss_dispc_dev_attr {
+ u8 manager_count;
+ bool has_framedonetv_irq;
+};
+
+#endif
diff --git a/arch/arm/mach-omap2/io.h b/arch/arm/mach-omap2/io.h
deleted file mode 100644
index e69de29bb2d..00000000000
--- a/arch/arm/mach-omap2/io.h
+++ /dev/null
diff --git a/arch/arm/mach-omap2/mcbsp.c b/arch/arm/mach-omap2/mcbsp.c
index 292eee3be15..28fcb27005d 100644
--- a/arch/arm/mach-omap2/mcbsp.c
+++ b/arch/arm/mach-omap2/mcbsp.c
@@ -145,6 +145,9 @@ static int omap_init_mcbsp(struct omap_hwmod *oh, void *unused)
pdata->reg_size = 4;
pdata->has_ccr = true;
}
+ pdata->set_clk_src = omap2_mcbsp_set_clk_src;
+ if (id == 1)
+ pdata->mux_signal = omap2_mcbsp1_mux_rx_clk;
if (oh->class->rev == MCBSP_CONFIG_TYPE3) {
if (id == 2)
@@ -174,9 +177,6 @@ static int omap_init_mcbsp(struct omap_hwmod *oh, void *unused)
name, oh->name);
return PTR_ERR(pdev);
}
- pdata->set_clk_src = omap2_mcbsp_set_clk_src;
- if (id == 1)
- pdata->mux_signal = omap2_mcbsp1_mux_rx_clk;
omap_mcbsp_count++;
return 0;
}
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 6b3088db83b..207a2ff9a8c 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -749,7 +749,7 @@ static int _count_mpu_irqs(struct omap_hwmod *oh)
ohii = &oh->mpu_irqs[i++];
} while (ohii->irq != -1);
- return i;
+ return i-1;
}
/**
@@ -772,7 +772,7 @@ static int _count_sdma_reqs(struct omap_hwmod *oh)
ohdi = &oh->sdma_reqs[i++];
} while (ohdi->dma_req != -1);
- return i;
+ return i-1;
}
/**
@@ -795,7 +795,7 @@ static int _count_ocp_if_addr_spaces(struct omap_hwmod_ocp_if *os)
mem = &os->addr[i++];
} while (mem->pa_start != mem->pa_end);
- return i;
+ return i-1;
}
/**
diff --git a/arch/arm/mach-omap2/omap_hwmod_2420_data.c b/arch/arm/mach-omap2/omap_hwmod_2420_data.c
index 6d720621352..a5409ce3f32 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2420_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2420_data.c
@@ -875,6 +875,10 @@ static struct omap_hwmod_ocp_if *omap2420_dss_slaves[] = {
};
static struct omap_hwmod_opt_clk dss_opt_clks[] = {
+ /*
+ * The DSS HW needs all DSS clocks enabled during reset. The dss_core
+ * driver does not use these clocks.
+ */
{ .role = "tv_clk", .clk = "dss_54m_fck" },
{ .role = "sys_clk", .clk = "dss2_fck" },
};
@@ -899,7 +903,7 @@ static struct omap_hwmod omap2420_dss_core_hwmod = {
.slaves_cnt = ARRAY_SIZE(omap2420_dss_slaves),
.masters = omap2420_dss_masters,
.masters_cnt = ARRAY_SIZE(omap2420_dss_masters),
- .flags = HWMOD_NO_IDLEST,
+ .flags = HWMOD_NO_IDLEST | HWMOD_CONTROL_OPT_CLKS_IN_RESET,
};
/* l4_core -> dss_dispc */
@@ -939,6 +943,7 @@ static struct omap_hwmod omap2420_dss_dispc_hwmod = {
.slaves = omap2420_dss_dispc_slaves,
.slaves_cnt = ARRAY_SIZE(omap2420_dss_dispc_slaves),
.flags = HWMOD_NO_IDLEST,
+ .dev_attr = &omap2_3_dss_dispc_dev_attr
};
/* l4_core -> dss_rfbi */
@@ -961,6 +966,10 @@ static struct omap_hwmod_ocp_if *omap2420_dss_rfbi_slaves[] = {
&omap2420_l4_core__dss_rfbi,
};
+static struct omap_hwmod_opt_clk dss_rfbi_opt_clks[] = {
+ { .role = "ick", .clk = "dss_ick" },
+};
+
static struct omap_hwmod omap2420_dss_rfbi_hwmod = {
.name = "dss_rfbi",
.class = &omap2_rfbi_hwmod_class,
@@ -972,6 +981,8 @@ static struct omap_hwmod omap2420_dss_rfbi_hwmod = {
.module_offs = CORE_MOD,
},
},
+ .opt_clks = dss_rfbi_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(dss_rfbi_opt_clks),
.slaves = omap2420_dss_rfbi_slaves,
.slaves_cnt = ARRAY_SIZE(omap2420_dss_rfbi_slaves),
.flags = HWMOD_NO_IDLEST,
@@ -981,7 +992,7 @@ static struct omap_hwmod omap2420_dss_rfbi_hwmod = {
static struct omap_hwmod_ocp_if omap2420_l4_core__dss_venc = {
.master = &omap2420_l4_core_hwmod,
.slave = &omap2420_dss_venc_hwmod,
- .clk = "dss_54m_fck",
+ .clk = "dss_ick",
.addr = omap2_dss_venc_addrs,
.fw = {
.omap2 = {
@@ -1001,7 +1012,7 @@ static struct omap_hwmod_ocp_if *omap2420_dss_venc_slaves[] = {
static struct omap_hwmod omap2420_dss_venc_hwmod = {
.name = "dss_venc",
.class = &omap2_venc_hwmod_class,
- .main_clk = "dss1_fck",
+ .main_clk = "dss_54m_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
diff --git a/arch/arm/mach-omap2/omap_hwmod_2430_data.c b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
index a2580d01c3f..c4f56cb60d7 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2430_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
@@ -942,6 +942,10 @@ static struct omap_hwmod_ocp_if *omap2430_dss_slaves[] = {
};
static struct omap_hwmod_opt_clk dss_opt_clks[] = {
+ /*
+ * The DSS HW needs all DSS clocks enabled during reset. The dss_core
+ * driver does not use these clocks.
+ */
{ .role = "tv_clk", .clk = "dss_54m_fck" },
{ .role = "sys_clk", .clk = "dss2_fck" },
};
@@ -966,7 +970,7 @@ static struct omap_hwmod omap2430_dss_core_hwmod = {
.slaves_cnt = ARRAY_SIZE(omap2430_dss_slaves),
.masters = omap2430_dss_masters,
.masters_cnt = ARRAY_SIZE(omap2430_dss_masters),
- .flags = HWMOD_NO_IDLEST,
+ .flags = HWMOD_NO_IDLEST | HWMOD_CONTROL_OPT_CLKS_IN_RESET,
};
/* l4_core -> dss_dispc */
@@ -1000,6 +1004,7 @@ static struct omap_hwmod omap2430_dss_dispc_hwmod = {
.slaves = omap2430_dss_dispc_slaves,
.slaves_cnt = ARRAY_SIZE(omap2430_dss_dispc_slaves),
.flags = HWMOD_NO_IDLEST,
+ .dev_attr = &omap2_3_dss_dispc_dev_attr
};
/* l4_core -> dss_rfbi */
@@ -1016,6 +1021,10 @@ static struct omap_hwmod_ocp_if *omap2430_dss_rfbi_slaves[] = {
&omap2430_l4_core__dss_rfbi,
};
+static struct omap_hwmod_opt_clk dss_rfbi_opt_clks[] = {
+ { .role = "ick", .clk = "dss_ick" },
+};
+
static struct omap_hwmod omap2430_dss_rfbi_hwmod = {
.name = "dss_rfbi",
.class = &omap2_rfbi_hwmod_class,
@@ -1027,6 +1036,8 @@ static struct omap_hwmod omap2430_dss_rfbi_hwmod = {
.module_offs = CORE_MOD,
},
},
+ .opt_clks = dss_rfbi_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(dss_rfbi_opt_clks),
.slaves = omap2430_dss_rfbi_slaves,
.slaves_cnt = ARRAY_SIZE(omap2430_dss_rfbi_slaves),
.flags = HWMOD_NO_IDLEST,
@@ -1036,7 +1047,7 @@ static struct omap_hwmod omap2430_dss_rfbi_hwmod = {
static struct omap_hwmod_ocp_if omap2430_l4_core__dss_venc = {
.master = &omap2430_l4_core_hwmod,
.slave = &omap2430_dss_venc_hwmod,
- .clk = "dss_54m_fck",
+ .clk = "dss_ick",
.addr = omap2_dss_venc_addrs,
.flags = OCPIF_SWSUP_IDLE,
.user = OCP_USER_MPU | OCP_USER_SDMA,
@@ -1050,7 +1061,7 @@ static struct omap_hwmod_ocp_if *omap2430_dss_venc_slaves[] = {
static struct omap_hwmod omap2430_dss_venc_hwmod = {
.name = "dss_venc",
.class = &omap2_venc_hwmod_class,
- .main_clk = "dss1_fck",
+ .main_clk = "dss_54m_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c
index c451729d289..c11273da5dc 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c
@@ -11,6 +11,7 @@
#include <plat/omap_hwmod.h>
#include <plat/serial.h>
#include <plat/dma.h>
+#include <plat/common.h>
#include <mach/irqs.h>
@@ -43,13 +44,15 @@ static struct omap_hwmod_class_sysconfig omap2_dss_sysc = {
.rev_offs = 0x0000,
.sysc_offs = 0x0010,
.syss_offs = 0x0014,
- .sysc_flags = (SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
+ .sysc_flags = (SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
+ SYSS_HAS_RESET_STATUS),
.sysc_fields = &omap_hwmod_sysc_type1,
};
struct omap_hwmod_class omap2_dss_hwmod_class = {
.name = "dss",
.sysc = &omap2_dss_sysc,
+ .reset = omap_dss_reset,
};
/*
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index bc9035ec87f..7f8915ad509 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -1369,9 +1369,14 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_slaves[] = {
};
static struct omap_hwmod_opt_clk dss_opt_clks[] = {
- { .role = "tv_clk", .clk = "dss_tv_fck" },
- { .role = "video_clk", .clk = "dss_96m_fck" },
+ /*
+ * The DSS HW needs all DSS clocks enabled during reset. The dss_core
+ * driver does not use these clocks.
+ */
{ .role = "sys_clk", .clk = "dss2_alwon_fck" },
+ { .role = "tv_clk", .clk = "dss_tv_fck" },
+ /* required only on OMAP3430 */
+ { .role = "tv_dac_clk", .clk = "dss_96m_fck" },
};
static struct omap_hwmod omap3430es1_dss_core_hwmod = {
@@ -1394,11 +1399,12 @@ static struct omap_hwmod omap3430es1_dss_core_hwmod = {
.slaves_cnt = ARRAY_SIZE(omap3430es1_dss_slaves),
.masters = omap3xxx_dss_masters,
.masters_cnt = ARRAY_SIZE(omap3xxx_dss_masters),
- .flags = HWMOD_NO_IDLEST,
+ .flags = HWMOD_NO_IDLEST | HWMOD_CONTROL_OPT_CLKS_IN_RESET,
};
static struct omap_hwmod omap3xxx_dss_core_hwmod = {
.name = "dss_core",
+ .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
.class = &omap2_dss_hwmod_class,
.main_clk = "dss1_alwon_fck", /* instead of dss_fck */
.sdma_reqs = omap3xxx_dss_sdma_chs,
@@ -1456,6 +1462,7 @@ static struct omap_hwmod omap3xxx_dss_dispc_hwmod = {
.slaves = omap3xxx_dss_dispc_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_dss_dispc_slaves),
.flags = HWMOD_NO_IDLEST,
+ .dev_attr = &omap2_3_dss_dispc_dev_attr
};
/*
@@ -1486,6 +1493,7 @@ static struct omap_hwmod_addr_space omap3xxx_dss_dsi1_addrs[] = {
static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_dsi1 = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap3xxx_dss_dsi1_hwmod,
+ .clk = "dss_ick",
.addr = omap3xxx_dss_dsi1_addrs,
.fw = {
.omap2 = {
@@ -1502,6 +1510,10 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_dsi1_slaves[] = {
&omap3xxx_l4_core__dss_dsi1,
};
+static struct omap_hwmod_opt_clk dss_dsi1_opt_clks[] = {
+ { .role = "sys_clk", .clk = "dss2_alwon_fck" },
+};
+
static struct omap_hwmod omap3xxx_dss_dsi1_hwmod = {
.name = "dss_dsi1",
.class = &omap3xxx_dsi_hwmod_class,
@@ -1514,6 +1526,8 @@ static struct omap_hwmod omap3xxx_dss_dsi1_hwmod = {
.module_offs = OMAP3430_DSS_MOD,
},
},
+ .opt_clks = dss_dsi1_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(dss_dsi1_opt_clks),
.slaves = omap3xxx_dss_dsi1_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_dss_dsi1_slaves),
.flags = HWMOD_NO_IDLEST,
@@ -1540,6 +1554,10 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_rfbi_slaves[] = {
&omap3xxx_l4_core__dss_rfbi,
};
+static struct omap_hwmod_opt_clk dss_rfbi_opt_clks[] = {
+ { .role = "ick", .clk = "dss_ick" },
+};
+
static struct omap_hwmod omap3xxx_dss_rfbi_hwmod = {
.name = "dss_rfbi",
.class = &omap2_rfbi_hwmod_class,
@@ -1551,6 +1569,8 @@ static struct omap_hwmod omap3xxx_dss_rfbi_hwmod = {
.module_offs = OMAP3430_DSS_MOD,
},
},
+ .opt_clks = dss_rfbi_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(dss_rfbi_opt_clks),
.slaves = omap3xxx_dss_rfbi_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_dss_rfbi_slaves),
.flags = HWMOD_NO_IDLEST,
@@ -1560,7 +1580,7 @@ static struct omap_hwmod omap3xxx_dss_rfbi_hwmod = {
static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_venc = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap3xxx_dss_venc_hwmod,
- .clk = "dss_tv_fck",
+ .clk = "dss_ick",
.addr = omap2_dss_venc_addrs,
.fw = {
.omap2 = {
@@ -1578,10 +1598,15 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_venc_slaves[] = {
&omap3xxx_l4_core__dss_venc,
};
+static struct omap_hwmod_opt_clk dss_venc_opt_clks[] = {
+ /* required only on OMAP3430 */
+ { .role = "tv_dac_clk", .clk = "dss_96m_fck" },
+};
+
static struct omap_hwmod omap3xxx_dss_venc_hwmod = {
.name = "dss_venc",
.class = &omap2_venc_hwmod_class,
- .main_clk = "dss1_alwon_fck",
+ .main_clk = "dss_tv_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
@@ -1589,6 +1614,8 @@ static struct omap_hwmod omap3xxx_dss_venc_hwmod = {
.module_offs = OMAP3430_DSS_MOD,
},
},
+ .opt_clks = dss_venc_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(dss_venc_opt_clks),
.slaves = omap3xxx_dss_venc_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_dss_venc_slaves),
.flags = HWMOD_NO_IDLEST,
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index 7695e5d4331..daaf165af69 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -30,6 +30,7 @@
#include <plat/mmc.h>
#include <plat/i2c.h>
#include <plat/dmtimer.h>
+#include <plat/common.h>
#include "omap_hwmod_common_data.h"
@@ -1187,6 +1188,7 @@ static struct omap_hwmod_class_sysconfig omap44xx_dss_sysc = {
static struct omap_hwmod_class omap44xx_dss_hwmod_class = {
.name = "dss",
.sysc = &omap44xx_dss_sysc,
+ .reset = omap_dss_reset,
};
/* dss */
@@ -1240,12 +1242,12 @@ static struct omap_hwmod_ocp_if *omap44xx_dss_slaves[] = {
static struct omap_hwmod_opt_clk dss_opt_clks[] = {
{ .role = "sys_clk", .clk = "dss_sys_clk" },
{ .role = "tv_clk", .clk = "dss_tv_clk" },
- { .role = "dss_clk", .clk = "dss_dss_clk" },
- { .role = "video_clk", .clk = "dss_48mhz_clk" },
+ { .role = "hdmi_clk", .clk = "dss_48mhz_clk" },
};
static struct omap_hwmod omap44xx_dss_hwmod = {
.name = "dss_core",
+ .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
.class = &omap44xx_dss_hwmod_class,
.clkdm_name = "l3_dss_clkdm",
.main_clk = "dss_dss_clk",
@@ -1325,6 +1327,11 @@ static struct omap_hwmod_addr_space omap44xx_dss_dispc_addrs[] = {
{ }
};
+static struct omap_dss_dispc_dev_attr omap44xx_dss_dispc_dev_attr = {
+ .manager_count = 3,
+ .has_framedonetv_irq = 1
+};
+
/* l4_per -> dss_dispc */
static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_dispc = {
.master = &omap44xx_l4_per_hwmod,
@@ -1340,12 +1347,6 @@ static struct omap_hwmod_ocp_if *omap44xx_dss_dispc_slaves[] = {
&omap44xx_l4_per__dss_dispc,
};
-static struct omap_hwmod_opt_clk dss_dispc_opt_clks[] = {
- { .role = "sys_clk", .clk = "dss_sys_clk" },
- { .role = "tv_clk", .clk = "dss_tv_clk" },
- { .role = "hdmi_clk", .clk = "dss_48mhz_clk" },
-};
-
static struct omap_hwmod omap44xx_dss_dispc_hwmod = {
.name = "dss_dispc",
.class = &omap44xx_dispc_hwmod_class,
@@ -1359,10 +1360,9 @@ static struct omap_hwmod omap44xx_dss_dispc_hwmod = {
.context_offs = OMAP4_RM_DSS_DSS_CONTEXT_OFFSET,
},
},
- .opt_clks = dss_dispc_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(dss_dispc_opt_clks),
.slaves = omap44xx_dss_dispc_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_dss_dispc_slaves),
+ .dev_attr = &omap44xx_dss_dispc_dev_attr
};
/*
@@ -1624,7 +1624,7 @@ static struct omap_hwmod omap44xx_dss_hdmi_hwmod = {
.clkdm_name = "l3_dss_clkdm",
.mpu_irqs = omap44xx_dss_hdmi_irqs,
.sdma_reqs = omap44xx_dss_hdmi_sdma_reqs,
- .main_clk = "dss_dss_clk",
+ .main_clk = "dss_48mhz_clk",
.prcm = {
.omap4 = {
.clkctrl_offs = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET,
@@ -1785,7 +1785,7 @@ static struct omap_hwmod omap44xx_dss_venc_hwmod = {
.name = "dss_venc",
.class = &omap44xx_venc_hwmod_class,
.clkdm_name = "l3_dss_clkdm",
- .main_clk = "dss_dss_clk",
+ .main_clk = "dss_tv_clk",
.prcm = {
.omap4 = {
.clkctrl_offs = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET,
diff --git a/arch/arm/mach-omap2/omap_hwmod_common_data.c b/arch/arm/mach-omap2/omap_hwmod_common_data.c
index de832ebc93a..51e5418899f 100644
--- a/arch/arm/mach-omap2/omap_hwmod_common_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_common_data.c
@@ -49,3 +49,7 @@ struct omap_hwmod_sysc_fields omap_hwmod_sysc_type2 = {
.srst_shift = SYSC_TYPE2_SOFTRESET_SHIFT,
};
+struct omap_dss_dispc_dev_attr omap2_3_dss_dispc_dev_attr = {
+ .manager_count = 2,
+ .has_framedonetv_irq = 0
+};
diff --git a/arch/arm/mach-omap2/omap_hwmod_common_data.h b/arch/arm/mach-omap2/omap_hwmod_common_data.h
index 39a7c37f458..ad5d8f04c0b 100644
--- a/arch/arm/mach-omap2/omap_hwmod_common_data.h
+++ b/arch/arm/mach-omap2/omap_hwmod_common_data.h
@@ -16,6 +16,8 @@
#include <plat/omap_hwmod.h>
+#include "display.h"
+
/* Common address space across OMAP2xxx */
extern struct omap_hwmod_addr_space omap2xxx_uart1_addr_space[];
extern struct omap_hwmod_addr_space omap2xxx_uart2_addr_space[];
@@ -111,4 +113,6 @@ extern struct omap_hwmod_class omap2xxx_dma_hwmod_class;
extern struct omap_hwmod_class omap2xxx_mailbox_hwmod_class;
extern struct omap_hwmod_class omap2xxx_mcspi_class;
+extern struct omap_dss_dispc_dev_attr omap2_3_dss_dispc_dev_attr;
+
#endif
diff --git a/arch/arm/mach-omap2/omap_l3_noc.c b/arch/arm/mach-omap2/omap_l3_noc.c
index 6a66aa5e2a5..d15225ff5c4 100644
--- a/arch/arm/mach-omap2/omap_l3_noc.c
+++ b/arch/arm/mach-omap2/omap_l3_noc.c
@@ -237,7 +237,7 @@ static int __devexit omap4_l3_remove(struct platform_device *pdev)
static const struct of_device_id l3_noc_match[] = {
{.compatible = "ti,omap4-l3-noc", },
{},
-}
+};
MODULE_DEVICE_TABLE(of, l3_noc_match);
#else
#define l3_noc_match NULL
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
index 1e79bdf313e..00bff46ca48 100644
--- a/arch/arm/mach-omap2/pm.c
+++ b/arch/arm/mach-omap2/pm.c
@@ -24,6 +24,7 @@
#include "powerdomain.h"
#include "clockdomain.h"
#include "pm.h"
+#include "twl-common.h"
static struct omap_device_pm_latency *pm_lats;
@@ -226,11 +227,8 @@ postcore_initcall(omap2_common_pm_init);
static int __init omap2_common_pm_late_init(void)
{
- /* Init the OMAP TWL parameters */
- omap3_twl_init();
- omap4_twl_init();
-
/* Init the voltage layer */
+ omap_pmic_late_init();
omap_voltage_late_init();
/* Initialize the voltages */
diff --git a/arch/arm/mach-omap2/smartreflex.c b/arch/arm/mach-omap2/smartreflex.c
index 6a4f6839a7d..cf246b39bac 100644
--- a/arch/arm/mach-omap2/smartreflex.c
+++ b/arch/arm/mach-omap2/smartreflex.c
@@ -139,7 +139,7 @@ static irqreturn_t sr_interrupt(int irq, void *data)
sr_write_reg(sr_info, ERRCONFIG_V1, status);
} else if (sr_info->ip_type == SR_TYPE_V2) {
/* Read the status bits */
- sr_read_reg(sr_info, IRQSTATUS);
+ status = sr_read_reg(sr_info, IRQSTATUS);
/* Clear them by writing back */
sr_write_reg(sr_info, IRQSTATUS, status);
diff --git a/arch/arm/mach-omap2/twl-common.c b/arch/arm/mach-omap2/twl-common.c
index 52243577216..10b20c652e5 100644
--- a/arch/arm/mach-omap2/twl-common.c
+++ b/arch/arm/mach-omap2/twl-common.c
@@ -30,6 +30,7 @@
#include <plat/usb.h>
#include "twl-common.h"
+#include "pm.h"
static struct i2c_board_info __initdata pmic_i2c_board_info = {
.addr = 0x48,
@@ -48,6 +49,16 @@ void __init omap_pmic_init(int bus, u32 clkrate,
omap_register_i2c_bus(bus, clkrate, &pmic_i2c_board_info, 1);
}
+void __init omap_pmic_late_init(void)
+{
+ /* Init the OMAP TWL parameters (if PMIC has been registered) */
+ if (!pmic_i2c_board_info.irq)
+ return;
+
+ omap3_twl_init();
+ omap4_twl_init();
+}
+
#if defined(CONFIG_ARCH_OMAP3)
static struct twl4030_usb_data omap3_usb_pdata = {
.usb_mode = T2_USB_MODE_ULPI,
diff --git a/arch/arm/mach-omap2/twl-common.h b/arch/arm/mach-omap2/twl-common.h
index 5e83a5bd37f..275dde8cb27 100644
--- a/arch/arm/mach-omap2/twl-common.h
+++ b/arch/arm/mach-omap2/twl-common.h
@@ -1,6 +1,8 @@
#ifndef __OMAP_PMIC_COMMON__
#define __OMAP_PMIC_COMMON__
+#include <plat/irqs.h>
+
#define TWL_COMMON_PDATA_USB (1 << 0)
#define TWL_COMMON_PDATA_BCI (1 << 1)
#define TWL_COMMON_PDATA_MADC (1 << 2)
@@ -30,6 +32,7 @@ struct twl4030_platform_data;
void omap_pmic_init(int bus, u32 clkrate, const char *pmic_type, int pmic_irq,
struct twl4030_platform_data *pmic_data);
+void omap_pmic_late_init(void);
static inline void omap2_pmic_init(const char *pmic_type,
struct twl4030_platform_data *pmic_data)
diff --git a/arch/arm/mach-prima2/pm.c b/arch/arm/mach-prima2/pm.c
index cb53160f6c5..26ebb57719d 100644
--- a/arch/arm/mach-prima2/pm.c
+++ b/arch/arm/mach-prima2/pm.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/suspend.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
diff --git a/arch/arm/mach-prima2/prima2.c b/arch/arm/mach-prima2/prima2.c
index ef555c04196..a12b689a870 100644
--- a/arch/arm/mach-prima2/prima2.c
+++ b/arch/arm/mach-prima2/prima2.c
@@ -8,6 +8,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
+#include <asm/sizes.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <linux/of.h>
diff --git a/arch/arm/mach-pxa/balloon3.c b/arch/arm/mach-pxa/balloon3.c
index fc0b8544e17..4b81f59a4cb 100644
--- a/arch/arm/mach-pxa/balloon3.c
+++ b/arch/arm/mach-pxa/balloon3.c
@@ -307,7 +307,7 @@ static inline void balloon3_mmc_init(void) {}
/******************************************************************************
* USB Gadget
******************************************************************************/
-#if defined(CONFIG_USB_GADGET_PXA27X)||defined(CONFIG_USB_GADGET_PXA27X_MODULE)
+#if defined(CONFIG_USB_PXA27X)||defined(CONFIG_USB_PXA27X_MODULE)
static void balloon3_udc_command(int cmd)
{
if (cmd == PXA2XX_UDC_CMD_CONNECT)
diff --git a/arch/arm/mach-pxa/colibri-pxa320.c b/arch/arm/mach-pxa/colibri-pxa320.c
index 692e1ffc558..d23b92b8048 100644
--- a/arch/arm/mach-pxa/colibri-pxa320.c
+++ b/arch/arm/mach-pxa/colibri-pxa320.c
@@ -146,7 +146,7 @@ static void __init colibri_pxa320_init_eth(void)
static inline void __init colibri_pxa320_init_eth(void) {}
#endif /* CONFIG_AX88796 */
-#if defined(CONFIG_USB_GADGET_PXA27X)||defined(CONFIG_USB_GADGET_PXA27X_MODULE)
+#if defined(CONFIG_USB_PXA27X)||defined(CONFIG_USB_PXA27X_MODULE)
static struct gpio_vbus_mach_info colibri_pxa320_gpio_vbus_info = {
.gpio_vbus = mfp_to_gpio(MFP_PIN_GPIO96),
.gpio_pullup = -1,
diff --git a/arch/arm/mach-pxa/gumstix.c b/arch/arm/mach-pxa/gumstix.c
index 9c8208ca041..ffdd70dad32 100644
--- a/arch/arm/mach-pxa/gumstix.c
+++ b/arch/arm/mach-pxa/gumstix.c
@@ -106,7 +106,7 @@ static void __init gumstix_mmc_init(void)
}
#endif
-#ifdef CONFIG_USB_GADGET_PXA25X
+#ifdef CONFIG_USB_PXA25X
static struct gpio_vbus_mach_info gumstix_udc_info = {
.gpio_vbus = GPIO_GUMSTIX_USB_GPIOn,
.gpio_pullup = GPIO_GUMSTIX_USB_GPIOx,
diff --git a/arch/arm/mach-pxa/include/mach/palm27x.h b/arch/arm/mach-pxa/include/mach/palm27x.h
index f80bbe246af..d4eac3d6ffb 100644
--- a/arch/arm/mach-pxa/include/mach/palm27x.h
+++ b/arch/arm/mach-pxa/include/mach/palm27x.h
@@ -37,8 +37,8 @@ extern void __init palm27x_lcd_init(int power,
#define palm27x_lcd_init(power, mode) do {} while (0)
#endif
-#if defined(CONFIG_USB_GADGET_PXA27X) || \
- defined(CONFIG_USB_GADGET_PXA27X_MODULE)
+#if defined(CONFIG_USB_PXA27X) || \
+ defined(CONFIG_USB_PXA27X_MODULE)
extern void __init palm27x_udc_init(int vbus, int pullup,
int vbus_inverted);
#else
diff --git a/arch/arm/mach-pxa/palm27x.c b/arch/arm/mach-pxa/palm27x.c
index 325c245c0a0..fbc10d7b95d 100644
--- a/arch/arm/mach-pxa/palm27x.c
+++ b/arch/arm/mach-pxa/palm27x.c
@@ -164,8 +164,8 @@ void __init palm27x_lcd_init(int power, struct pxafb_mode_info *mode)
/******************************************************************************
* USB Gadget
******************************************************************************/
-#if defined(CONFIG_USB_GADGET_PXA27X) || \
- defined(CONFIG_USB_GADGET_PXA27X_MODULE)
+#if defined(CONFIG_USB_PXA27X) || \
+ defined(CONFIG_USB_PXA27X_MODULE)
static struct gpio_vbus_mach_info palm27x_udc_info = {
.gpio_vbus_inverted = 1,
};
diff --git a/arch/arm/mach-pxa/palmtc.c b/arch/arm/mach-pxa/palmtc.c
index 6ec7caefb37..2c24c67fd92 100644
--- a/arch/arm/mach-pxa/palmtc.c
+++ b/arch/arm/mach-pxa/palmtc.c
@@ -338,7 +338,7 @@ static inline void palmtc_mkp_init(void) {}
/******************************************************************************
* UDC
******************************************************************************/
-#if defined(CONFIG_USB_GADGET_PXA25X)||defined(CONFIG_USB_GADGET_PXA25X_MODULE)
+#if defined(CONFIG_USB_PXA25X)||defined(CONFIG_USB_PXA25X_MODULE)
static struct gpio_vbus_mach_info palmtc_udc_info = {
.gpio_vbus = GPIO_NR_PALMTC_USB_DETECT_N,
.gpio_vbus_inverted = 1,
diff --git a/arch/arm/mach-pxa/vpac270.c b/arch/arm/mach-pxa/vpac270.c
index a7539a6ed1f..ca0c6615028 100644
--- a/arch/arm/mach-pxa/vpac270.c
+++ b/arch/arm/mach-pxa/vpac270.c
@@ -343,7 +343,7 @@ static inline void vpac270_uhc_init(void) {}
/******************************************************************************
* USB Gadget
******************************************************************************/
-#if defined(CONFIG_USB_GADGET_PXA27X)||defined(CONFIG_USB_GADGET_PXA27X_MODULE)
+#if defined(CONFIG_USB_PXA27X)||defined(CONFIG_USB_PXA27X_MODULE)
static struct gpio_vbus_mach_info vpac270_gpio_vbus_info = {
.gpio_vbus = GPIO41_VPAC270_UDC_DETECT,
.gpio_pullup = -1,
diff --git a/arch/arm/mach-s3c64xx/dev-spi.c b/arch/arm/mach-s3c64xx/dev-spi.c
index 5e6b42089eb..3341fd11872 100644
--- a/arch/arm/mach-s3c64xx/dev-spi.c
+++ b/arch/arm/mach-s3c64xx/dev-spi.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/string.h>
+#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/gpio.h>
diff --git a/arch/arm/mach-s3c64xx/mach-crag6410-module.c b/arch/arm/mach-s3c64xx/mach-crag6410-module.c
index 66668565ee7..f208154b138 100644
--- a/arch/arm/mach-s3c64xx/mach-crag6410-module.c
+++ b/arch/arm/mach-s3c64xx/mach-crag6410-module.c
@@ -8,7 +8,7 @@
* published by the Free Software Foundation.
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
diff --git a/arch/arm/mach-s3c64xx/s3c6400.c b/arch/arm/mach-s3c64xx/s3c6400.c
index 7a3bc32df42..51c00f2453c 100644
--- a/arch/arm/mach-s3c64xx/s3c6400.c
+++ b/arch/arm/mach-s3c64xx/s3c6400.c
@@ -70,7 +70,7 @@ void __init s3c6400_init_irq(void)
s3c64xx_init_irq(~0 & ~(0xf << 5), ~0);
}
-struct sysdev_class s3c6400_sysclass = {
+static struct sysdev_class s3c6400_sysclass = {
.name = "s3c6400-core",
};
diff --git a/arch/arm/mach-s3c64xx/setup-fb-24bpp.c b/arch/arm/mach-s3c64xx/setup-fb-24bpp.c
index 83d2afb79e9..2cf80026c58 100644
--- a/arch/arm/mach-s3c64xx/setup-fb-24bpp.c
+++ b/arch/arm/mach-s3c64xx/setup-fb-24bpp.c
@@ -20,7 +20,7 @@
#include <plat/fb.h>
#include <plat/gpio-cfg.h>
-extern void s3c64xx_fb_gpio_setup_24bpp(void)
+void s3c64xx_fb_gpio_setup_24bpp(void)
{
s3c_gpio_cfgrange_nopull(S3C64XX_GPI(0), 16, S3C_GPIO_SFN(2));
s3c_gpio_cfgrange_nopull(S3C64XX_GPJ(0), 12, S3C_GPIO_SFN(2));
diff --git a/arch/arm/mach-s5pv210/mach-smdkv210.c b/arch/arm/mach-s5pv210/mach-smdkv210.c
index a9106c39239..8662ef6e568 100644
--- a/arch/arm/mach-s5pv210/mach-smdkv210.c
+++ b/arch/arm/mach-s5pv210/mach-smdkv210.c
@@ -273,6 +273,7 @@ static struct samsung_bl_gpio_info smdkv210_bl_gpio_info = {
static struct platform_pwm_backlight_data smdkv210_bl_data = {
.pwm_id = 3,
+ .pwm_period_ns = 1000,
};
static void __init smdkv210_map_io(void)
diff --git a/arch/arm/mach-sa1100/Makefile.boot b/arch/arm/mach-sa1100/Makefile.boot
index 5a616f6e561..f7951aa0456 100644
--- a/arch/arm/mach-sa1100/Makefile.boot
+++ b/arch/arm/mach-sa1100/Makefile.boot
@@ -1,5 +1,5 @@
-ifeq ($(CONFIG_ARCH_SA1100),y)
- zreladdr-$(CONFIG_SA1111) += 0xc0208000
+ifeq ($(CONFIG_SA1111),y)
+ zreladdr-y += 0xc0208000
else
zreladdr-y += 0xc0008000
endif
diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c
index b862e9f81e3..7119b87cbfa 100644
--- a/arch/arm/mach-shmobile/board-ag5evm.c
+++ b/arch/arm/mach-shmobile/board-ag5evm.c
@@ -607,6 +607,7 @@ struct sys_timer ag5evm_timer = {
MACHINE_START(AG5EVM, "ag5evm")
.map_io = ag5evm_map_io,
+ .nr_irqs = NR_IRQS_LEGACY,
.init_irq = sh73a0_init_irq,
.handle_irq = shmobile_handle_irq_gic,
.init_machine = ag5evm_init,
diff --git a/arch/arm/mach-shmobile/board-kota2.c b/arch/arm/mach-shmobile/board-kota2.c
index bd9a78424d6..f44150b5ae4 100644
--- a/arch/arm/mach-shmobile/board-kota2.c
+++ b/arch/arm/mach-shmobile/board-kota2.c
@@ -33,6 +33,7 @@
#include <linux/input/sh_keysc.h>
#include <linux/gpio_keys.h>
#include <linux/leds.h>
+#include <linux/platform_data/leds-renesas-tpu.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sh_mmcif.h>
#include <linux/mfd/tmio.h>
@@ -56,7 +57,7 @@ static struct resource smsc9220_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = gic_spi(33), /* PINTA2 @ PORT144 */
+ .start = SH73A0_PINT0_IRQ(2), /* PINTA2 */
.flags = IORESOURCE_IRQ,
},
};
@@ -157,10 +158,6 @@ static struct platform_device gpio_keys_device = {
#define GPIO_LED(n, g) { .name = n, .gpio = g }
static struct gpio_led gpio_leds[] = {
- GPIO_LED("V2513", GPIO_PORT153), /* PORT153 [TPU1T02] -> V2513 */
- GPIO_LED("V2514", GPIO_PORT199), /* PORT199 [TPU4TO1] -> V2514 */
- GPIO_LED("V2515", GPIO_PORT197), /* PORT197 [TPU2TO1] -> V2515 */
- GPIO_LED("KEYLED", GPIO_PORT163), /* PORT163 [TPU3TO0] -> KEYLED */
GPIO_LED("G", GPIO_PORT20), /* PORT20 [GPO0] -> LED7 -> "G" */
GPIO_LED("H", GPIO_PORT21), /* PORT21 [GPO1] -> LED8 -> "H" */
GPIO_LED("J", GPIO_PORT22), /* PORT22 [GPO2] -> LED9 -> "J" */
@@ -179,6 +176,119 @@ static struct platform_device gpio_leds_device = {
},
};
+/* TPU LED */
+static struct led_renesas_tpu_config led_renesas_tpu12_pdata = {
+ .name = "V2513",
+ .pin_gpio_fn = GPIO_FN_TPU1TO2,
+ .pin_gpio = GPIO_PORT153,
+ .channel_offset = 0x90,
+ .timer_bit = 2,
+ .max_brightness = 1000,
+};
+
+static struct resource tpu12_resources[] = {
+ [0] = {
+ .name = "TPU12",
+ .start = 0xe6610090,
+ .end = 0xe66100b5,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device leds_tpu12_device = {
+ .name = "leds-renesas-tpu",
+ .id = 12,
+ .dev = {
+ .platform_data = &led_renesas_tpu12_pdata,
+ },
+ .num_resources = ARRAY_SIZE(tpu12_resources),
+ .resource = tpu12_resources,
+};
+
+static struct led_renesas_tpu_config led_renesas_tpu41_pdata = {
+ .name = "V2514",
+ .pin_gpio_fn = GPIO_FN_TPU4TO1,
+ .pin_gpio = GPIO_PORT199,
+ .channel_offset = 0x50,
+ .timer_bit = 1,
+ .max_brightness = 1000,
+};
+
+static struct resource tpu41_resources[] = {
+ [0] = {
+ .name = "TPU41",
+ .start = 0xe6640050,
+ .end = 0xe6640075,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device leds_tpu41_device = {
+ .name = "leds-renesas-tpu",
+ .id = 41,
+ .dev = {
+ .platform_data = &led_renesas_tpu41_pdata,
+ },
+ .num_resources = ARRAY_SIZE(tpu41_resources),
+ .resource = tpu41_resources,
+};
+
+static struct led_renesas_tpu_config led_renesas_tpu21_pdata = {
+ .name = "V2515",
+ .pin_gpio_fn = GPIO_FN_TPU2TO1,
+ .pin_gpio = GPIO_PORT197,
+ .channel_offset = 0x50,
+ .timer_bit = 1,
+ .max_brightness = 1000,
+};
+
+static struct resource tpu21_resources[] = {
+ [0] = {
+ .name = "TPU21",
+ .start = 0xe6620050,
+ .end = 0xe6620075,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device leds_tpu21_device = {
+ .name = "leds-renesas-tpu",
+ .id = 21,
+ .dev = {
+ .platform_data = &led_renesas_tpu21_pdata,
+ },
+ .num_resources = ARRAY_SIZE(tpu21_resources),
+ .resource = tpu21_resources,
+};
+
+static struct led_renesas_tpu_config led_renesas_tpu30_pdata = {
+ .name = "KEYLED",
+ .pin_gpio_fn = GPIO_FN_TPU3TO0,
+ .pin_gpio = GPIO_PORT163,
+ .channel_offset = 0x10,
+ .timer_bit = 0,
+ .max_brightness = 1000,
+};
+
+static struct resource tpu30_resources[] = {
+ [0] = {
+ .name = "TPU30",
+ .start = 0xe6630010,
+ .end = 0xe6630035,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device leds_tpu30_device = {
+ .name = "leds-renesas-tpu",
+ .id = 30,
+ .dev = {
+ .platform_data = &led_renesas_tpu30_pdata,
+ },
+ .num_resources = ARRAY_SIZE(tpu30_resources),
+ .resource = tpu30_resources,
+};
+
/* MMCIF */
static struct resource mmcif_resources[] = {
[0] = {
@@ -291,6 +401,10 @@ static struct platform_device *kota2_devices[] __initdata = {
&keysc_device,
&gpio_keys_device,
&gpio_leds_device,
+ &leds_tpu12_device,
+ &leds_tpu41_device,
+ &leds_tpu21_device,
+ &leds_tpu30_device,
&mmcif_device,
&sdhi0_device,
&sdhi1_device,
@@ -317,18 +431,6 @@ static void __init kota2_map_io(void)
shmobile_setup_console();
}
-#define PINTER0A 0xe69000a0
-#define PINTCR0A 0xe69000b0
-
-void __init kota2_init_irq(void)
-{
- sh73a0_init_irq();
-
- /* setup PINT: enable PINTA2 as active low */
- __raw_writel(1 << 29, PINTER0A);
- __raw_writew(2 << 10, PINTCR0A);
-}
-
static void __init kota2_init(void)
{
sh73a0_pinmux_init();
@@ -447,7 +549,8 @@ struct sys_timer kota2_timer = {
MACHINE_START(KOTA2, "kota2")
.map_io = kota2_map_io,
- .init_irq = kota2_init_irq,
+ .nr_irqs = NR_IRQS_LEGACY,
+ .init_irq = sh73a0_init_irq,
.handle_irq = shmobile_handle_irq_gic,
.init_machine = kota2_init,
.timer = &kota2_timer,
diff --git a/arch/arm/mach-shmobile/clock-sh73a0.c b/arch/arm/mach-shmobile/clock-sh73a0.c
index 61a846bb30f..1370a89ca35 100644
--- a/arch/arm/mach-shmobile/clock-sh73a0.c
+++ b/arch/arm/mach-shmobile/clock-sh73a0.c
@@ -113,6 +113,12 @@ static struct clk main_clk = {
.ops = &main_clk_ops,
};
+/* Divide Main clock by two */
+static struct clk main_div2_clk = {
+ .ops = &div2_clk_ops,
+ .parent = &main_clk,
+};
+
/* PLL0, PLL1, PLL2, PLL3 */
static unsigned long pll_recalc(struct clk *clk)
{
@@ -181,6 +187,7 @@ static struct clk *main_clks[] = {
&extal1_div2_clk,
&extal2_div2_clk,
&main_clk,
+ &main_div2_clk,
&pll0_clk,
&pll1_clk,
&pll2_clk,
@@ -243,7 +250,7 @@ static struct clk div6_clks[DIV6_NR] = {
[DIV6_VCK1] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR1, 0),
[DIV6_VCK2] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR2, 0),
[DIV6_VCK3] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR3, 0),
- [DIV6_ZB1] = SH_CLK_DIV6(&pll1_div2_clk, ZBCKCR, 0),
+ [DIV6_ZB1] = SH_CLK_DIV6(&pll1_div2_clk, ZBCKCR, CLK_ENABLE_ON_INIT),
[DIV6_FLCTL] = SH_CLK_DIV6(&pll1_div2_clk, FLCKCR, 0),
[DIV6_SDHI0] = SH_CLK_DIV6(&pll1_div2_clk, SD0CKCR, 0),
[DIV6_SDHI1] = SH_CLK_DIV6(&pll1_div2_clk, SD1CKCR, 0),
@@ -268,6 +275,7 @@ enum { MSTP001,
MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
MSTP331, MSTP329, MSTP325, MSTP323, MSTP318,
MSTP314, MSTP313, MSTP312, MSTP311,
+ MSTP303, MSTP302, MSTP301, MSTP300,
MSTP411, MSTP410, MSTP403,
MSTP_NR };
@@ -301,6 +309,10 @@ static struct clk mstp_clks[MSTP_NR] = {
[MSTP313] = MSTP(&div6_clks[DIV6_SDHI1], SMSTPCR3, 13, 0), /* SDHI1 */
[MSTP312] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 12, 0), /* MMCIF0 */
[MSTP311] = MSTP(&div6_clks[DIV6_SDHI2], SMSTPCR3, 11, 0), /* SDHI2 */
+ [MSTP303] = MSTP(&main_div2_clk, SMSTPCR3, 3, 0), /* TPU1 */
+ [MSTP302] = MSTP(&main_div2_clk, SMSTPCR3, 2, 0), /* TPU2 */
+ [MSTP301] = MSTP(&main_div2_clk, SMSTPCR3, 1, 0), /* TPU3 */
+ [MSTP300] = MSTP(&main_div2_clk, SMSTPCR3, 0, 0), /* TPU4 */
[MSTP411] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 11, 0), /* IIC3 */
[MSTP410] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 10, 0), /* IIC4 */
[MSTP403] = MSTP(&r_clk, SMSTPCR4, 3, 0), /* KEYSC */
@@ -350,6 +362,10 @@ static struct clk_lookup lookups[] = {
CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), /* SDHI1 */
CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMCIF0 */
CLKDEV_DEV_ID("sh_mobile_sdhi.2", &mstp_clks[MSTP311]), /* SDHI2 */
+ CLKDEV_DEV_ID("leds-renesas-tpu.12", &mstp_clks[MSTP303]), /* TPU1 */
+ CLKDEV_DEV_ID("leds-renesas-tpu.21", &mstp_clks[MSTP302]), /* TPU2 */
+ CLKDEV_DEV_ID("leds-renesas-tpu.30", &mstp_clks[MSTP301]), /* TPU3 */
+ CLKDEV_DEV_ID("leds-renesas-tpu.41", &mstp_clks[MSTP300]), /* TPU4 */
CLKDEV_DEV_ID("i2c-sh_mobile.3", &mstp_clks[MSTP411]), /* I2C3 */
CLKDEV_DEV_ID("i2c-sh_mobile.4", &mstp_clks[MSTP410]), /* I2C4 */
CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[MSTP403]), /* KEYSC */
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 8ac9e9f8479..b1e192ba8c2 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -61,7 +61,7 @@ static inline void cache_sync(void)
{
void __iomem *base = l2x0_base;
-#ifdef CONFIG_ARM_ERRATA_753970
+#ifdef CONFIG_PL310_ERRATA_753970
	/* write to an unmapped register */
writel_relaxed(0, base + L2X0_DUMMY_REG);
#else
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e4e7f6cba1a..1aa664a1999 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -168,7 +168,7 @@ static int __init consistent_init(void)
pte_t *pte;
int i = 0;
unsigned long base = consistent_base;
- unsigned long num_ptes = (CONSISTENT_END - base) >> PGDIR_SHIFT;
+ unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;
consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
if (!consistent_pte) {
@@ -332,6 +332,15 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
struct page *page;
void *addr;
+ /*
+ * Following is a work-around (a.k.a. hack) to prevent pages
+ * with __GFP_COMP being passed to split_page() which cannot
+ * handle them. The real problem is that this flag probably
+ * should be 0 on ARM as it is not supported on this
+ * platform; see CONFIG_HUGETLBFS.
+ */
+ gfp &= ~(__GFP_COMP);
+
*handle = ~0;
size = PAGE_ALIGN(size);
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 74be05f3e03..44b628e4d6e 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -9,8 +9,7 @@
#include <linux/io.h>
#include <linux/personality.h>
#include <linux/random.h>
-#include <asm/cputype.h>
-#include <asm/system.h>
+#include <asm/cachetype.h>
#define COLOUR_ALIGN(addr,pgoff) \
((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \
@@ -32,25 +31,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long start_addr;
-#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
- unsigned int cache_type;
- int do_align = 0, aliasing = 0;
+ int do_align = 0;
+ int aliasing = cache_is_vipt_aliasing();
/*
* We only need to do colour alignment if either the I or D
- * caches alias. This is indicated by bits 9 and 21 of the
- * cache type register.
+ * caches alias.
*/
- cache_type = read_cpuid_cachetype();
- if (cache_type != read_cpuid_id()) {
- aliasing = (cache_type | cache_type >> 12) & (1 << 11);
- if (aliasing)
- do_align = filp || flags & MAP_SHARED;
- }
-#else
-#define do_align 0
-#define aliasing 0
-#endif
+ if (aliasing)
+ do_align = filp || (flags & MAP_SHARED);
/*
* We enforce the MAP_FIXED case.
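The hunk above drops the open-coded cache-type probing and keys colour alignment purely off cache_is_vipt_aliasing(); the actual alignment arithmetic lives in the COLOUR_ALIGN() macro near the top of the file. Below is a rough stand-alone user-space sketch of that rule (separate from the diff itself); SHMLBA = 4 pages, PAGE_SHIFT = 12 and the colour term are assumptions reconstructed from the macro's usual form, not taken verbatim from this patch.

#include <stdio.h>

#define PAGE_SHIFT 12UL
#define SHMLBA     (4UL << PAGE_SHIFT)   /* assumed: 4 pages, typical for ARM VIPT caches */

/* Round the mapping hint up to an SHMLBA boundary, then add the cache
 * colour implied by the file offset, so every mapping of the same data
 * lands on the same colour and VIPT aliases stay coherent. */
static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
{
	unsigned long aligned = (addr + SHMLBA - 1) & ~(SHMLBA - 1);
	unsigned long colour  = (pgoff << PAGE_SHIFT) & (SHMLBA - 1);

	return aligned + colour;
}

int main(void)
{
	/* hint 0x40001000 with a file offset of 3 pages -> 0x40007000 */
	printf("%#lx\n", colour_align(0x40001000UL, 3));
	return 0;
}

With do_align set, arch_get_unmapped_area() feeds its candidate addresses through a rule of this shape instead of plain page alignment.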
diff --git a/arch/arm/plat-mxc/cpufreq.c b/arch/arm/plat-mxc/cpufreq.c
index 74aac96cda2..adbff706ef6 100644
--- a/arch/arm/plat-mxc/cpufreq.c
+++ b/arch/arm/plat-mxc/cpufreq.c
@@ -17,6 +17,7 @@
* the CPU clock speed on the fly.
*/
+#include <linux/module.h>
#include <linux/cpufreq.h>
#include <linux/clk.h>
#include <linux/err.h>
diff --git a/arch/arm/plat-mxc/include/mach/common.h b/arch/arm/plat-mxc/include/mach/common.h
index 83b745a5e1b..c75f254abd8 100644
--- a/arch/arm/plat-mxc/include/mach/common.h
+++ b/arch/arm/plat-mxc/include/mach/common.h
@@ -85,7 +85,6 @@ enum mxc_cpu_pwr_mode {
};
extern void mx5_cpu_lp_set(enum mxc_cpu_pwr_mode mode);
-extern void (*imx_idle)(void);
extern void imx_print_silicon_rev(const char *cpu, int srev);
void avic_handle_irq(struct pt_regs *);
@@ -133,4 +132,5 @@ extern void imx53_qsb_common_init(void);
extern void imx53_smd_common_init(void);
extern int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode);
extern void imx6q_pm_init(void);
+extern void imx6q_clock_map_io(void);
#endif
diff --git a/arch/arm/plat-mxc/include/mach/mxc.h b/arch/arm/plat-mxc/include/mach/mxc.h
index 00a78193c68..a4d36d601d5 100644
--- a/arch/arm/plat-mxc/include/mach/mxc.h
+++ b/arch/arm/plat-mxc/include/mach/mxc.h
@@ -50,20 +50,6 @@
#define IMX_CHIP_REVISION_3_3 0x33
#define IMX_CHIP_REVISION_UNKNOWN 0xff
-#define IMX_CHIP_REVISION_1_0_STRING "1.0"
-#define IMX_CHIP_REVISION_1_1_STRING "1.1"
-#define IMX_CHIP_REVISION_1_2_STRING "1.2"
-#define IMX_CHIP_REVISION_1_3_STRING "1.3"
-#define IMX_CHIP_REVISION_2_0_STRING "2.0"
-#define IMX_CHIP_REVISION_2_1_STRING "2.1"
-#define IMX_CHIP_REVISION_2_2_STRING "2.2"
-#define IMX_CHIP_REVISION_2_3_STRING "2.3"
-#define IMX_CHIP_REVISION_3_0_STRING "3.0"
-#define IMX_CHIP_REVISION_3_1_STRING "3.1"
-#define IMX_CHIP_REVISION_3_2_STRING "3.2"
-#define IMX_CHIP_REVISION_3_3_STRING "3.3"
-#define IMX_CHIP_REVISION_UNKNOWN_STRING "unknown"
-
#ifndef __ASSEMBLY__
extern unsigned int __mxc_cpu_type;
#endif
diff --git a/arch/arm/plat-mxc/include/mach/system.h b/arch/arm/plat-mxc/include/mach/system.h
index cf88b3593fb..b9895d25016 100644
--- a/arch/arm/plat-mxc/include/mach/system.h
+++ b/arch/arm/plat-mxc/include/mach/system.h
@@ -17,14 +17,9 @@
#ifndef __ASM_ARCH_MXC_SYSTEM_H__
#define __ASM_ARCH_MXC_SYSTEM_H__
-extern void (*imx_idle)(void);
-
static inline void arch_idle(void)
{
- if (imx_idle != NULL)
- (imx_idle)();
- else
- cpu_do_idle();
+ cpu_do_idle();
}
void arch_reset(char mode, const char *cmd);
diff --git a/arch/arm/plat-mxc/pwm.c b/arch/arm/plat-mxc/pwm.c
index 42d74ea5908..845de59f07e 100644
--- a/arch/arm/plat-mxc/pwm.c
+++ b/arch/arm/plat-mxc/pwm.c
@@ -32,6 +32,9 @@
#define MX3_PWMSAR 0x0C /* PWM Sample Register */
#define MX3_PWMPR 0x10 /* PWM Period Register */
#define MX3_PWMCR_PRESCALER(x) (((x - 1) & 0xFFF) << 4)
+#define MX3_PWMCR_DOZEEN (1 << 24)
+#define MX3_PWMCR_WAITEN (1 << 23)
+#define MX3_PWMCR_DBGEN (1 << 22)
#define MX3_PWMCR_CLKSRC_IPG_HIGH (2 << 16)
#define MX3_PWMCR_CLKSRC_IPG (1 << 16)
#define MX3_PWMCR_EN (1 << 0)
@@ -77,7 +80,9 @@ int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
writel(duty_cycles, pwm->mmio_base + MX3_PWMSAR);
writel(period_cycles, pwm->mmio_base + MX3_PWMPR);
- cr = MX3_PWMCR_PRESCALER(prescale) | MX3_PWMCR_EN;
+ cr = MX3_PWMCR_PRESCALER(prescale) |
+ MX3_PWMCR_DOZEEN | MX3_PWMCR_WAITEN |
+ MX3_PWMCR_DBGEN | MX3_PWMCR_EN;
if (cpu_is_mx25())
cr |= MX3_PWMCR_CLKSRC_IPG;
diff --git a/arch/arm/plat-mxc/system.c b/arch/arm/plat-mxc/system.c
index 9dad8dcc2ea..d65fb31a55c 100644
--- a/arch/arm/plat-mxc/system.c
+++ b/arch/arm/plat-mxc/system.c
@@ -21,6 +21,7 @@
#include <linux/io.h>
#include <linux/err.h>
#include <linux/delay.h>
+#include <linux/module.h>
#include <mach/hardware.h>
#include <mach/common.h>
@@ -28,8 +29,8 @@
#include <asm/system.h>
#include <asm/mach-types.h>
-void (*imx_idle)(void) = NULL;
void __iomem *(*imx_ioremap)(unsigned long, size_t, unsigned int) = NULL;
+EXPORT_SYMBOL_GPL(imx_ioremap);
static void __iomem *wdog_base;
diff --git a/arch/arm/plat-omap/include/plat/clock.h b/arch/arm/plat-omap/include/plat/clock.h
index 197ca03c3f7..eb73ab40e95 100644
--- a/arch/arm/plat-omap/include/plat/clock.h
+++ b/arch/arm/plat-omap/include/plat/clock.h
@@ -165,8 +165,8 @@ struct dpll_data {
u8 auto_recal_bit;
u8 recal_en_bit;
u8 recal_st_bit;
- u8 flags;
# endif
+ u8 flags;
};
#endif
diff --git a/arch/arm/plat-omap/include/plat/common.h b/arch/arm/plat-omap/include/plat/common.h
index c50df4814f6..3ff3e36580f 100644
--- a/arch/arm/plat-omap/include/plat/common.h
+++ b/arch/arm/plat-omap/include/plat/common.h
@@ -30,6 +30,7 @@
#include <linux/delay.h>
#include <plat/i2c.h>
+#include <plat/omap_hwmod.h>
struct sys_timer;
@@ -55,6 +56,8 @@ void am35xx_init_early(void);
void ti816x_init_early(void);
void omap4430_init_early(void);
+extern int omap_dss_reset(struct omap_hwmod *);
+
void omap_sram_init(void);
/*
diff --git a/arch/arm/plat-s3c24xx/cpu-freq-debugfs.c b/arch/arm/plat-s3c24xx/cpu-freq-debugfs.c
index a9276667c2f..c7adad0e8de 100644
--- a/arch/arm/plat-s3c24xx/cpu-freq-debugfs.c
+++ b/arch/arm/plat-s3c24xx/cpu-freq-debugfs.c
@@ -12,7 +12,7 @@
*/
#include <linux/init.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/cpufreq.h>
diff --git a/arch/arm/plat-s5p/sysmmu.c b/arch/arm/plat-s5p/sysmmu.c
index e1cbc728c77..c8bec9c7655 100644
--- a/arch/arm/plat-s5p/sysmmu.c
+++ b/arch/arm/plat-s5p/sysmmu.c
@@ -11,6 +11,7 @@
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/export.h>
#include <asm/pgtable.h>
diff --git a/arch/arm/plat-samsung/dev-backlight.c b/arch/arm/plat-samsung/dev-backlight.c
index e657305644c..a976c023b28 100644
--- a/arch/arm/plat-samsung/dev-backlight.c
+++ b/arch/arm/plat-samsung/dev-backlight.c
@@ -15,7 +15,6 @@
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/pwm_backlight.h>
-#include <linux/slab.h>
#include <plat/devs.h>
#include <plat/gpio-cfg.h>
diff --git a/arch/arm/plat-samsung/include/plat/gpio-cfg.h b/arch/arm/plat-samsung/include/plat/gpio-cfg.h
index d48245bb02b..df8155b9d4d 100644
--- a/arch/arm/plat-samsung/include/plat/gpio-cfg.h
+++ b/arch/arm/plat-samsung/include/plat/gpio-cfg.h
@@ -24,6 +24,8 @@
#ifndef __PLAT_GPIO_CFG_H
#define __PLAT_GPIO_CFG_H __FILE__
+#include <linux/types.h>
+
typedef unsigned int __bitwise__ samsung_gpio_pull_t;
typedef unsigned int __bitwise__ s5p_gpio_drvstr_t;
diff --git a/arch/arm/plat-samsung/pd.c b/arch/arm/plat-samsung/pd.c
index efe1d564473..312b510d86b 100644
--- a/arch/arm/plat-samsung/pd.c
+++ b/arch/arm/plat-samsung/pd.c
@@ -11,7 +11,7 @@
*/
#include <linux/init.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/pm_runtime.h>
diff --git a/arch/arm/plat-samsung/pwm.c b/arch/arm/plat-samsung/pwm.c
index dc1185dcf80..c559d8438c7 100644
--- a/arch/arm/plat-samsung/pwm.c
+++ b/arch/arm/plat-samsung/pwm.c
@@ -11,7 +11,7 @@
* the Free Software Foundation; either version 2 of the License.
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index 5bdeef96984..ccbe16f4722 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -1123,5 +1123,6 @@ blissc MACH_BLISSC BLISSC 3491
thales_adc MACH_THALES_ADC THALES_ADC 3492
ubisys_p9d_evp MACH_UBISYS_P9D_EVP UBISYS_P9D_EVP 3493
atdgp318 MACH_ATDGP318 ATDGP318 3494
+m28evk MACH_M28EVK M28EVK 3613
smdk4212 MACH_SMDK4212 SMDK4212 3638
smdk4412 MACH_SMDK4412 SMDK4412 3765
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index 43f984e9397..303192fc926 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -350,10 +350,12 @@
#define __NR_clock_adjtime 342
#define __NR_syncfs 343
#define __NR_setns 344
+#define __NR_process_vm_readv 345
+#define __NR_process_vm_writev 346
#ifdef __KERNEL__
-#define NR_syscalls 345
+#define NR_syscalls 347
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index c468f2edaa8..ce827b37611 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -365,4 +365,6 @@ ENTRY(sys_call_table)
.long sys_clock_adjtime
.long sys_syncfs
.long sys_setns
+ .long sys_process_vm_readv /* 345 */
+ .long sys_process_vm_writev
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 4f2971bcf8e..315fc0b250f 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -623,7 +623,7 @@ static int mipspmu_event_init(struct perf_event *event)
if (!atomic_inc_not_zero(&active_events)) {
if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
atomic_dec(&active_events);
- return -ENOSPC;
+ return -EINVAL;
}
mutex_lock(&pmu_reserve_mutex);
@@ -732,15 +732,15 @@ static int validate_group(struct perf_event *event)
memset(&fake_cpuc, 0, sizeof(fake_cpuc));
if (!validate_event(&fake_cpuc, leader))
- return -ENOSPC;
+ return -EINVAL;
list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
if (!validate_event(&fake_cpuc, sibling))
- return -ENOSPC;
+ return -EINVAL;
}
if (!validate_event(&fake_cpuc, event))
- return -ENOSPC;
+ return -EINVAL;
return 0;
}
diff --git a/arch/powerpc/boot/dts/p1023rds.dts b/arch/powerpc/boot/dts/p1023rds.dts
index d9b776740a6..d3b478242ea 100644
--- a/arch/powerpc/boot/dts/p1023rds.dts
+++ b/arch/powerpc/boot/dts/p1023rds.dts
@@ -449,6 +449,7 @@
interrupt-parent = <&mpic>;
interrupts = <16 2>;
interrupt-map-mask = <0xf800 0 0 7>;
+ /* IRQ[0:3] are pulled up on board, set to active-low */
interrupt-map = <
/* IDSEL 0x0 */
0000 0 0 1 &mpic 0 1
@@ -488,11 +489,15 @@
interrupt-parent = <&mpic>;
interrupts = <16 2>;
interrupt-map-mask = <0xf800 0 0 7>;
+ /*
+ * IRQ[4:6] only for PCIe, set to active-high,
+ * IRQ[7] is pulled up on board, set to active-low
+ */
interrupt-map = <
/* IDSEL 0x0 */
- 0000 0 0 1 &mpic 4 1
- 0000 0 0 2 &mpic 5 1
- 0000 0 0 3 &mpic 6 1
+ 0000 0 0 1 &mpic 4 2
+ 0000 0 0 2 &mpic 5 2
+ 0000 0 0 3 &mpic 6 2
0000 0 0 4 &mpic 7 1
>;
ranges = <0x2000000 0x0 0xa0000000
@@ -527,12 +532,16 @@
interrupt-parent = <&mpic>;
interrupts = <16 2>;
interrupt-map-mask = <0xf800 0 0 7>;
+ /*
+ * IRQ[8:10] are pulled up on board, set to active-low
+ * IRQ[11] only for PCIe, set to active-high,
+ */
interrupt-map = <
/* IDSEL 0x0 */
0000 0 0 1 &mpic 8 1
0000 0 0 2 &mpic 9 1
0000 0 0 3 &mpic 10 1
- 0000 0 0 4 &mpic 11 1
+ 0000 0 0 4 &mpic 11 2
>;
ranges = <0x2000000 0x0 0x80000000
0x2000000 0x0 0x80000000
diff --git a/arch/powerpc/configs/ppc44x_defconfig b/arch/powerpc/configs/ppc44x_defconfig
index 6cdf1c0d2c8..3b98d735434 100644
--- a/arch/powerpc/configs/ppc44x_defconfig
+++ b/arch/powerpc/configs/ppc44x_defconfig
@@ -52,6 +52,8 @@ CONFIG_MTD_CFI=y
CONFIG_MTD_JEDECPROBE=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_NAND=m
+CONFIG_MTD_NAND_NDFC=m
CONFIG_MTD_UBI=m
CONFIG_MTD_UBI_GLUEBI=m
CONFIG_PROC_DEVICETREE=y
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 5964371303a..8558b572e55 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -15,6 +15,7 @@
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
+#include <linux/moduleparam.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index 45023e26aea..d7946be298b 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -203,7 +203,7 @@ config P3060_QDS
select PPC_E500MC
select PHYS_64BIT
select SWIOTLB
- select MPC8xxx_GPIO
+ select GPIO_MPC8XXX
select HAS_RAPIDIO
select PPC_EPAPR_HV_PIC
help
diff --git a/arch/powerpc/platforms/85xx/p3060_qds.c b/arch/powerpc/platforms/85xx/p3060_qds.c
index 01dcf44871e..081cf4ac188 100644
--- a/arch/powerpc/platforms/85xx/p3060_qds.c
+++ b/arch/powerpc/platforms/85xx/p3060_qds.c
@@ -70,7 +70,7 @@ define_machine(p3060_qds) {
.power_save = e500_idle,
};
-machine_device_initcall(p3060_qds, declare_of_platform_devices);
+machine_device_initcall(p3060_qds, corenet_ds_publish_devices);
#ifdef CONFIG_SWIOTLB
machine_arch_initcall(p3060_qds, swiotlb_setup_bus_notifier);
diff --git a/arch/powerpc/sysdev/ehv_pic.c b/arch/powerpc/sysdev/ehv_pic.c
index af1a5df46b3..b6731e4a664 100644
--- a/arch/powerpc/sysdev/ehv_pic.c
+++ b/arch/powerpc/sysdev/ehv_pic.c
@@ -280,6 +280,7 @@ void __init ehv_pic_init(void)
if (!ehv_pic->irqhost) {
of_node_put(np);
+ kfree(ehv_pic);
return;
}
diff --git a/arch/powerpc/sysdev/fsl_lbc.c b/arch/powerpc/sysdev/fsl_lbc.c
index c4d96fa32ba..d5c3c90ee69 100644
--- a/arch/powerpc/sysdev/fsl_lbc.c
+++ b/arch/powerpc/sysdev/fsl_lbc.c
@@ -328,6 +328,7 @@ static int __devinit fsl_lbc_ctrl_probe(struct platform_device *dev)
err:
iounmap(fsl_lbc_ctrl_dev->regs);
kfree(fsl_lbc_ctrl_dev);
+ fsl_lbc_ctrl_dev = NULL;
return ret;
}
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c
index 3363fbc964f..ceb09cbd232 100644
--- a/arch/powerpc/sysdev/qe_lib/qe.c
+++ b/arch/powerpc/sysdev/qe_lib/qe.c
@@ -216,7 +216,7 @@ int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier)
/* Errata QE_General4, which affects some MPC832x and MPC836x SOCs, says
that the BRG divisor must be even if you're not using divide-by-16
mode. */
- if (!div16 && (divisor & 1))
+ if (!div16 && (divisor & 1) && (divisor > 3))
divisor++;
tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) |
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 524d23b8610..4f289ff0b7f 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -599,10 +599,10 @@ static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
skey = page_get_storage_key(address);
bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
/* Clear page changed & referenced bit in the storage key */
- if (bits) {
- skey ^= bits;
- page_set_storage_key(address, skey, 1);
- }
+ if (bits & _PAGE_CHANGED)
+ page_set_storage_key(address, skey ^ bits, 1);
+ else if (bits)
+ page_reset_referenced(address);
/* Transfer page changed & referenced bit to guest bits in pgste */
pgste_val(pgste) |= bits << 48; /* RCP_GR_BIT & RCP_GC_BIT */
/* Get host changed & referenced bits from pgste */
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 450931a45b6..573bc29551e 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -296,13 +296,6 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))))
/* Invalid psw mask. */
return -EINVAL;
- if (addr == (addr_t) &dummy->regs.psw.addr)
- /*
- * The debugger changed the instruction address,
- * reset system call restart, see signal.c:do_signal
- */
- task_thread_info(child)->system_call = 0;
-
*(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
} else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
@@ -614,11 +607,6 @@ static int __poke_user_compat(struct task_struct *child,
/* Transfer 31 bit amode bit to psw mask. */
regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
(__u64)(tmp & PSW32_ADDR_AMODE);
- /*
- * The debugger changed the instruction address,
- * reset system call restart, see signal.c:do_signal
- */
- task_thread_info(child)->system_call = 0;
} else {
/* gpr 0-15 */
*(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp;
@@ -905,6 +893,14 @@ static int s390_last_break_get(struct task_struct *target,
return 0;
}
+static int s390_last_break_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ return 0;
+}
+
#endif
static int s390_system_call_get(struct task_struct *target,
@@ -951,6 +947,7 @@ static const struct user_regset s390_regsets[] = {
.size = sizeof(long),
.align = sizeof(long),
.get = s390_last_break_get,
+ .set = s390_last_break_set,
},
#endif
[REGSET_SYSTEM_CALL] = {
@@ -1116,6 +1113,14 @@ static int s390_compat_last_break_get(struct task_struct *target,
return 0;
}
+static int s390_compat_last_break_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ return 0;
+}
+
static const struct user_regset s390_compat_regsets[] = {
[REGSET_GENERAL] = {
.core_note_type = NT_PRSTATUS,
@@ -1139,6 +1144,7 @@ static const struct user_regset s390_compat_regsets[] = {
.size = sizeof(long),
.align = sizeof(long),
.get = s390_compat_last_break_get,
+ .set = s390_compat_last_break_set,
},
[REGSET_SYSTEM_CALL] = {
.core_note_type = NT_S390_SYSTEM_CALL,
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index e58a462949b..e54c4ff8aba 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -579,7 +579,7 @@ static unsigned long __init find_crash_base(unsigned long crash_size,
*msg = "first memory chunk must be at least crashkernel size";
return 0;
}
- if (is_kdump_kernel() && (crash_size == OLDMEM_SIZE))
+ if (OLDMEM_BASE && crash_size == OLDMEM_SIZE)
return OLDMEM_BASE;
for (i = MEMORY_CHUNKS - 1; i >= 0; i--) {
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 05a85bc14c9..7f6f9f35454 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -460,9 +460,9 @@ void do_signal(struct pt_regs *regs)
regs->svc_code >> 16);
break;
}
- /* No longer in a system call */
- clear_thread_flag(TIF_SYSCALL);
}
+ /* No longer in a system call */
+ clear_thread_flag(TIF_SYSCALL);
if ((is_compat_task() ?
handle_signal32(signr, &ka, &info, oldset, regs) :
@@ -486,6 +486,7 @@ void do_signal(struct pt_regs *regs)
}
/* No handlers present - check for system call restart */
+ clear_thread_flag(TIF_SYSCALL);
if (current_thread_info()->system_call) {
regs->svc_code = current_thread_info()->system_call;
switch (regs->gprs[2]) {
@@ -500,9 +501,6 @@ void do_signal(struct pt_regs *regs)
regs->gprs[2] = regs->orig_gpr2;
set_thread_flag(TIF_SYSCALL);
break;
- default:
- clear_thread_flag(TIF_SYSCALL);
- break;
}
}
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index 6efc18b5e60..bd58b72454c 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -88,7 +88,7 @@ static ssize_t hwsampler_write(struct file *file, char const __user *buf,
return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval)
+ if (retval <= 0)
return retval;
if (oprofile_started)
diff --git a/arch/sh/boards/board-sh7757lcr.c b/arch/sh/boards/board-sh7757lcr.c
index ec8c84c14b1..895e337c79b 100644
--- a/arch/sh/boards/board-sh7757lcr.c
+++ b/arch/sh/boards/board-sh7757lcr.c
@@ -50,9 +50,9 @@ static struct platform_device heartbeat_device = {
#define GBECONT 0xffc10100
#define GBECONT_RMII1 BIT(17)
#define GBECONT_RMII0 BIT(16)
-static void sh7757_eth_set_mdio_gate(unsigned long addr)
+static void sh7757_eth_set_mdio_gate(void *addr)
{
- if ((addr & 0x00000fff) < 0x0800)
+ if (((unsigned long)addr & 0x00000fff) < 0x0800)
writel(readl(GBECONT) | GBECONT_RMII0, GBECONT);
else
writel(readl(GBECONT) | GBECONT_RMII1, GBECONT);
@@ -116,9 +116,9 @@ static struct platform_device sh7757_eth1_device = {
},
};
-static void sh7757_eth_giga_set_mdio_gate(unsigned long addr)
+static void sh7757_eth_giga_set_mdio_gate(void *addr)
{
- if ((addr & 0x00000fff) < 0x0800) {
+ if (((unsigned long)addr & 0x00000fff) < 0x0800) {
gpio_set_value(GPIO_PTT4, 1);
writel(readl(GBECONT) & ~GBECONT_RMII0, GBECONT);
} else {
@@ -210,8 +210,12 @@ static struct resource sh_mmcif_resources[] = {
};
static struct sh_mmcif_dma sh7757lcr_mmcif_dma = {
- .chan_priv_tx = SHDMA_SLAVE_MMCIF_TX,
- .chan_priv_rx = SHDMA_SLAVE_MMCIF_RX,
+ .chan_priv_tx = {
+ .slave_id = SHDMA_SLAVE_MMCIF_TX,
+ },
+ .chan_priv_rx = {
+ .slave_id = SHDMA_SLAVE_MMCIF_RX,
+ }
};
static struct sh_mmcif_plat_data sh_mmcif_plat = {
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index 7429b47c3ac..381edcd5bc2 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -1181,13 +1181,11 @@ static int __devinit ds_probe(struct vio_dev *vdev,
dp->rcv_buf_len = 4096;
- dp->ds_states = kzalloc(sizeof(ds_states_template),
- GFP_KERNEL);
+ dp->ds_states = kmemdup(ds_states_template,
+ sizeof(ds_states_template), GFP_KERNEL);
if (!dp->ds_states)
goto out_free_rcv_buf;
- memcpy(dp->ds_states, ds_states_template,
- sizeof(ds_states_template));
dp->num_ds_states = ARRAY_SIZE(ds_states_template);
for (i = 0; i < dp->num_ds_states; i++)
diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
index 46614807a57..741df916c12 100644
--- a/arch/sparc/kernel/prom_common.c
+++ b/arch/sparc/kernel/prom_common.c
@@ -58,12 +58,10 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len
void *new_val;
int err;
- new_val = kmalloc(len, GFP_KERNEL);
+ new_val = kmemdup(val, len, GFP_KERNEL);
if (!new_val)
return -ENOMEM;
- memcpy(new_val, val, len);
-
err = -ENODEV;
mutex_lock(&of_set_property_mutex);
diff --git a/arch/sparc/mm/btfixup.c b/arch/sparc/mm/btfixup.c
index 5175ac2f482..8a7f81743c1 100644
--- a/arch/sparc/mm/btfixup.c
+++ b/arch/sparc/mm/btfixup.c
@@ -302,8 +302,7 @@ void __init btfixup(void)
case 'i': /* INT */
if ((insn & 0xc1c00000) == 0x01000000) /* %HI */
set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10));
- else if ((insn & 0x80002000) == 0x80002000 &&
- (insn & 0x01800000) != 0x01800000) /* %LO */
+ else if ((insn & 0x80002000) == 0x80002000) /* %LO */
set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x3ff));
else {
prom_printf(insn_i, p, addr, insn);
diff --git a/arch/tile/include/asm/irq.h b/arch/tile/include/asm/irq.h
index 94e9a511de8..f80f8ceabc6 100644
--- a/arch/tile/include/asm/irq.h
+++ b/arch/tile/include/asm/irq.h
@@ -74,16 +74,6 @@ enum {
*/
void tile_irq_activate(unsigned int irq, int tile_irq_type);
-/*
- * For onboard, non-PCI (e.g. TILE_IRQ_PERCPU) devices, drivers know
- * how to use enable/disable_percpu_irq() to manage interrupts on each
- * core. We can't use the generic enable/disable_irq() because they
- * use a single reference count per irq, rather than per cpu per irq.
- */
-void enable_percpu_irq(unsigned int irq);
-void disable_percpu_irq(unsigned int irq);
-
-
void setup_irq_regs(void);
#endif /* _ASM_TILE_IRQ_H */
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c
index aa0134db2dd..02e62806501 100644
--- a/arch/tile/kernel/irq.c
+++ b/arch/tile/kernel/irq.c
@@ -152,14 +152,13 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
* Remove an irq from the disabled mask. If we're in an interrupt
* context, defer enabling the HW interrupt until we leave.
*/
-void enable_percpu_irq(unsigned int irq)
+static void tile_irq_chip_enable(struct irq_data *d)
{
- get_cpu_var(irq_disable_mask) &= ~(1UL << irq);
+ get_cpu_var(irq_disable_mask) &= ~(1UL << d->irq);
if (__get_cpu_var(irq_depth) == 0)
- unmask_irqs(1UL << irq);
+ unmask_irqs(1UL << d->irq);
put_cpu_var(irq_disable_mask);
}
-EXPORT_SYMBOL(enable_percpu_irq);
/*
* Add an irq to the disabled mask. We disable the HW interrupt
@@ -167,13 +166,12 @@ EXPORT_SYMBOL(enable_percpu_irq);
* in an interrupt context, the return path is careful to avoid
* unmasking a newly disabled interrupt.
*/
-void disable_percpu_irq(unsigned int irq)
+static void tile_irq_chip_disable(struct irq_data *d)
{
- get_cpu_var(irq_disable_mask) |= (1UL << irq);
- mask_irqs(1UL << irq);
+ get_cpu_var(irq_disable_mask) |= (1UL << d->irq);
+ mask_irqs(1UL << d->irq);
put_cpu_var(irq_disable_mask);
}
-EXPORT_SYMBOL(disable_percpu_irq);
/* Mask an interrupt. */
static void tile_irq_chip_mask(struct irq_data *d)
@@ -209,6 +207,8 @@ static void tile_irq_chip_eoi(struct irq_data *d)
static struct irq_chip tile_irq_chip = {
.name = "tile_irq_chip",
+ .irq_enable = tile_irq_chip_enable,
+ .irq_disable = tile_irq_chip_disable,
.irq_ack = tile_irq_chip_ack,
.irq_eoi = tile_irq_chip_eoi,
.irq_mask = tile_irq_chip_mask,
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c
index 658f2ce426a..b3ed19f8779 100644
--- a/arch/tile/kernel/pci-dma.c
+++ b/arch/tile/kernel/pci-dma.c
@@ -15,6 +15,7 @@
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
+#include <linux/export.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>
diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c
index 2a8014cb1ff..9d610d3fb11 100644
--- a/arch/tile/kernel/pci.c
+++ b/arch/tile/kernel/pci.c
@@ -24,6 +24,7 @@
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/uaccess.h>
+#include <linux/export.h>
#include <asm/processor.h>
#include <asm/sections.h>
diff --git a/arch/tile/kernel/sysfs.c b/arch/tile/kernel/sysfs.c
index b671a86f451..60290826809 100644
--- a/arch/tile/kernel/sysfs.c
+++ b/arch/tile/kernel/sysfs.c
@@ -18,6 +18,7 @@
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/smp.h>
+#include <linux/stat.h>
#include <hv/hypervisor.h>
/* Return a string queried from the hypervisor, truncated to page size. */
diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c
index a87d2a859ba..2a81d32de0d 100644
--- a/arch/tile/lib/exports.c
+++ b/arch/tile/lib/exports.c
@@ -39,6 +39,9 @@ EXPORT_SYMBOL(finv_user_asm);
EXPORT_SYMBOL(current_text_addr);
EXPORT_SYMBOL(dump_stack);
+/* arch/tile/kernel/head.S */
+EXPORT_SYMBOL(empty_zero_page);
+
/* arch/tile/lib/, various memcpy files */
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(__copy_to_user_inatomic);
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index cbe6f4f9eca..1cc6ae477c9 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -449,9 +449,12 @@ void homecache_free_pages(unsigned long addr, unsigned int order)
VM_BUG_ON(!virt_addr_valid((void *)addr));
page = virt_to_page((void *)addr);
if (put_page_testzero(page)) {
- int pages = (1 << order);
homecache_change_page_home(page, order, initial_page_home());
- while (pages--)
- __free_page(page++);
+ if (order == 0) {
+ free_hot_cold_page(page, 0);
+ } else {
+ init_page_count(page);
+ __free_pages(page, order);
+ }
}
}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index cb9a1044a77..efb42949cc0 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -390,7 +390,7 @@ config X86_INTEL_CE
This option compiles in support for the CE4100 SOC for settop
boxes and media devices.
-config X86_INTEL_MID
+config X86_WANT_INTEL_MID
bool "Intel MID platform support"
depends on X86_32
depends on X86_EXTENDED_PLATFORM
@@ -399,7 +399,10 @@ config X86_INTEL_MID
systems which do not have the PCI legacy interfaces (Moorestown,
Medfield). If you are building for a PC class system say N here.
-if X86_INTEL_MID
+if X86_WANT_INTEL_MID
+
+config X86_INTEL_MID
+ bool
config X86_MRST
bool "Moorestown MID platform"
@@ -411,6 +414,7 @@ config X86_MRST
select SPI
select INTEL_SCU_IPC
select X86_PLATFORM_DEVICES
+ select X86_INTEL_MID
---help---
Moorestown is Intel's Low Power Intel Architecture (LPIA) based Moblin
Internet Device(MID) platform. Moorestown consists of two chips:
diff --git a/arch/x86/include/asm/intel_scu_ipc.h b/arch/x86/include/asm/intel_scu_ipc.h
index 4420993acc4..925b605eb5c 100644
--- a/arch/x86/include/asm/intel_scu_ipc.h
+++ b/arch/x86/include/asm/intel_scu_ipc.h
@@ -3,11 +3,15 @@
#include <linux/notifier.h>
-#define IPCMSG_VRTC 0xFA /* Set vRTC device */
-
-/* Command id associated with message IPCMSG_VRTC */
-#define IPC_CMD_VRTC_SETTIME 1 /* Set time */
-#define IPC_CMD_VRTC_SETALARM 2 /* Set alarm */
+#define IPCMSG_WARM_RESET 0xF0
+#define IPCMSG_COLD_RESET 0xF1
+#define IPCMSG_SOFT_RESET 0xF2
+#define IPCMSG_COLD_BOOT 0xF3
+
+#define IPCMSG_VRTC 0xFA /* Set vRTC device */
+ /* Command id associated with message IPCMSG_VRTC */
+ #define IPC_CMD_VRTC_SETTIME 1 /* Set time */
+ #define IPC_CMD_VRTC_SETALARM 2 /* Set alarm */
/* Read single register */
int intel_scu_ipc_ioread8(u16 addr, u8 *data);
diff --git a/arch/x86/include/asm/mrst.h b/arch/x86/include/asm/mrst.h
index e6283129c82..93f79094c22 100644
--- a/arch/x86/include/asm/mrst.h
+++ b/arch/x86/include/asm/mrst.h
@@ -31,11 +31,20 @@ enum mrst_cpu_type {
};
extern enum mrst_cpu_type __mrst_cpu_chip;
+
+#ifdef CONFIG_X86_INTEL_MID
+
static inline enum mrst_cpu_type mrst_identify_cpu(void)
{
return __mrst_cpu_chip;
}
+#else /* !CONFIG_X86_INTEL_MID */
+
+#define mrst_identify_cpu() (0)
+
+#endif /* !CONFIG_X86_INTEL_MID */
+
enum mrst_timer_options {
MRST_TIMER_DEFAULT,
MRST_TIMER_APBT_ONLY,
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 084ef95274c..95203d40ffd 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -169,7 +169,14 @@ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
return native_write_msr_safe(msr, low, high);
}
-/* rdmsr with exception handling */
+/*
+ * rdmsr with exception handling.
+ *
+ * Please note that the exception handling works only after we've
+ * switched to the "smart" #GP handler in trap_init() which knows about
+ * exception tables - using this macro earlier than that causes machine
+ * hangs on boxes which do not implement the @msr in the first argument.
+ */
#define rdmsr_safe(msr, p1, p2) \
({ \
int __err; \
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
index c2ff2a1d845..2d2f01ce6dc 100644
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -401,6 +401,7 @@ extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
void default_idle(void);
+bool set_pm_idle_to_default(void);
void stop_this_cpu(void *dummy);
diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h
index fa7b9176b76..431793e5d48 100644
--- a/arch/x86/include/asm/timer.h
+++ b/arch/x86/include/asm/timer.h
@@ -32,6 +32,22 @@ extern int no_timer_check;
* (mathieu.desnoyers@polymtl.ca)
*
* -johnstul@us.ibm.com "math is hard, lets go shopping!"
+ *
+ * In:
+ *
+ * ns = cycles * cyc2ns_scale / SC
+ *
+ * Although we may still have enough bits to store the value of ns,
+ * in some cases, we may not have enough bits to store cycles * cyc2ns_scale,
+ * leading to an incorrect result.
+ *
+ * To avoid this, we can decompose 'cycles' into quotient and remainder
+ * of division by SC. Then,
+ *
+ * ns = (quot * SC + rem) * cyc2ns_scale / SC
+ * = quot * cyc2ns_scale + (rem * cyc2ns_scale) / SC
+ *
+ * - sqazi@google.com
*/
DECLARE_PER_CPU(unsigned long, cyc2ns);
@@ -41,9 +57,14 @@ DECLARE_PER_CPU(unsigned long long, cyc2ns_offset);
static inline unsigned long long __cycles_2_ns(unsigned long long cyc)
{
+ unsigned long long quot;
+ unsigned long long rem;
int cpu = smp_processor_id();
unsigned long long ns = per_cpu(cyc2ns_offset, cpu);
- ns += cyc * per_cpu(cyc2ns, cpu) >> CYC2NS_SCALE_FACTOR;
+ quot = (cyc >> CYC2NS_SCALE_FACTOR);
+ rem = cyc & ((1ULL << CYC2NS_SCALE_FACTOR) - 1);
+ ns += quot * per_cpu(cyc2ns, cpu) +
+ ((rem * per_cpu(cyc2ns, cpu)) >> CYC2NS_SCALE_FACTOR);
return ns;
}
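The comment block and hunk above explain the overflow: for a large cycle count the 64-bit product cycles * cyc2ns_scale wraps before the right shift by SC, so the new code splits cycles into a quotient and remainder by SC first. The following stand-alone C sketch contrasts the two forms; the scale factor of 10, the ~341 scale value for a 3 GHz TSC and the 300-day cycle count are illustrative assumptions, not values taken from the patch.

#include <stdio.h>
#include <stdint.h>

#define CYC2NS_SCALE_FACTOR 10   /* SC = 2^10, assumed to match the kernel header */

/* Naive form: the 64-bit product cyc * scale wraps before the shift. */
static uint64_t cyc2ns_naive(uint64_t cyc, uint64_t scale)
{
	return (cyc * scale) >> CYC2NS_SCALE_FACTOR;
}

/* Decomposed form from the hunk above: ns = quot * scale + (rem * scale) / SC */
static uint64_t cyc2ns_split(uint64_t cyc, uint64_t scale)
{
	uint64_t quot = cyc >> CYC2NS_SCALE_FACTOR;
	uint64_t rem  = cyc & ((1ULL << CYC2NS_SCALE_FACTOR) - 1);

	return quot * scale + ((rem * scale) >> CYC2NS_SCALE_FACTOR);
}

int main(void)
{
	uint64_t scale = 341;                          /* roughly (10^6 << 10) / 3000000 kHz */
	uint64_t cyc   = 3000000000ULL * 86400 * 300;  /* ~300 days of cycles at 3 GHz */

	printf("naive: %llu ns\n", (unsigned long long)cyc2ns_naive(cyc, scale));
	printf("split: %llu ns\n", (unsigned long long)cyc2ns_split(cyc, scale));
	return 0;
}

Running both on the same inputs shows the naive form wrapping, while the decomposed form stays exact up to the usual rounding of the remainder term.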
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index 10474fb1185..cf1d73643f6 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -57,6 +57,7 @@
#define UV1_HUB_PART_NUMBER 0x88a5
#define UV2_HUB_PART_NUMBER 0x8eb8
+#define UV2_HUB_PART_NUMBER_X 0x1111
/* Compat: if this #define is present, UV headers support UV2 */
#define UV2_HUB_IS_SUPPORTED 1
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 62ae3001ae0..9d59bbacd4e 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -93,6 +93,8 @@ static int __init early_get_pnodeid(void)
if (node_id.s.part_number == UV2_HUB_PART_NUMBER)
uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1;
+ if (node_id.s.part_number == UV2_HUB_PART_NUMBER_X)
+ uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1;
uv_hub_info->hub_revision = uv_min_hub_revision_id;
pnode = (node_id.s.node_id >> 1) & ((1 << m_n_config.s.n_skt) - 1);
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index c7e46cb3532..0bab2b18bb2 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -442,8 +442,6 @@ static void __cpuinit bsp_init_amd(struct cpuinfo_x86 *c)
static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
{
- u32 dummy;
-
early_init_amd_mc(c);
/*
@@ -473,12 +471,12 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
}
#endif
-
- rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
}
static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
+ u32 dummy;
+
#ifdef CONFIG_SMP
unsigned long long value;
@@ -657,6 +655,8 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
checking_wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
}
}
+
+ rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
}
#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index a71efcdbb09..97b26356e9e 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -547,6 +547,7 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
if (tmp != mask_lo) {
printk(KERN_WARNING "mtrr: your BIOS has configured an incorrect mask, fixing it.\n");
+ add_taint(TAINT_FIRMWARE_WORKAROUND);
mask_lo = tmp;
}
}
@@ -693,6 +694,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
/* Disable MTRRs, and set the default type to uncached */
mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);
+ wbinvd();
}
static void post_set(void) __releases(set_atomicity_lock)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 640891014b2..2bda212a001 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -312,12 +312,8 @@ int x86_setup_perfctr(struct perf_event *event)
return -EOPNOTSUPP;
}
- /*
- * Do not allow config1 (extended registers) to propagate,
- * there's no sane user-space generalization yet:
- */
if (attr->type == PERF_TYPE_RAW)
- return 0;
+ return x86_pmu_extra_regs(event->attr.config, event);
if (attr->type == PERF_TYPE_HW_CACHE)
return set_ext_hw_attr(hwc, event);
@@ -588,7 +584,7 @@ done:
x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
}
}
- return num ? -ENOSPC : 0;
+ return num ? -EINVAL : 0;
}
/*
@@ -607,7 +603,7 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
if (is_x86_event(leader)) {
if (n >= max_count)
- return -ENOSPC;
+ return -EINVAL;
cpuc->event_list[n] = leader;
n++;
}
@@ -620,7 +616,7 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
continue;
if (n >= max_count)
- return -ENOSPC;
+ return -EINVAL;
cpuc->event_list[n] = event;
n++;
@@ -1316,7 +1312,7 @@ static int validate_event(struct perf_event *event)
c = x86_pmu.get_event_constraints(fake_cpuc, event);
if (!c || !c->weight)
- ret = -ENOSPC;
+ ret = -EINVAL;
if (x86_pmu.put_event_constraints)
x86_pmu.put_event_constraints(fake_cpuc, event);
@@ -1341,7 +1337,7 @@ static int validate_group(struct perf_event *event)
{
struct perf_event *leader = event->group_leader;
struct cpu_hw_events *fake_cpuc;
- int ret = -ENOSPC, n;
+ int ret = -EINVAL, n;
fake_cpuc = allocate_fake_cpuc();
if (IS_ERR(fake_cpuc))
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
index ab6343d2182..3b8a2d30d14 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
@@ -199,8 +199,7 @@ static int force_ibs_eilvt_setup(void)
goto out;
}
- pr_err(FW_BUG "using offset %d for IBS interrupts\n", offset);
- pr_err(FW_BUG "workaround enabled for IBS LVT offset\n");
+ pr_info("IBS: LVT offset %d assigned\n", offset);
return 0;
out:
@@ -265,19 +264,23 @@ perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *h
static __init int amd_ibs_init(void)
{
u32 caps;
- int ret;
+ int ret = -EINVAL;
caps = __get_ibs_caps();
if (!caps)
return -ENODEV; /* ibs not supported by the cpu */
- if (!ibs_eilvt_valid()) {
- ret = force_ibs_eilvt_setup();
- if (ret) {
- pr_err("Failed to setup IBS, %d\n", ret);
- return ret;
- }
- }
+ /*
+ * Force LVT offset assignment for family 10h: The offsets are
+ * not assigned by the BIOS for this family, so the OS is
+ * responsible for doing it. If the OS assignment fails, fall
+	 * back to the BIOS settings and try to set this up.
+ */
+ if (boot_cpu_data.x86 == 0x10)
+ force_ibs_eilvt_setup();
+
+ if (!ibs_eilvt_valid())
+ goto out;
get_online_cpus();
ibs_caps = caps;
@@ -287,7 +290,11 @@ static __init int amd_ibs_init(void)
smp_call_function(setup_APIC_ibs, NULL, 1);
put_online_cpus();
- return perf_event_ibs_init();
+ ret = perf_event_ibs_init();
+out:
+ if (ret)
+ pr_err("Failed to setup IBS, %d\n", ret);
+ return ret;
}
/* Since we need the pci subsystem to init ibs we can't do this earlier: */
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 2be5ebe9987..8d601b18bf9 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1545,6 +1545,13 @@ static void intel_clovertown_quirks(void)
x86_pmu.pebs_constraints = NULL;
}
+static void intel_sandybridge_quirks(void)
+{
+ printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
+ x86_pmu.pebs = 0;
+ x86_pmu.pebs_constraints = NULL;
+}
+
__init int intel_pmu_init(void)
{
union cpuid10_edx edx;
@@ -1694,6 +1701,7 @@ __init int intel_pmu_init(void)
break;
case 42: /* SandyBridge */
+ x86_pmu.quirks = intel_sandybridge_quirks;
	case 45: /* SandyBridge, "Romley-EP" */
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index c0d238f49db..73da6b64f5b 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -493,6 +493,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
unsigned long from = cpuc->lbr_entries[0].from;
unsigned long old_to, to = cpuc->lbr_entries[0].to;
unsigned long ip = regs->ip;
+ int is_64bit = 0;
/*
* We don't need to fixup if the PEBS assist is fault like
@@ -544,7 +545,10 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
} else
kaddr = (void *)to;
- kernel_insn_init(&insn, kaddr);
+#ifdef CONFIG_X86_64
+ is_64bit = kernel_ip(to) || !test_thread_flag(TIF_IA32);
+#endif
+ insn_init(&insn, kaddr, is_64bit);
insn_get_length(&insn);
to += insn.length;
} while (to < ip);
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 492bf1358a7..ef484d9d0a2 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -1268,7 +1268,7 @@ reserve:
}
done:
- return num ? -ENOSPC : 0;
+ return num ? -EINVAL : 0;
}
static __initconst const struct x86_pmu p4_pmu = {
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index 3b97a80ce32..c99f9ed013d 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -116,16 +116,16 @@ void show_registers(struct pt_regs *regs)
for (i = 0; i < code_len; i++, ip++) {
if (ip < (u8 *)PAGE_OFFSET ||
probe_kernel_address(ip, c)) {
- printk(" Bad EIP value.");
+ printk(KERN_CONT " Bad EIP value.");
break;
}
if (ip == (u8 *)regs->ip)
- printk("<%02x> ", c);
+ printk(KERN_CONT "<%02x> ", c);
else
- printk("%02x ", c);
+ printk(KERN_CONT "%02x ", c);
}
}
- printk("\n");
+ printk(KERN_CONT "\n");
}
int is_valid_bugaddr(unsigned long ip)
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 19853ad8afc..6d728d9284b 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -284,16 +284,16 @@ void show_registers(struct pt_regs *regs)
for (i = 0; i < code_len; i++, ip++) {
if (ip < (u8 *)PAGE_OFFSET ||
probe_kernel_address(ip, c)) {
- printk(" Bad RIP value.");
+ printk(KERN_CONT " Bad RIP value.");
break;
}
if (ip == (u8 *)regs->ip)
- printk("<%02x> ", c);
+ printk(KERN_CONT "<%02x> ", c);
else
- printk("%02x ", c);
+ printk(KERN_CONT "%02x ", c);
}
}
- printk("\n");
+ printk(KERN_CONT "\n");
}
int is_valid_bugaddr(unsigned long ip)
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index b946a9eac7d..1bb0bf4d92c 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -1049,6 +1049,14 @@ int hpet_rtc_timer_init(void)
}
EXPORT_SYMBOL_GPL(hpet_rtc_timer_init);
+static void hpet_disable_rtc_channel(void)
+{
+ unsigned long cfg;
+ cfg = hpet_readl(HPET_T1_CFG);
+ cfg &= ~HPET_TN_ENABLE;
+ hpet_writel(cfg, HPET_T1_CFG);
+}
+
/*
* The functions below are called from rtc driver.
* Return 0 if HPET is not being used.
@@ -1060,6 +1068,9 @@ int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
return 0;
hpet_rtc_flags &= ~bit_mask;
+ if (unlikely(!hpet_rtc_flags))
+ hpet_disable_rtc_channel();
+
return 1;
}
EXPORT_SYMBOL_GPL(hpet_mask_rtc_irq_bit);
@@ -1125,15 +1136,11 @@ EXPORT_SYMBOL_GPL(hpet_rtc_dropped_irq);
static void hpet_rtc_timer_reinit(void)
{
- unsigned int cfg, delta;
+ unsigned int delta;
int lost_ints = -1;
- if (unlikely(!hpet_rtc_flags)) {
- cfg = hpet_readl(HPET_T1_CFG);
- cfg &= ~HPET_TN_ENABLE;
- hpet_writel(cfg, HPET_T1_CFG);
- return;
- }
+ if (unlikely(!hpet_rtc_flags))
+ hpet_disable_rtc_channel();
if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
delta = hpet_default_delta;
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index acf8fbf8fbd..69bca468c47 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -38,6 +38,9 @@ static inline void stack_overflow_check(struct pt_regs *regs)
#ifdef CONFIG_DEBUG_STACKOVERFLOW
u64 curbase = (u64)task_stack_page(current);
+ if (user_mode_vm(regs))
+ return;
+
WARN_ONCE(regs->sp >= curbase &&
regs->sp <= curbase + THREAD_SIZE &&
regs->sp < curbase + sizeof(struct thread_info) +
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index f2d2a664e79..9d46f5e43b5 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -256,7 +256,7 @@ static int __init microcode_dev_init(void)
return 0;
}
-static void microcode_dev_exit(void)
+static void __exit microcode_dev_exit(void)
{
misc_deregister(&microcode_dev);
}
@@ -519,10 +519,8 @@ static int __init microcode_init(void)
microcode_pdev = platform_device_register_simple("microcode", -1,
NULL, 0);
- if (IS_ERR(microcode_pdev)) {
- microcode_dev_exit();
+ if (IS_ERR(microcode_pdev))
return PTR_ERR(microcode_pdev);
- }
get_online_cpus();
mutex_lock(&microcode_mutex);
@@ -532,14 +530,12 @@ static int __init microcode_init(void)
mutex_unlock(&microcode_mutex);
put_online_cpus();
- if (error) {
- platform_device_unregister(microcode_pdev);
- return error;
- }
+ if (error)
+ goto out_pdev;
error = microcode_dev_init();
if (error)
- return error;
+ goto out_sysdev_driver;
register_syscore_ops(&mc_syscore_ops);
register_hotcpu_notifier(&mc_cpu_notifier);
@@ -548,6 +544,20 @@ static int __init microcode_init(void)
" <tigran@aivazian.fsnet.co.uk>, Peter Oruba\n");
return 0;
+
+out_sysdev_driver:
+ get_online_cpus();
+ mutex_lock(&microcode_mutex);
+
+ sysdev_driver_unregister(&cpu_sysdev_class, &mc_sysdev_driver);
+
+ mutex_unlock(&microcode_mutex);
+ put_online_cpus();
+
+out_pdev:
+ platform_device_unregister(microcode_pdev);
+ return error;
+
}
module_init(microcode_init);
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 9103b89c145..0741b062a30 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -95,8 +95,8 @@ static void __init MP_bus_info(struct mpc_bus *m)
}
#endif
+ set_bit(m->busid, mp_bus_not_pci);
if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) {
- set_bit(m->busid, mp_bus_not_pci);
#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
mp_bus_id_to_type[m->busid] = MP_BUS_ISA;
#endif
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index b9b3b1a5164..ee5d4fbd53b 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -403,6 +403,14 @@ void default_idle(void)
EXPORT_SYMBOL(default_idle);
#endif
+bool set_pm_idle_to_default(void)
+{
+ bool ret = !!pm_idle;
+
+ pm_idle = default_idle;
+
+ return ret;
+}
void stop_this_cpu(void *dummy)
{
local_irq_disable();
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index b78643d0f9a..03920a15a63 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -553,4 +553,17 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC,
quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_LINK,
quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F0,
+ quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F1,
+ quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F2,
+ quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3,
+ quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4,
+ quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F5,
+ quirk_amd_nb_node);
+
#endif
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index e334be1182b..37a458b521a 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -124,7 +124,7 @@ __setup("reboot=", reboot_setup);
*/
/*
- * Some machines require the "reboot=b" commandline option,
+ * Some machines require the "reboot=b" or "reboot=k" commandline options,
* this quirk makes that automatic.
*/
static int __init set_bios_reboot(const struct dmi_system_id *d)
@@ -136,6 +136,15 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
return 0;
}
+static int __init set_kbd_reboot(const struct dmi_system_id *d)
+{
+ if (reboot_type != BOOT_KBD) {
+ reboot_type = BOOT_KBD;
+ printk(KERN_INFO "%s series board detected. Selecting KBD-method for reboot.\n", d->ident);
+ }
+ return 0;
+}
+
static struct dmi_system_id __initdata reboot_dmi_table[] = {
{ /* Handle problems with rebooting on Dell E520's */
.callback = set_bios_reboot,
@@ -295,7 +304,7 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
},
},
{ /* Handle reboot issue on Acer Aspire one */
- .callback = set_bios_reboot,
+ .callback = set_kbd_reboot,
.ident = "Acer Aspire One A110",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
@@ -443,6 +452,14 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6420"),
},
},
+ { /* Handle problems with rebooting on the OptiPlex 990. */
+ .callback = set_pci_reboot,
+ .ident = "Dell OptiPlex 990",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"),
+ },
+ },
{ }
};
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
index 348ce016a83..af6db6ec5b2 100644
--- a/arch/x86/kernel/rtc.c
+++ b/arch/x86/kernel/rtc.c
@@ -12,6 +12,7 @@
#include <asm/vsyscall.h>
#include <asm/x86_init.h>
#include <asm/time.h>
+#include <asm/mrst.h>
#ifdef CONFIG_X86_32
/*
@@ -242,6 +243,10 @@ static __init int add_rtc_cmos(void)
if (of_have_populated_dt())
return 0;
+ /* Intel MID platforms don't have ioport rtc */
+ if (mrst_identify_cpu())
+ return -ENODEV;
+
platform_device_register(&rtc_device);
dev_info(&rtc_device.dev,
"registered platform RTC device (no PNP device found)\n");
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index ea305856151..dd74e46828c 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -201,6 +201,8 @@ static noinline int gup_huge_pud(pud_t pud, unsigned long addr,
do {
VM_BUG_ON(compound_head(page) != head);
pages[*nr] = page;
+ if (PageTail(page))
+ get_huge_page_tail(page);
(*nr)++;
page++;
refs++;
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index b4996266210..f4f29b19fac 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -45,6 +45,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
BUG_ON(!pte_none(*(kmap_pte-idx)));
set_pte(kmap_pte-idx, mk_pte(page, prot));
+ arch_flush_lazy_mmu_mode();
return (void *)vaddr;
}
@@ -88,6 +89,7 @@ void __kunmap_atomic(void *kvaddr)
*/
kpte_clear_flush(kmap_pte-idx, vaddr);
kmap_atomic_idx_pop();
+ arch_flush_lazy_mmu_mode();
}
#ifdef CONFIG_DEBUG_HIGHMEM
else {
diff --git a/arch/x86/oprofile/init.c b/arch/x86/oprofile/init.c
index cdfe4c54dec..f148cf65267 100644
--- a/arch/x86/oprofile/init.c
+++ b/arch/x86/oprofile/init.c
@@ -21,6 +21,7 @@ extern int op_nmi_timer_init(struct oprofile_operations *ops);
extern void op_nmi_exit(void);
extern void x86_backtrace(struct pt_regs * const regs, unsigned int depth);
+static int nmi_timer;
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
@@ -31,8 +32,9 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
#ifdef CONFIG_X86_LOCAL_APIC
ret = op_nmi_init(ops);
#endif
+ nmi_timer = (ret != 0);
#ifdef CONFIG_X86_IO_APIC
- if (ret < 0)
+ if (nmi_timer)
ret = op_nmi_timer_init(ops);
#endif
ops->backtrace = x86_backtrace;
@@ -44,6 +46,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
void oprofile_arch_exit(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
- op_nmi_exit();
+ if (!nmi_timer)
+ op_nmi_exit();
#endif
}
diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
index e36bf714cb7..40e446941dd 100644
--- a/arch/x86/platform/efi/efi_32.c
+++ b/arch/x86/platform/efi/efi_32.c
@@ -39,43 +39,14 @@
*/
static unsigned long efi_rt_eflags;
-static pgd_t efi_bak_pg_dir_pointer[2];
void efi_call_phys_prelog(void)
{
- unsigned long cr4;
- unsigned long temp;
struct desc_ptr gdt_descr;
local_irq_save(efi_rt_eflags);
- /*
- * If I don't have PAE, I should just duplicate two entries in page
- * directory. If I have PAE, I just need to duplicate one entry in
- * page directory.
- */
- cr4 = read_cr4_safe();
-
- if (cr4 & X86_CR4_PAE) {
- efi_bak_pg_dir_pointer[0].pgd =
- swapper_pg_dir[pgd_index(0)].pgd;
- swapper_pg_dir[0].pgd =
- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
- } else {
- efi_bak_pg_dir_pointer[0].pgd =
- swapper_pg_dir[pgd_index(0)].pgd;
- efi_bak_pg_dir_pointer[1].pgd =
- swapper_pg_dir[pgd_index(0x400000)].pgd;
- swapper_pg_dir[pgd_index(0)].pgd =
- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
- temp = PAGE_OFFSET + 0x400000;
- swapper_pg_dir[pgd_index(0x400000)].pgd =
- swapper_pg_dir[pgd_index(temp)].pgd;
- }
-
- /*
- * After the lock is released, the original page table is restored.
- */
+ load_cr3(initial_page_table);
__flush_tlb_all();
gdt_descr.address = __pa(get_cpu_gdt_table(0));
@@ -85,28 +56,13 @@ void efi_call_phys_prelog(void)
void efi_call_phys_epilog(void)
{
- unsigned long cr4;
struct desc_ptr gdt_descr;
gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
gdt_descr.size = GDT_SIZE - 1;
load_gdt(&gdt_descr);
- cr4 = read_cr4_safe();
-
- if (cr4 & X86_CR4_PAE) {
- swapper_pg_dir[pgd_index(0)].pgd =
- efi_bak_pg_dir_pointer[0].pgd;
- } else {
- swapper_pg_dir[pgd_index(0)].pgd =
- efi_bak_pg_dir_pointer[0].pgd;
- swapper_pg_dir[pgd_index(0x400000)].pgd =
- efi_bak_pg_dir_pointer[1].pgd;
- }
-
- /*
- * After the lock is released, the original page table is restored.
- */
+ load_cr3(swapper_pg_dir);
__flush_tlb_all();
local_irq_restore(efi_rt_eflags);
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
index b1489a06a49..ad4ec1cb097 100644
--- a/arch/x86/platform/mrst/mrst.c
+++ b/arch/x86/platform/mrst/mrst.c
@@ -76,6 +76,20 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
EXPORT_SYMBOL_GPL(sfi_mrtc_array);
int sfi_mrtc_num;
+static void mrst_power_off(void)
+{
+ if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
+ intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 1);
+}
+
+static void mrst_reboot(void)
+{
+ if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
+ intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
+ else
+ intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
+}
+
/* parse all the mtimer info to a static mtimer array */
static int __init sfi_parse_mtmr(struct sfi_table_header *table)
{
@@ -265,17 +279,6 @@ static int mrst_i8042_detect(void)
return 0;
}
-/* Reboot and power off are handled by the SCU on a MID device */
-static void mrst_power_off(void)
-{
- intel_scu_ipc_simple_command(0xf1, 1);
-}
-
-static void mrst_reboot(void)
-{
- intel_scu_ipc_simple_command(0xf1, 0);
-}
-
/*
* Moorestown does not have external NMI source nor port 0x61 to report
* NMI status. The possible NMI sources are from pmu as a result of NMI
@@ -484,6 +487,46 @@ static void __init *max7315_platform_data(void *info)
return max7315;
}
+static void *tca6416_platform_data(void *info)
+{
+ static struct pca953x_platform_data tca6416;
+ struct i2c_board_info *i2c_info = info;
+ int gpio_base, intr;
+ char base_pin_name[SFI_NAME_LEN + 1];
+ char intr_pin_name[SFI_NAME_LEN + 1];
+
+ strcpy(i2c_info->type, "tca6416");
+ strcpy(base_pin_name, "tca6416_base");
+ strcpy(intr_pin_name, "tca6416_int");
+
+ gpio_base = get_gpio_by_name(base_pin_name);
+ intr = get_gpio_by_name(intr_pin_name);
+
+ if (gpio_base == -1)
+ return NULL;
+ tca6416.gpio_base = gpio_base;
+ if (intr != -1) {
+ i2c_info->irq = intr + MRST_IRQ_OFFSET;
+ tca6416.irq_base = gpio_base + MRST_IRQ_OFFSET;
+ } else {
+ i2c_info->irq = -1;
+ tca6416.irq_base = -1;
+ }
+ return &tca6416;
+}
+
+static void *mpu3050_platform_data(void *info)
+{
+ struct i2c_board_info *i2c_info = info;
+ int intr = get_gpio_by_name("mpu3050_int");
+
+ if (intr == -1)
+ return NULL;
+
+ i2c_info->irq = intr + MRST_IRQ_OFFSET;
+ return NULL;
+}
+
static void __init *emc1403_platform_data(void *info)
{
static short intr2nd_pdata;
@@ -646,12 +689,15 @@ static void *msic_ocd_platform_data(void *info)
static const struct devs_id __initconst device_ids[] = {
{"bma023", SFI_DEV_TYPE_I2C, 1, &no_platform_data},
{"pmic_gpio", SFI_DEV_TYPE_SPI, 1, &pmic_gpio_platform_data},
+ {"pmic_gpio", SFI_DEV_TYPE_IPC, 1, &pmic_gpio_platform_data},
{"spi_max3111", SFI_DEV_TYPE_SPI, 0, &max3111_platform_data},
{"i2c_max7315", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data},
{"i2c_max7315_2", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data},
+ {"tca6416", SFI_DEV_TYPE_I2C, 1, &tca6416_platform_data},
{"emc1403", SFI_DEV_TYPE_I2C, 1, &emc1403_platform_data},
{"i2c_accel", SFI_DEV_TYPE_I2C, 0, &lis331dl_platform_data},
{"pmic_audio", SFI_DEV_TYPE_IPC, 1, &no_platform_data},
+ {"mpu3050", SFI_DEV_TYPE_I2C, 1, &mpu3050_platform_data},
/* MSIC subdevices */
{"msic_battery", SFI_DEV_TYPE_IPC, 1, &msic_battery_platform_data},
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 38d0af4fefe..b2c7179fa26 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -173,9 +173,21 @@ static unsigned long __init xen_get_max_pages(void)
domid_t domid = DOMID_SELF;
int ret;
- ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
- if (ret > 0)
- max_pages = ret;
+ /*
+ * For the initial domain we use the maximum reservation as
+ * the maximum page.
+ *
+ * For guest domains the current maximum reservation reflects
+ * the current maximum rather than the static maximum. In this
+ * case the e820 map provided to us will cover the static
+ * maximum region.
+ */
+ if (xen_initial_domain()) {
+ ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
+ if (ret > 0)
+ max_pages = ret;
+ }
+
return min(max_pages, MAX_DOMAIN_PAGES);
}
@@ -410,6 +422,6 @@ void __init xen_arch_setup(void)
#endif
disable_cpuidle();
boot_option_idle_override = IDLE_HALT;
-
+ WARN_ON(set_pm_idle_to_default());
fiddle_vdso();
}
diff --git a/block/blk-core.c b/block/blk-core.c
index ea70e6c80cd..15de223c7f9 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -366,7 +366,14 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
if (drain_all)
blk_throtl_drain(q);
- __blk_run_queue(q);
+ /*
+ * This function might be called on a queue which failed
+ * driver init after queue creation. Some drivers
+ * (e.g. fd) get unhappy in such cases. Kick queue iff
+ * dispatch queue has something on it.
+ */
+ if (!list_empty(&q->queue_head))
+ __blk_run_queue(q);
if (drain_all)
nr_rqs = q->rq.count[0] + q->rq.count[1];
@@ -467,6 +474,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
q->backing_dev_info.state = 0;
q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
q->backing_dev_info.name = "block";
+ q->node = node_id;
err = bdi_init(&q->backing_dev_info);
if (err) {
@@ -551,7 +559,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
if (!uninit_q)
return NULL;
- q = blk_init_allocated_queue_node(uninit_q, rfn, lock, node_id);
+ q = blk_init_allocated_queue(uninit_q, rfn, lock);
if (!q)
blk_cleanup_queue(uninit_q);
@@ -563,18 +571,9 @@ struct request_queue *
blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
spinlock_t *lock)
{
- return blk_init_allocated_queue_node(q, rfn, lock, -1);
-}
-EXPORT_SYMBOL(blk_init_allocated_queue);
-
-struct request_queue *
-blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
- spinlock_t *lock, int node_id)
-{
if (!q)
return NULL;
- q->node = node_id;
if (blk_init_free_list(q))
return NULL;
@@ -604,7 +603,7 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
return NULL;
}
-EXPORT_SYMBOL(blk_init_allocated_queue_node);
+EXPORT_SYMBOL(blk_init_allocated_queue);
int blk_get_queue(struct request_queue *q)
{
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 16ace89613b..4c12869fcf7 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -3184,7 +3184,7 @@ static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
}
}
- if (ret)
+ if (ret && ret != -EEXIST)
printk(KERN_ERR "cfq: cic link failed!\n");
return ret;
@@ -3200,6 +3200,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
{
struct io_context *ioc = NULL;
struct cfq_io_context *cic;
+ int ret;
might_sleep_if(gfp_mask & __GFP_WAIT);
@@ -3207,6 +3208,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
if (!ioc)
return NULL;
+retry:
cic = cfq_cic_lookup(cfqd, ioc);
if (cic)
goto out;
@@ -3215,7 +3217,12 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
if (cic == NULL)
goto err;
- if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
+ ret = cfq_cic_link(cfqd, ioc, cic, gfp_mask);
+ if (ret == -EEXIST) {
+ /* someone has linked cic to ioc already */
+ cfq_cic_free(cic);
+ goto retry;
+ } else if (ret)
goto err_free;
out:
@@ -4036,6 +4043,11 @@ static void *cfq_init_queue(struct request_queue *q)
if (blkio_alloc_blkg_stats(&cfqg->blkg)) {
kfree(cfqg);
+
+ spin_lock(&cic_index_lock);
+ ida_remove(&cic_index_ida, cfqd->cic_index);
+ spin_unlock(&cic_index_lock);
+
kfree(cfqd);
return NULL;
}
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 127408069ca..631b9477b99 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -932,7 +932,8 @@ static int erst_check_table(struct acpi_table_erst *erst_tab)
static int erst_open_pstore(struct pstore_info *psi);
static int erst_close_pstore(struct pstore_info *psi);
static ssize_t erst_reader(u64 *id, enum pstore_type_id *type,
- struct timespec *time, struct pstore_info *psi);
+ struct timespec *time, char **buf,
+ struct pstore_info *psi);
static int erst_writer(enum pstore_type_id type, u64 *id, unsigned int part,
size_t size, struct pstore_info *psi);
static int erst_clearer(enum pstore_type_id type, u64 id,
@@ -986,17 +987,23 @@ static int erst_close_pstore(struct pstore_info *psi)
}
static ssize_t erst_reader(u64 *id, enum pstore_type_id *type,
- struct timespec *time, struct pstore_info *psi)
+ struct timespec *time, char **buf,
+ struct pstore_info *psi)
{
int rc;
ssize_t len = 0;
u64 record_id;
- struct cper_pstore_record *rcd = (struct cper_pstore_record *)
- (erst_info.buf - sizeof(*rcd));
+ struct cper_pstore_record *rcd;
+ size_t rcd_len = sizeof(*rcd) + erst_info.bufsize;
if (erst_disable)
return -ENODEV;
+ rcd = kmalloc(rcd_len, GFP_KERNEL);
+ if (!rcd) {
+ rc = -ENOMEM;
+ goto out;
+ }
skip:
rc = erst_get_record_id_next(&reader_pos, &record_id);
if (rc)
@@ -1004,22 +1011,27 @@ skip:
/* no more record */
if (record_id == APEI_ERST_INVALID_RECORD_ID) {
- rc = -1;
+ rc = -EINVAL;
goto out;
}
- len = erst_read(record_id, &rcd->hdr, sizeof(*rcd) +
- erst_info.bufsize);
+ len = erst_read(record_id, &rcd->hdr, rcd_len);
/* The record may be cleared by others, try read next record */
if (len == -ENOENT)
goto skip;
- else if (len < 0) {
- rc = -1;
+ else if (len < sizeof(*rcd)) {
+ rc = -EIO;
goto out;
}
if (uuid_le_cmp(rcd->hdr.creator_id, CPER_CREATOR_PSTORE) != 0)
goto skip;
+ *buf = kmalloc(len, GFP_KERNEL);
+ if (*buf == NULL) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ memcpy(*buf, rcd->data, len - sizeof(*rcd));
*id = record_id;
if (uuid_le_cmp(rcd->sec_hdr.section_type,
CPER_SECTION_TYPE_DMESG) == 0)
@@ -1037,6 +1049,7 @@ skip:
time->tv_nsec = 0;
out:
+ kfree(rcd);
return (rc < 0) ? rc : (len - sizeof(*rcd));
}
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index ec555951176..43b875810d1 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -67,7 +67,7 @@ static int __init ahci_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct ahci_platform_data *pdata = dev_get_platdata(dev);
const struct platform_device_id *id = platform_get_device_id(pdev);
- struct ata_port_info pi = ahci_port_info[id->driver_data];
+ struct ata_port_info pi = ahci_port_info[id ? id->driver_data : 0];
const struct ata_port_info *ppi[] = { &pi, NULL };
struct ahci_host_priv *hpriv;
struct ata_host *host;
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 63d53277d6a..4cadfa28f94 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -2533,10 +2533,12 @@ static int ata_pci_init_one(struct pci_dev *pdev,
if (rc)
goto out;
+#ifdef CONFIG_ATA_BMDMA
if (bmdma)
/* prepare and activate BMDMA host */
rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
else
+#endif
/* prepare and activate SFF host */
rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
if (rc)
@@ -2544,10 +2546,12 @@ static int ata_pci_init_one(struct pci_dev *pdev,
host->private_data = host_priv;
host->flags |= hflags;
+#ifdef CONFIG_ATA_BMDMA
if (bmdma) {
pci_set_master(pdev);
rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
} else
+#endif
rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
out:
if (rc == 0)
diff --git a/drivers/base/core.c b/drivers/base/core.c
index d8b3d89db04..919daa7cd5b 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1743,8 +1743,10 @@ void device_shutdown(void)
*/
list_del_init(&dev->kobj.entry);
spin_unlock(&devices_kset->list_lock);
- /* Disable all device's runtime power management */
- pm_runtime_disable(dev);
+
+ /* Don't allow any more runtime suspends */
+ pm_runtime_get_noresume(dev);
+ pm_runtime_barrier(dev);
if (dev->bus && dev->bus->shutdown) {
dev_dbg(dev, "shutdown\n");
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 8004ac30a7a..587cce57ada 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -2601,6 +2601,8 @@ static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff,
c->Request.Timeout = 0;
c->Request.CDB[0] = BMIC_WRITE;
c->Request.CDB[6] = BMIC_CACHE_FLUSH;
+ c->Request.CDB[7] = (size >> 8) & 0xFF;
+ c->Request.CDB[8] = size & 0xFF;
break;
case TEST_UNIT_READY:
c->Request.CDBLen = 6;
@@ -4880,7 +4882,7 @@ static int cciss_request_irq(ctlr_info_t *h,
{
if (h->msix_vector || h->msi_vector) {
if (!request_irq(h->intr[h->intr_mode], msixhandler,
- IRQF_DISABLED, h->devname, h))
+ 0, h->devname, h))
return 0;
dev_err(&h->pdev->dev, "Unable to get msi irq %d"
" for %s\n", h->intr[h->intr_mode],
@@ -4889,7 +4891,7 @@ static int cciss_request_irq(ctlr_info_t *h,
}
if (!request_irq(h->intr[h->intr_mode], intxhandler,
- IRQF_DISABLED, h->devname, h))
+ IRQF_SHARED, h->devname, h))
return 0;
dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n",
h->intr[h->intr_mode], h->devname);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 68b205a9338..1e888c9e85b 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -422,7 +422,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
/*
* We use punch hole to reclaim the free space used by the
- * image a.k.a. discard. However we do support discard if
+ * image a.k.a. discard. However we do not support discard if
* encryption is enabled, because it may give an attacker
* useful information.
*/
@@ -797,7 +797,7 @@ static void loop_config_discard(struct loop_device *lo)
}
q->limits.discard_granularity = inode->i_sb->s_blocksize;
- q->limits.discard_alignment = inode->i_sb->s_blocksize;
+ q->limits.discard_alignment = 0;
q->limits.max_discard_sectors = UINT_MAX >> 9;
q->limits.discard_zeroes_data = 1;
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 65cc424359b..148ab944378 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -183,10 +183,6 @@ static LIST_HEAD(rbd_client_list); /* clients */
static int __rbd_init_snaps_header(struct rbd_device *rbd_dev);
static void rbd_dev_release(struct device *dev);
-static ssize_t rbd_snap_rollback(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t size);
static ssize_t rbd_snap_add(struct device *dev,
struct device_attribute *attr,
const char *buf,
@@ -461,6 +457,10 @@ static int rbd_header_from_disk(struct rbd_image_header *header,
u32 snap_count = le32_to_cpu(ondisk->snap_count);
int ret = -ENOMEM;
+ if (memcmp(ondisk, RBD_HEADER_TEXT, sizeof(RBD_HEADER_TEXT))) {
+ return -ENXIO;
+ }
+
init_rwsem(&header->snap_rwsem);
header->snap_names_len = le64_to_cpu(ondisk->snap_names_len);
header->snapc = kmalloc(sizeof(struct ceph_snap_context) +
@@ -1356,32 +1356,6 @@ fail:
}
/*
- * Request sync osd rollback
- */
-static int rbd_req_sync_rollback_obj(struct rbd_device *dev,
- u64 snapid,
- const char *obj)
-{
- struct ceph_osd_req_op *ops;
- int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_ROLLBACK, 0);
- if (ret < 0)
- return ret;
-
- ops[0].snap.snapid = snapid;
-
- ret = rbd_req_sync_op(dev, NULL,
- CEPH_NOSNAP,
- 0,
- CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
- ops,
- 1, obj, 0, 0, NULL, NULL, NULL);
-
- rbd_destroy_ops(ops);
-
- return ret;
-}
-
-/*
* Request sync osd read
*/
static int rbd_req_sync_exec(struct rbd_device *dev,
@@ -1610,8 +1584,13 @@ static int rbd_read_header(struct rbd_device *rbd_dev,
goto out_dh;
rc = rbd_header_from_disk(header, dh, snap_count, GFP_KERNEL);
- if (rc < 0)
+ if (rc < 0) {
+ if (rc == -ENXIO) {
+ pr_warning("unrecognized header format"
+ " for image %s", rbd_dev->obj);
+ }
goto out_dh;
+ }
if (snap_count != header->total_snaps) {
snap_count = header->total_snaps;
@@ -1882,7 +1861,6 @@ static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
static DEVICE_ATTR(create_snap, S_IWUSR, NULL, rbd_snap_add);
-static DEVICE_ATTR(rollback_snap, S_IWUSR, NULL, rbd_snap_rollback);
static struct attribute *rbd_attrs[] = {
&dev_attr_size.attr,
@@ -1893,7 +1871,6 @@ static struct attribute *rbd_attrs[] = {
&dev_attr_current_snap.attr,
&dev_attr_refresh.attr,
&dev_attr_create_snap.attr,
- &dev_attr_rollback_snap.attr,
NULL
};
@@ -2424,64 +2401,6 @@ err_unlock:
return ret;
}
-static ssize_t rbd_snap_rollback(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct rbd_device *rbd_dev = dev_to_rbd(dev);
- int ret;
- u64 snapid;
- u64 cur_ofs;
- char *seg_name = NULL;
- char *snap_name = kmalloc(count + 1, GFP_KERNEL);
- ret = -ENOMEM;
- if (!snap_name)
- return ret;
-
- /* parse snaps add command */
- snprintf(snap_name, count, "%s", buf);
- seg_name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO);
- if (!seg_name)
- goto done;
-
- mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-
- ret = snap_by_name(&rbd_dev->header, snap_name, &snapid, NULL);
- if (ret < 0)
- goto done_unlock;
-
- dout("snapid=%lld\n", snapid);
-
- cur_ofs = 0;
- while (cur_ofs < rbd_dev->header.image_size) {
- cur_ofs += rbd_get_segment(&rbd_dev->header,
- rbd_dev->obj,
- cur_ofs, (u64)-1,
- seg_name, NULL);
- dout("seg_name=%s\n", seg_name);
-
- ret = rbd_req_sync_rollback_obj(rbd_dev, snapid, seg_name);
- if (ret < 0)
- pr_warning("could not roll back obj %s err=%d\n",
- seg_name, ret);
- }
-
- ret = __rbd_update_snaps(rbd_dev);
- if (ret < 0)
- goto done_unlock;
-
- ret = count;
-
-done_unlock:
- mutex_unlock(&ctl_mutex);
-done:
- kfree(seg_name);
- kfree(snap_name);
-
- return ret;
-}
-
static struct bus_attribute rbd_bus_attrs[] = {
__ATTR(add, S_IWUSR, NULL, rbd_add),
__ATTR(remove, S_IWUSR, NULL, rbd_remove),
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index ae3e167e17a..89ddab127e3 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -16,6 +16,8 @@
* handle GCR disks
*/
+#undef DEBUG
+
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -36,13 +38,11 @@
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
-static DEFINE_MUTEX(swim3_mutex);
-static struct request_queue *swim3_queue;
-static struct gendisk *disks[2];
-static struct request *fd_req;
-
#define MAX_FLOPPIES 2
+static DEFINE_MUTEX(swim3_mutex);
+static struct gendisk *disks[MAX_FLOPPIES];
+
enum swim_state {
idle,
locating,
@@ -177,7 +177,6 @@ struct swim3 {
struct floppy_state {
enum swim_state state;
- spinlock_t lock;
struct swim3 __iomem *swim3; /* hardware registers */
struct dbdma_regs __iomem *dma; /* DMA controller registers */
int swim3_intr; /* interrupt number for SWIM3 */
@@ -204,8 +203,20 @@ struct floppy_state {
int wanted;
struct macio_dev *mdev;
char dbdma_cmd_space[5 * sizeof(struct dbdma_cmd)];
+ int index;
+ struct request *cur_req;
};
+#define swim3_err(fmt, arg...) dev_err(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+#define swim3_warn(fmt, arg...) dev_warn(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+#define swim3_info(fmt, arg...) dev_info(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+
+#ifdef DEBUG
+#define swim3_dbg(fmt, arg...) dev_dbg(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+#else
+#define swim3_dbg(fmt, arg...) do { } while(0)
+#endif
+
static struct floppy_state floppy_states[MAX_FLOPPIES];
static int floppy_count = 0;
static DEFINE_SPINLOCK(swim3_lock);
@@ -224,17 +235,8 @@ static unsigned short write_postamble[] = {
0, 0, 0, 0, 0, 0
};
-static void swim3_select(struct floppy_state *fs, int sel);
-static void swim3_action(struct floppy_state *fs, int action);
-static int swim3_readbit(struct floppy_state *fs, int bit);
-static void do_fd_request(struct request_queue * q);
-static void start_request(struct floppy_state *fs);
-static void set_timeout(struct floppy_state *fs, int nticks,
- void (*proc)(unsigned long));
-static void scan_track(struct floppy_state *fs);
static void seek_track(struct floppy_state *fs, int n);
static void init_dma(struct dbdma_cmd *cp, int cmd, void *buf, int count);
-static void setup_transfer(struct floppy_state *fs);
static void act(struct floppy_state *fs);
static void scan_timeout(unsigned long data);
static void seek_timeout(unsigned long data);
@@ -254,18 +256,21 @@ static unsigned int floppy_check_events(struct gendisk *disk,
unsigned int clearing);
static int floppy_revalidate(struct gendisk *disk);
-static bool swim3_end_request(int err, unsigned int nr_bytes)
+static bool swim3_end_request(struct floppy_state *fs, int err, unsigned int nr_bytes)
{
- if (__blk_end_request(fd_req, err, nr_bytes))
- return true;
+ struct request *req = fs->cur_req;
+ int rc;
- fd_req = NULL;
- return false;
-}
+ swim3_dbg(" end request, err=%d nr_bytes=%d, cur_req=%p\n",
+ err, nr_bytes, req);
-static bool swim3_end_request_cur(int err)
-{
- return swim3_end_request(err, blk_rq_cur_bytes(fd_req));
+ if (err)
+ nr_bytes = blk_rq_cur_bytes(req);
+ rc = __blk_end_request(req, err, nr_bytes);
+ if (rc)
+ return true;
+ fs->cur_req = NULL;
+ return false;
}
static void swim3_select(struct floppy_state *fs, int sel)
@@ -303,50 +308,53 @@ static int swim3_readbit(struct floppy_state *fs, int bit)
return (stat & DATA) == 0;
}
-static void do_fd_request(struct request_queue * q)
-{
- int i;
-
- for(i=0; i<floppy_count; i++) {
- struct floppy_state *fs = &floppy_states[i];
- if (fs->mdev->media_bay &&
- check_media_bay(fs->mdev->media_bay) != MB_FD)
- continue;
- start_request(fs);
- }
-}
-
static void start_request(struct floppy_state *fs)
{
struct request *req;
unsigned long x;
+ swim3_dbg("start request, initial state=%d\n", fs->state);
+
if (fs->state == idle && fs->wanted) {
fs->state = available;
wake_up(&fs->wait);
return;
}
while (fs->state == idle) {
- if (!fd_req) {
- fd_req = blk_fetch_request(swim3_queue);
- if (!fd_req)
+ swim3_dbg("start request, idle loop, cur_req=%p\n", fs->cur_req);
+ if (!fs->cur_req) {
+ fs->cur_req = blk_fetch_request(disks[fs->index]->queue);
+ swim3_dbg(" fetched request %p\n", fs->cur_req);
+ if (!fs->cur_req)
break;
}
- req = fd_req;
-#if 0
- printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
- req->rq_disk->disk_name, req->cmd,
- (long)blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
- printk(" errors=%d current_nr_sectors=%u\n",
- req->errors, blk_rq_cur_sectors(req));
+ req = fs->cur_req;
+
+ if (fs->mdev->media_bay &&
+ check_media_bay(fs->mdev->media_bay) != MB_FD) {
+ swim3_dbg("%s", " media bay absent, dropping req\n");
+ swim3_end_request(fs, -ENODEV, 0);
+ continue;
+ }
+
+#if 0 /* This is really too verbose */
+ swim3_dbg("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
+ req->rq_disk->disk_name, req->cmd,
+ (long)blk_rq_pos(req), blk_rq_sectors(req),
+ req->buffer);
+ swim3_dbg(" errors=%d current_nr_sectors=%u\n",
+ req->errors, blk_rq_cur_sectors(req));
#endif
if (blk_rq_pos(req) >= fs->total_secs) {
- swim3_end_request_cur(-EIO);
+ swim3_dbg(" pos out of bounds (%ld, max is %ld)\n",
+ (long)blk_rq_pos(req), (long)fs->total_secs);
+ swim3_end_request(fs, -EIO, 0);
continue;
}
if (fs->ejected) {
- swim3_end_request_cur(-EIO);
+ swim3_dbg("%s", " disk ejected\n");
+ swim3_end_request(fs, -EIO, 0);
continue;
}
@@ -354,7 +362,8 @@ static void start_request(struct floppy_state *fs)
if (fs->write_prot < 0)
fs->write_prot = swim3_readbit(fs, WRITE_PROT);
if (fs->write_prot) {
- swim3_end_request_cur(-EIO);
+ swim3_dbg("%s", " try to write, disk write protected\n");
+ swim3_end_request(fs, -EIO, 0);
continue;
}
}
@@ -369,7 +378,6 @@ static void start_request(struct floppy_state *fs)
x = ((long)blk_rq_pos(req)) % fs->secpercyl;
fs->head = x / fs->secpertrack;
fs->req_sector = x % fs->secpertrack + 1;
- fd_req = req;
fs->state = do_transfer;
fs->retries = 0;
@@ -377,12 +385,14 @@ static void start_request(struct floppy_state *fs)
}
}
+static void do_fd_request(struct request_queue * q)
+{
+ start_request(q->queuedata);
+}
+
static void set_timeout(struct floppy_state *fs, int nticks,
void (*proc)(unsigned long))
{
- unsigned long flags;
-
- spin_lock_irqsave(&fs->lock, flags);
if (fs->timeout_pending)
del_timer(&fs->timeout);
fs->timeout.expires = jiffies + nticks;
@@ -390,7 +400,6 @@ static void set_timeout(struct floppy_state *fs, int nticks,
fs->timeout.data = (unsigned long) fs;
add_timer(&fs->timeout);
fs->timeout_pending = 1;
- spin_unlock_irqrestore(&fs->lock, flags);
}
static inline void scan_track(struct floppy_state *fs)
@@ -442,40 +451,45 @@ static inline void setup_transfer(struct floppy_state *fs)
struct swim3 __iomem *sw = fs->swim3;
struct dbdma_cmd *cp = fs->dma_cmd;
struct dbdma_regs __iomem *dr = fs->dma;
+ struct request *req = fs->cur_req;
- if (blk_rq_cur_sectors(fd_req) <= 0) {
- printk(KERN_ERR "swim3: transfer 0 sectors?\n");
+ if (blk_rq_cur_sectors(req) <= 0) {
+ swim3_warn("%s", "Transfer 0 sectors ?\n");
return;
}
- if (rq_data_dir(fd_req) == WRITE)
+ if (rq_data_dir(req) == WRITE)
n = 1;
else {
n = fs->secpertrack - fs->req_sector + 1;
- if (n > blk_rq_cur_sectors(fd_req))
- n = blk_rq_cur_sectors(fd_req);
+ if (n > blk_rq_cur_sectors(req))
+ n = blk_rq_cur_sectors(req);
}
+
+ swim3_dbg(" setup xfer at sect %d (of %d) head %d for %d\n",
+ fs->req_sector, fs->secpertrack, fs->head, n);
+
fs->scount = n;
swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0);
out_8(&sw->sector, fs->req_sector);
out_8(&sw->nsect, n);
out_8(&sw->gap3, 0);
out_le32(&dr->cmdptr, virt_to_bus(cp));
- if (rq_data_dir(fd_req) == WRITE) {
+ if (rq_data_dir(req) == WRITE) {
/* Set up 3 dma commands: write preamble, data, postamble */
init_dma(cp, OUTPUT_MORE, write_preamble, sizeof(write_preamble));
++cp;
- init_dma(cp, OUTPUT_MORE, fd_req->buffer, 512);
+ init_dma(cp, OUTPUT_MORE, req->buffer, 512);
++cp;
init_dma(cp, OUTPUT_LAST, write_postamble, sizeof(write_postamble));
} else {
- init_dma(cp, INPUT_LAST, fd_req->buffer, n * 512);
+ init_dma(cp, INPUT_LAST, req->buffer, n * 512);
}
++cp;
out_le16(&cp->command, DBDMA_STOP);
out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
in_8(&sw->error);
out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
- if (rq_data_dir(fd_req) == WRITE)
+ if (rq_data_dir(req) == WRITE)
out_8(&sw->control_bis, WRITE_SECTORS);
in_8(&sw->intr);
out_le32(&dr->control, (RUN << 16) | RUN);
@@ -488,12 +502,16 @@ static inline void setup_transfer(struct floppy_state *fs)
static void act(struct floppy_state *fs)
{
for (;;) {
+ swim3_dbg(" act loop, state=%d, req_cyl=%d, cur_cyl=%d\n",
+ fs->state, fs->req_cyl, fs->cur_cyl);
+
switch (fs->state) {
case idle:
return; /* XXX shouldn't get here */
case locating:
if (swim3_readbit(fs, TRACK_ZERO)) {
+ swim3_dbg("%s", " locate track 0\n");
fs->cur_cyl = 0;
if (fs->req_cyl == 0)
fs->state = do_transfer;
@@ -511,7 +529,7 @@ static void act(struct floppy_state *fs)
break;
}
if (fs->req_cyl == fs->cur_cyl) {
- printk("whoops, seeking 0\n");
+ swim3_warn("%s", "Whoops, seeking 0\n");
fs->state = do_transfer;
break;
}
@@ -527,7 +545,9 @@ static void act(struct floppy_state *fs)
case do_transfer:
if (fs->cur_cyl != fs->req_cyl) {
if (fs->retries > 5) {
- swim3_end_request_cur(-EIO);
+ swim3_err("Wrong cylinder in transfer, want: %d got %d\n",
+ fs->req_cyl, fs->cur_cyl);
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
return;
}
@@ -542,7 +562,7 @@ static void act(struct floppy_state *fs)
return;
default:
- printk(KERN_ERR"swim3: unknown state %d\n", fs->state);
+ swim3_err("Unknown state %d\n", fs->state);
return;
}
}
@@ -552,59 +572,75 @@ static void scan_timeout(unsigned long data)
{
struct floppy_state *fs = (struct floppy_state *) data;
struct swim3 __iomem *sw = fs->swim3;
+ unsigned long flags;
+
+ swim3_dbg("* scan timeout, state=%d\n", fs->state);
+ spin_lock_irqsave(&swim3_lock, flags);
fs->timeout_pending = 0;
out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
out_8(&sw->select, RELAX);
out_8(&sw->intr_enable, 0);
fs->cur_cyl = -1;
if (fs->retries > 5) {
- swim3_end_request_cur(-EIO);
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
} else {
fs->state = jogging;
act(fs);
}
+ spin_unlock_irqrestore(&swim3_lock, flags);
}
static void seek_timeout(unsigned long data)
{
struct floppy_state *fs = (struct floppy_state *) data;
struct swim3 __iomem *sw = fs->swim3;
+ unsigned long flags;
+
+ swim3_dbg("* seek timeout, state=%d\n", fs->state);
+ spin_lock_irqsave(&swim3_lock, flags);
fs->timeout_pending = 0;
out_8(&sw->control_bic, DO_SEEK);
out_8(&sw->select, RELAX);
out_8(&sw->intr_enable, 0);
- printk(KERN_ERR "swim3: seek timeout\n");
- swim3_end_request_cur(-EIO);
+ swim3_err("%s", "Seek timeout\n");
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
+ spin_unlock_irqrestore(&swim3_lock, flags);
}
static void settle_timeout(unsigned long data)
{
struct floppy_state *fs = (struct floppy_state *) data;
struct swim3 __iomem *sw = fs->swim3;
+ unsigned long flags;
+
+ swim3_dbg("* settle timeout, state=%d\n", fs->state);
+ spin_lock_irqsave(&swim3_lock, flags);
fs->timeout_pending = 0;
if (swim3_readbit(fs, SEEK_COMPLETE)) {
out_8(&sw->select, RELAX);
fs->state = locating;
act(fs);
- return;
+ goto unlock;
}
out_8(&sw->select, RELAX);
if (fs->settle_time < 2*HZ) {
++fs->settle_time;
set_timeout(fs, 1, settle_timeout);
- return;
+ goto unlock;
}
- printk(KERN_ERR "swim3: seek settle timeout\n");
- swim3_end_request_cur(-EIO);
+ swim3_err("%s", "Seek settle timeout\n");
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
+ unlock:
+ spin_unlock_irqrestore(&swim3_lock, flags);
}
static void xfer_timeout(unsigned long data)
@@ -612,8 +648,12 @@ static void xfer_timeout(unsigned long data)
struct floppy_state *fs = (struct floppy_state *) data;
struct swim3 __iomem *sw = fs->swim3;
struct dbdma_regs __iomem *dr = fs->dma;
+ unsigned long flags;
int n;
+ swim3_dbg("* xfer timeout, state=%d\n", fs->state);
+
+ spin_lock_irqsave(&swim3_lock, flags);
fs->timeout_pending = 0;
out_le32(&dr->control, RUN << 16);
/* We must wait a bit for dbdma to stop */
@@ -622,12 +662,13 @@ static void xfer_timeout(unsigned long data)
out_8(&sw->intr_enable, 0);
out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
out_8(&sw->select, RELAX);
- printk(KERN_ERR "swim3: timeout %sing sector %ld\n",
- (rq_data_dir(fd_req)==WRITE? "writ": "read"),
- (long)blk_rq_pos(fd_req));
- swim3_end_request_cur(-EIO);
+ swim3_err("Timeout %sing sector %ld\n",
+ (rq_data_dir(fs->cur_req)==WRITE? "writ": "read"),
+ (long)blk_rq_pos(fs->cur_req));
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
+ spin_unlock_irqrestore(&swim3_lock, flags);
}
static irqreturn_t swim3_interrupt(int irq, void *dev_id)
@@ -638,12 +679,17 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
int stat, resid;
struct dbdma_regs __iomem *dr;
struct dbdma_cmd *cp;
+ unsigned long flags;
+ struct request *req = fs->cur_req;
+
+ swim3_dbg("* interrupt, state=%d\n", fs->state);
+ spin_lock_irqsave(&swim3_lock, flags);
intr = in_8(&sw->intr);
err = (intr & ERROR_INTR)? in_8(&sw->error): 0;
if ((intr & ERROR_INTR) && fs->state != do_transfer)
- printk(KERN_ERR "swim3_interrupt, state=%d, dir=%x, intr=%x, err=%x\n",
- fs->state, rq_data_dir(fd_req), intr, err);
+ swim3_err("Non-transfer error interrupt: state=%d, dir=%x, intr=%x, err=%x\n",
+ fs->state, rq_data_dir(req), intr, err);
switch (fs->state) {
case locating:
if (intr & SEEN_SECTOR) {
@@ -653,10 +699,10 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
del_timer(&fs->timeout);
fs->timeout_pending = 0;
if (sw->ctrack == 0xff) {
- printk(KERN_ERR "swim3: seen sector but cyl=ff?\n");
+ swim3_err("%s", "Seen sector but cyl=ff?\n");
fs->cur_cyl = -1;
if (fs->retries > 5) {
- swim3_end_request_cur(-EIO);
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
} else {
@@ -668,8 +714,8 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
fs->cur_cyl = sw->ctrack;
fs->cur_sector = sw->csect;
if (fs->expect_cyl != -1 && fs->expect_cyl != fs->cur_cyl)
- printk(KERN_ERR "swim3: expected cyl %d, got %d\n",
- fs->expect_cyl, fs->cur_cyl);
+ swim3_err("Expected cyl %d, got %d\n",
+ fs->expect_cyl, fs->cur_cyl);
fs->state = do_transfer;
act(fs);
}
@@ -704,7 +750,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
fs->timeout_pending = 0;
dr = fs->dma;
cp = fs->dma_cmd;
- if (rq_data_dir(fd_req) == WRITE)
+ if (rq_data_dir(req) == WRITE)
++cp;
/*
* Check that the main data transfer has finished.
@@ -729,31 +775,32 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
if (intr & ERROR_INTR) {
n = fs->scount - 1 - resid / 512;
if (n > 0) {
- blk_update_request(fd_req, 0, n << 9);
+ blk_update_request(req, 0, n << 9);
fs->req_sector += n;
}
if (fs->retries < 5) {
++fs->retries;
act(fs);
} else {
- printk("swim3: error %sing block %ld (err=%x)\n",
- rq_data_dir(fd_req) == WRITE? "writ": "read",
- (long)blk_rq_pos(fd_req), err);
- swim3_end_request_cur(-EIO);
+ swim3_err("Error %sing block %ld (err=%x)\n",
+ rq_data_dir(req) == WRITE? "writ": "read",
+ (long)blk_rq_pos(req), err);
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
}
} else {
if ((stat & ACTIVE) == 0 || resid != 0) {
/* musta been an error */
- printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid);
- printk(KERN_ERR " state=%d, dir=%x, intr=%x, err=%x\n",
- fs->state, rq_data_dir(fd_req), intr, err);
- swim3_end_request_cur(-EIO);
+ swim3_err("fd dma error: stat=%x resid=%d\n", stat, resid);
+ swim3_err(" state=%d, dir=%x, intr=%x, err=%x\n",
+ fs->state, rq_data_dir(req), intr, err);
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
break;
}
- if (swim3_end_request(0, fs->scount << 9)) {
+ fs->retries = 0;
+ if (swim3_end_request(fs, 0, fs->scount << 9)) {
fs->req_sector += fs->scount;
if (fs->req_sector > fs->secpertrack) {
fs->req_sector -= fs->secpertrack;
@@ -770,8 +817,9 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
start_request(fs);
break;
default:
- printk(KERN_ERR "swim3: don't know what to do in state %d\n", fs->state);
+ swim3_err("Don't know what to do in state %d\n", fs->state);
}
+ spin_unlock_irqrestore(&swim3_lock, flags);
return IRQ_HANDLED;
}
@@ -781,26 +829,31 @@ static void fd_dma_interrupt(int irq, void *dev_id)
}
*/
+/* Called under the mutex to grab exclusive access to a drive */
static int grab_drive(struct floppy_state *fs, enum swim_state state,
int interruptible)
{
unsigned long flags;
- spin_lock_irqsave(&fs->lock, flags);
- if (fs->state != idle) {
+ swim3_dbg("%s", "-> grab drive\n");
+
+ spin_lock_irqsave(&swim3_lock, flags);
+ if (fs->state != idle && fs->state != available) {
++fs->wanted;
while (fs->state != available) {
+ spin_unlock_irqrestore(&swim3_lock, flags);
if (interruptible && signal_pending(current)) {
--fs->wanted;
- spin_unlock_irqrestore(&fs->lock, flags);
return -EINTR;
}
interruptible_sleep_on(&fs->wait);
+ spin_lock_irqsave(&swim3_lock, flags);
}
--fs->wanted;
}
fs->state = state;
- spin_unlock_irqrestore(&fs->lock, flags);
+ spin_unlock_irqrestore(&swim3_lock, flags);
+
return 0;
}
@@ -808,10 +861,12 @@ static void release_drive(struct floppy_state *fs)
{
unsigned long flags;
- spin_lock_irqsave(&fs->lock, flags);
+ swim3_dbg("%s", "-> release drive\n");
+
+ spin_lock_irqsave(&swim3_lock, flags);
fs->state = idle;
start_request(fs);
- spin_unlock_irqrestore(&fs->lock, flags);
+ spin_unlock_irqrestore(&swim3_lock, flags);
}
static int fd_eject(struct floppy_state *fs)
@@ -966,6 +1021,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
{
struct floppy_state *fs = disk->private_data;
struct swim3 __iomem *sw = fs->swim3;
+
mutex_lock(&swim3_mutex);
if (fs->ref_count > 0 && --fs->ref_count == 0) {
swim3_action(fs, MOTOR_OFF);
@@ -1031,30 +1087,48 @@ static const struct block_device_operations floppy_fops = {
.revalidate_disk= floppy_revalidate,
};
+static void swim3_mb_event(struct macio_dev* mdev, int mb_state)
+{
+ struct floppy_state *fs = macio_get_drvdata(mdev);
+ struct swim3 __iomem *sw = fs->swim3;
+
+ if (!fs)
+ return;
+ if (mb_state != MB_FD)
+ return;
+
+ /* Clear state */
+ out_8(&sw->intr_enable, 0);
+ in_8(&sw->intr);
+ in_8(&sw->error);
+}
+
static int swim3_add_device(struct macio_dev *mdev, int index)
{
struct device_node *swim = mdev->ofdev.dev.of_node;
struct floppy_state *fs = &floppy_states[index];
int rc = -EBUSY;
+ /* Do this first for message macros */
+ memset(fs, 0, sizeof(*fs));
+ fs->mdev = mdev;
+ fs->index = index;
+
/* Check & Request resources */
if (macio_resource_count(mdev) < 2) {
- printk(KERN_WARNING "ifd%d: no address for %s\n",
- index, swim->full_name);
+ swim3_err("%s", "No address in device-tree\n");
return -ENXIO;
}
- if (macio_irq_count(mdev) < 2) {
- printk(KERN_WARNING "fd%d: no intrs for device %s\n",
- index, swim->full_name);
+ if (macio_irq_count(mdev) < 1) {
+ swim3_err("%s", "No interrupt in device-tree\n");
+ return -ENXIO;
}
if (macio_request_resource(mdev, 0, "swim3 (mmio)")) {
- printk(KERN_ERR "fd%d: can't request mmio resource for %s\n",
- index, swim->full_name);
+ swim3_err("%s", "Can't request mmio resource\n");
return -EBUSY;
}
if (macio_request_resource(mdev, 1, "swim3 (dma)")) {
- printk(KERN_ERR "fd%d: can't request dma resource for %s\n",
- index, swim->full_name);
+ swim3_err("%s", "Can't request dma resource\n");
macio_release_resource(mdev, 0);
return -EBUSY;
}
@@ -1063,22 +1137,18 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
if (mdev->media_bay == NULL)
pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 1);
- memset(fs, 0, sizeof(*fs));
- spin_lock_init(&fs->lock);
fs->state = idle;
fs->swim3 = (struct swim3 __iomem *)
ioremap(macio_resource_start(mdev, 0), 0x200);
if (fs->swim3 == NULL) {
- printk("fd%d: couldn't map registers for %s\n",
- index, swim->full_name);
+ swim3_err("%s", "Couldn't map mmio registers\n");
rc = -ENOMEM;
goto out_release;
}
fs->dma = (struct dbdma_regs __iomem *)
ioremap(macio_resource_start(mdev, 1), 0x200);
if (fs->dma == NULL) {
- printk("fd%d: couldn't map DMA for %s\n",
- index, swim->full_name);
+ swim3_err("%s", "Couldn't map dma registers\n");
iounmap(fs->swim3);
rc = -ENOMEM;
goto out_release;
@@ -1090,31 +1160,25 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
fs->secpercyl = 36;
fs->secpertrack = 18;
fs->total_secs = 2880;
- fs->mdev = mdev;
init_waitqueue_head(&fs->wait);
fs->dma_cmd = (struct dbdma_cmd *) DBDMA_ALIGN(fs->dbdma_cmd_space);
memset(fs->dma_cmd, 0, 2 * sizeof(struct dbdma_cmd));
st_le16(&fs->dma_cmd[1].command, DBDMA_STOP);
+ if (mdev->media_bay == NULL || check_media_bay(mdev->media_bay) == MB_FD)
+ swim3_mb_event(mdev, MB_FD);
+
if (request_irq(fs->swim3_intr, swim3_interrupt, 0, "SWIM3", fs)) {
- printk(KERN_ERR "fd%d: couldn't request irq %d for %s\n",
- index, fs->swim3_intr, swim->full_name);
+ swim3_err("%s", "Couldn't request interrupt\n");
pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 0);
goto out_unmap;
return -EBUSY;
}
-/*
- if (request_irq(fs->dma_intr, fd_dma_interrupt, 0, "SWIM3-dma", fs)) {
- printk(KERN_ERR "Couldn't get irq %d for SWIM3 DMA",
- fs->dma_intr);
- return -EBUSY;
- }
-*/
init_timer(&fs->timeout);
- printk(KERN_INFO "fd%d: SWIM3 floppy controller %s\n", floppy_count,
+ swim3_info("SWIM3 floppy controller %s\n",
mdev->media_bay ? "in media bay" : "");
return 0;
@@ -1132,41 +1196,42 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
static int __devinit swim3_attach(struct macio_dev *mdev, const struct of_device_id *match)
{
- int i, rc;
struct gendisk *disk;
+ int index, rc;
+
+ index = floppy_count++;
+ if (index >= MAX_FLOPPIES)
+ return -ENXIO;
/* Add the drive */
- rc = swim3_add_device(mdev, floppy_count);
+ rc = swim3_add_device(mdev, index);
if (rc)
return rc;
+ /* Now register that disk. Same comment about failure handling */
+ disk = disks[index] = alloc_disk(1);
+ if (disk == NULL)
+ return -ENOMEM;
+ disk->queue = blk_init_queue(do_fd_request, &swim3_lock);
+ if (disk->queue == NULL) {
+ put_disk(disk);
+ return -ENOMEM;
+ }
+ disk->queue->queuedata = &floppy_states[index];
- /* Now create the queue if not there yet */
- if (swim3_queue == NULL) {
+ if (index == 0) {
/* If we failed, there isn't much we can do as the driver is still
* too dumb to remove the device, just bail out
*/
if (register_blkdev(FLOPPY_MAJOR, "fd"))
return 0;
- swim3_queue = blk_init_queue(do_fd_request, &swim3_lock);
- if (swim3_queue == NULL) {
- unregister_blkdev(FLOPPY_MAJOR, "fd");
- return 0;
- }
}
- /* Now register that disk. Same comment about failure handling */
- i = floppy_count++;
- disk = disks[i] = alloc_disk(1);
- if (disk == NULL)
- return 0;
-
disk->major = FLOPPY_MAJOR;
- disk->first_minor = i;
+ disk->first_minor = index;
disk->fops = &floppy_fops;
- disk->private_data = &floppy_states[i];
- disk->queue = swim3_queue;
+ disk->private_data = &floppy_states[index];
disk->flags |= GENHD_FL_REMOVABLE;
- sprintf(disk->disk_name, "fd%d", i);
+ sprintf(disk->disk_name, "fd%d", index);
set_capacity(disk, 2880);
add_disk(disk);
@@ -1194,6 +1259,9 @@ static struct macio_driver swim3_driver =
.of_match_table = swim3_match,
},
.probe = swim3_attach,
+#ifdef CONFIG_PMAC_MEDIABAY
+ .mediabay_event = swim3_mb_event,
+#endif
#if 0
.suspend = swim3_suspend,
.resume = swim3_resume,
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 11b41fd40c2..5ccf142ef0b 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -188,7 +188,7 @@ config BT_MRVL
The core driver to support Marvell Bluetooth devices.
This driver is required if you want to support
- Marvell Bluetooth devices, such as 8688/8787.
+ Marvell Bluetooth devices, such as 8688/8787/8797.
Say Y here to compile Marvell Bluetooth driver
into the kernel or say M to compile it as module.
@@ -201,8 +201,8 @@ config BT_MRVL_SDIO
The driver for Marvell Bluetooth chipsets with SDIO interface.
This driver is required if you want to use Marvell Bluetooth
- devices with SDIO interface. Currently SD8688/SD8787 chipsets are
- supported.
+ devices with SDIO interface. Currently SD8688/SD8787/SD8797
+ chipsets are supported.
Say Y here to compile support for Marvell BT-over-SDIO driver
into the kernel or say M to compile it as module.
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 9ef48167e2c..27b74b0d547 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -65,7 +65,7 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_8688 = {
.io_port_1 = 0x01,
.io_port_2 = 0x02,
};
-static const struct btmrvl_sdio_card_reg btmrvl_reg_8787 = {
+static const struct btmrvl_sdio_card_reg btmrvl_reg_87xx = {
.cfg = 0x00,
.host_int_mask = 0x02,
.host_intstatus = 0x03,
@@ -92,7 +92,14 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
static const struct btmrvl_sdio_device btmrvl_sdio_sd8787 = {
.helper = NULL,
.firmware = "mrvl/sd8787_uapsta.bin",
- .reg = &btmrvl_reg_8787,
+ .reg = &btmrvl_reg_87xx,
+ .sd_blksz_fw_dl = 256,
+};
+
+static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = {
+ .helper = NULL,
+ .firmware = "mrvl/sd8797_uapsta.bin",
+ .reg = &btmrvl_reg_87xx,
.sd_blksz_fw_dl = 256,
};
@@ -103,6 +110,9 @@ static const struct sdio_device_id btmrvl_sdio_ids[] = {
/* Marvell SD8787 Bluetooth device */
{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911A),
.driver_data = (unsigned long) &btmrvl_sdio_sd8787 },
+ /* Marvell SD8797 Bluetooth device */
+ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A),
+ .driver_data = (unsigned long) &btmrvl_sdio_sd8797 },
{ } /* Terminating entry */
};
@@ -1076,3 +1086,4 @@ MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE("sd8688_helper.bin");
MODULE_FIRMWARE("sd8688.bin");
MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
+MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin");
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index fe4ebc375b3..eabc437ce50 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -777,9 +777,8 @@ skip_waking:
usb_mark_last_busy(data->udev);
}
- usb_free_urb(urb);
-
done:
+ usb_free_urb(urb);
return err;
}
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index c2917ffad2c..34767a6d7f4 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -139,6 +139,8 @@
#define IPMI_WDOG_SET_TIMER 0x24
#define IPMI_WDOG_GET_TIMER 0x25
+#define IPMI_WDOG_TIMER_NOT_INIT_RESP 0x80
+
/* These are here until the real ones get into the watchdog.h interface. */
#ifndef WDIOC_GETTIMEOUT
#define WDIOC_GETTIMEOUT _IOW(WATCHDOG_IOCTL_BASE, 20, int)
@@ -596,6 +598,7 @@ static int ipmi_heartbeat(void)
struct kernel_ipmi_msg msg;
int rv;
struct ipmi_system_interface_addr addr;
+ int timeout_retries = 0;
if (ipmi_ignore_heartbeat)
return 0;
@@ -616,6 +619,7 @@ static int ipmi_heartbeat(void)
mutex_lock(&heartbeat_lock);
+restart:
atomic_set(&heartbeat_tofree, 2);
/*
@@ -653,7 +657,33 @@ static int ipmi_heartbeat(void)
/* Wait for the heartbeat to be sent. */
wait_for_completion(&heartbeat_wait);
- if (heartbeat_recv_msg.msg.data[0] != 0) {
+ if (heartbeat_recv_msg.msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP) {
+ timeout_retries++;
+ if (timeout_retries > 3) {
+ printk(KERN_ERR PFX ": Unable to restore the IPMI"
+ " watchdog's settings, giving up.\n");
+ rv = -EIO;
+ goto out_unlock;
+ }
+
+ /*
+ * The timer was not initialized, that means the BMC was
+ * probably reset and lost the watchdog information. Attempt
+ * to restore the timer's info. Note that we still hold
+ * the heartbeat lock, to keep a heartbeat from happening
+ * in this process, so must say no heartbeat to avoid a
+ * deadlock on this mutex.
+ */
+ rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+ if (rv) {
+ printk(KERN_ERR PFX ": Unable to send the command to"
+ " set the watchdog's settings, giving up.\n");
+ goto out_unlock;
+ }
+
+ /* We might need a new heartbeat, so do it now */
+ goto restart;
+ } else if (heartbeat_recv_msg.msg.data[0] != 0) {
/*
* Got an error in the heartbeat response. It was already
* reported in ipmi_wdog_msg_handler, but we should return
@@ -662,6 +692,7 @@ static int ipmi_heartbeat(void)
rv = -EINVAL;
}
+out_unlock:
mutex_unlock(&heartbeat_lock);
return rv;
@@ -922,11 +953,15 @@ static struct miscdevice ipmi_wdog_miscdev = {
static void ipmi_wdog_msg_handler(struct ipmi_recv_msg *msg,
void *handler_data)
{
- if (msg->msg.data[0] != 0) {
+ if (msg->msg.cmd == IPMI_WDOG_RESET_TIMER &&
+ msg->msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP)
+ printk(KERN_INFO PFX "response: The IPMI controller appears"
+ " to have been reset, will attempt to reinitialize"
+ " the watchdog timer\n");
+ else if (msg->msg.data[0] != 0)
printk(KERN_ERR PFX "response: Error %x on cmd %x\n",
msg->msg.data[0],
msg->msg.cmd);
- }
ipmi_free_recv_msg(msg);
}
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 5c6f56f2144..dcd8babae9e 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -343,11 +343,13 @@ static void mv_process_hash_current(int first_block)
else
op.config |= CFG_MID_FRAG;
- writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
- writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
- writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
- writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
- writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
+ if (first_block) {
+ writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
+ writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
+ writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
+ writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
+ writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
+ }
}
memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));
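
The mv_cesa change above loads the DIGEST_INITIAL_VAL registers only for the first block, so later blocks continue from the running hash state. A toy sketch of the "seed once, then carry the state" idea, assuming a made-up checksum in place of the real SHA engine:

#include <stdio.h>
#include <stddef.h>

static unsigned int state;

static void process_block(const unsigned char *buf, size_t len, int first_block)
{
    size_t i;

    if (first_block)
        state = 0x12345678u;   /* load the initial value exactly once */

    for (i = 0; i < len; i++)  /* continue from the running state */
        state = state * 31 + buf[i];
}

int main(void)
{
    const unsigned char data[] = "abcdefgh";

    process_block(data, 4, 1);      /* first block: seeds the state */
    process_block(data + 4, 4, 0);  /* later block: must not re-seed */
    printf("state = %#x\n", state);
    return 0;
}
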
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 8af8e864a9c..73464a62adf 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -1128,7 +1128,7 @@ static struct of_device_id mpc85xx_mc_err_of_match[] = {
{ .compatible = "fsl,p1020-memory-controller", },
{ .compatible = "fsl,p1021-memory-controller", },
{ .compatible = "fsl,p2020-memory-controller", },
- { .compatible = "fsl,p4080-memory-controller", },
+ { .compatible = "fsl,qoriq-memory-controller", },
{},
};
MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match);
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index 8370f72d87f..b0a81173a26 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -457,7 +457,8 @@ static int efi_pstore_close(struct pstore_info *psi)
}
static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
- struct timespec *timespec, struct pstore_info *psi)
+ struct timespec *timespec,
+ char **buf, struct pstore_info *psi)
{
efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
struct efivars *efivars = psi->data;
@@ -478,7 +479,11 @@ static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
timespec->tv_nsec = 0;
get_var_data_locked(efivars, &efivars->walk_entry->var);
size = efivars->walk_entry->var.DataSize;
- memcpy(psi->buf, efivars->walk_entry->var.Data, size);
+ *buf = kmalloc(size, GFP_KERNEL);
+ if (*buf == NULL)
+ return -ENOMEM;
+ memcpy(*buf, efivars->walk_entry->var.Data,
+ size);
efivars->walk_entry = list_entry(efivars->walk_entry->list.next,
struct efivar_entry, list);
return size;
@@ -576,7 +581,8 @@ static int efi_pstore_close(struct pstore_info *psi)
}
static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
- struct timespec *time, struct pstore_info *psi)
+ struct timespec *timespec,
+ char **buf, struct pstore_info *psi)
{
return -1;
}
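
The new pstore read prototype above returns the record through a char **buf that the callback allocates itself (and the caller later frees), instead of copying into a fixed psi->buf. A hedged userspace sketch of that ownership contract, with a static array standing in for the EFI variable data:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

static const char record_data[] = "oops: example crash record";

static long read_record(char **buf)
{
    size_t size = sizeof(record_data);

    *buf = malloc(size);
    if (*buf == NULL)
        return -ENOMEM;          /* caller sees the allocation failure */

    memcpy(*buf, record_data, size);
    return (long)size;           /* caller owns *buf and must free it */
}

int main(void)
{
    char *buf = NULL;
    long n = read_record(&buf);

    if (n < 0)
        return 1;
    printf("read %ld bytes: %s\n", n, buf);
    free(buf);
    return 0;
}
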
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index c811cb10790..2cce44a1d7d 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -746,6 +746,37 @@ static void __exit ibft_exit(void)
ibft_cleanup();
}
+#ifdef CONFIG_ACPI
+static const struct {
+ char *sign;
+} ibft_signs[] = {
+ /*
+ * One spec says "IBFT", the other says "iBFT". We have to check
+ * for both.
+ */
+ { ACPI_SIG_IBFT },
+ { "iBFT" },
+};
+
+static void __init acpi_find_ibft_region(void)
+{
+ int i;
+ struct acpi_table_header *table = NULL;
+
+ if (acpi_disabled)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++) {
+ acpi_get_table(ibft_signs[i].sign, 0, &table);
+ ibft_addr = (struct acpi_table_ibft *)table;
+ }
+}
+#else
+static void __init acpi_find_ibft_region(void)
+{
+}
+#endif
+
/*
* ibft_init() - creates sysfs tree entries for the iBFT data.
*/
@@ -753,9 +784,16 @@ static int __init ibft_init(void)
{
int rc = 0;
+ /*
+ On UEFI systems, setup_arch()/find_ibft_region() is called
+ before the ACPI tables are parsed, and it only does the
+ legacy scan.
+ */
+ if (!ibft_addr)
+ acpi_find_ibft_region();
+
if (ibft_addr) {
- printk(KERN_INFO "iBFT detected at 0x%llx.\n",
- (u64)isa_virt_to_bus(ibft_addr));
+ pr_info("iBFT detected.\n");
rc = ibft_check_device();
if (rc)
diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c
index bfe723266fd..4da4eb9ae92 100644
--- a/drivers/firmware/iscsi_ibft_find.c
+++ b/drivers/firmware/iscsi_ibft_find.c
@@ -45,13 +45,6 @@ EXPORT_SYMBOL_GPL(ibft_addr);
static const struct {
char *sign;
} ibft_signs[] = {
-#ifdef CONFIG_ACPI
- /*
- * One spec says "IBFT", the other says "iBFT". We have to check
- * for both.
- */
- { ACPI_SIG_IBFT },
-#endif
{ "iBFT" },
{ "BIFT" }, /* Broadcom iSCSI Offload */
};
@@ -62,14 +55,6 @@ static const struct {
#define VGA_MEM 0xA0000 /* VGA buffer */
#define VGA_SIZE 0x20000 /* 128kB */
-#ifdef CONFIG_ACPI
-static int __init acpi_find_ibft(struct acpi_table_header *header)
-{
- ibft_addr = (struct acpi_table_ibft *)header;
- return 0;
-}
-#endif /* CONFIG_ACPI */
-
static int __init find_ibft_in_mem(void)
{
unsigned long pos;
@@ -94,6 +79,7 @@ static int __init find_ibft_in_mem(void)
* the table cannot be valid. */
if (pos + len <= (IBFT_END-1)) {
ibft_addr = (struct acpi_table_ibft *)virt;
+ pr_info("iBFT found at 0x%lx.\n", pos);
goto done;
}
}
@@ -108,20 +94,12 @@ done:
*/
unsigned long __init find_ibft_region(unsigned long *sizep)
{
-#ifdef CONFIG_ACPI
- int i;
-#endif
ibft_addr = NULL;
-#ifdef CONFIG_ACPI
- for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++)
- acpi_table_parse(ibft_signs[i].sign, acpi_find_ibft);
-#endif /* CONFIG_ACPI */
-
/* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will
* only use ACPI for this */
- if (!ibft_addr && !efi_enabled)
+ if (!efi_enabled)
find_ibft_in_mem();
if (ibft_addr) {
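
With the ACPI path moved into ibft_init(), find_ibft_region() only performs the legacy low-memory scan (and skips it entirely on EFI). A rough illustration of such a scan: step through a region at 16-byte boundaries, compare 4-byte signatures, and accept a hit only if the table's embedded length (which, as in an ACPI table header, follows the signature) stays inside the region. The buffer contents and offsets here are invented for the example.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

static const char *signs[] = { "iBFT", "BIFT" };

/* Returns the offset of a plausible table, or -1 if none is found. */
static long scan_for_table(const uint8_t *mem, size_t size)
{
    size_t pos;

    for (pos = 0; pos + 8 <= size; pos += 16) {
        size_t i;

        for (i = 0; i < sizeof(signs) / sizeof(signs[0]); i++) {
            uint32_t len;

            if (memcmp(mem + pos, signs[i], 4) != 0)
                continue;

            /* 32-bit length follows the 4-byte signature */
            memcpy(&len, mem + pos + 4, sizeof(len));
            if (pos + len <= size)   /* reject tables that overrun */
                return (long)pos;
        }
    }
    return -1;
}

int main(void)
{
    uint8_t mem[256] = { 0 };
    uint32_t len = 32;

    memcpy(mem + 48, "iBFT", 4);
    memcpy(mem + 52, &len, sizeof(len));
    printf("table at offset %ld\n", scan_for_table(mem, sizeof(mem)));
    return 0;
}
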
diff --git a/drivers/firmware/sigma.c b/drivers/firmware/sigma.c
index f10fc521951..1eedb6f7fda 100644
--- a/drivers/firmware/sigma.c
+++ b/drivers/firmware/sigma.c
@@ -14,13 +14,34 @@
#include <linux/module.h>
#include <linux/sigma.h>
-/* Return: 0==OK, <0==error, =1 ==no more actions */
+static size_t sigma_action_size(struct sigma_action *sa)
+{
+ size_t payload = 0;
+
+ switch (sa->instr) {
+ case SIGMA_ACTION_WRITEXBYTES:
+ case SIGMA_ACTION_WRITESINGLE:
+ case SIGMA_ACTION_WRITESAFELOAD:
+ payload = sigma_action_len(sa);
+ break;
+ default:
+ break;
+ }
+
+ payload = ALIGN(payload, 2);
+
+ return payload + sizeof(struct sigma_action);
+}
+
+/*
+ * Returns a negative error value in case of an error, 0 if processing of
+ * the firmware should be stopped after this action, 1 otherwise.
+ */
static int
-process_sigma_action(struct i2c_client *client, struct sigma_firmware *ssfw)
+process_sigma_action(struct i2c_client *client, struct sigma_action *sa)
{
- struct sigma_action *sa = (void *)(ssfw->fw->data + ssfw->pos);
size_t len = sigma_action_len(sa);
- int ret = 0;
+ int ret;
pr_debug("%s: instr:%i addr:%#x len:%zu\n", __func__,
sa->instr, sa->addr, len);
@@ -29,44 +50,50 @@ process_sigma_action(struct i2c_client *client, struct sigma_firmware *ssfw)
case SIGMA_ACTION_WRITEXBYTES:
case SIGMA_ACTION_WRITESINGLE:
case SIGMA_ACTION_WRITESAFELOAD:
- if (ssfw->fw->size < ssfw->pos + len)
- return -EINVAL;
ret = i2c_master_send(client, (void *)&sa->addr, len);
if (ret < 0)
return -EINVAL;
break;
-
case SIGMA_ACTION_DELAY:
- ret = 0;
udelay(len);
len = 0;
break;
-
case SIGMA_ACTION_END:
- return 1;
-
+ return 0;
default:
return -EINVAL;
}
- /* when arrive here ret=0 or sent data */
- ssfw->pos += sigma_action_size(sa, len);
- return ssfw->pos == ssfw->fw->size;
+ return 1;
}
static int
process_sigma_actions(struct i2c_client *client, struct sigma_firmware *ssfw)
{
- pr_debug("%s: processing %p\n", __func__, ssfw);
+ struct sigma_action *sa;
+ size_t size;
+ int ret;
+
+ while (ssfw->pos + sizeof(*sa) <= ssfw->fw->size) {
+ sa = (struct sigma_action *)(ssfw->fw->data + ssfw->pos);
+
+ size = sigma_action_size(sa);
+ ssfw->pos += size;
+ if (ssfw->pos > ssfw->fw->size || size == 0)
+ break;
+
+ ret = process_sigma_action(client, sa);
- while (1) {
- int ret = process_sigma_action(client, ssfw);
pr_debug("%s: action returned %i\n", __func__, ret);
- if (ret == 1)
- return 0;
- else if (ret)
+
+ if (ret <= 0)
return ret;
}
+
+ if (ssfw->pos != ssfw->fw->size)
+ return -EINVAL;
+
+ return 0;
}
int process_sigma_firmware(struct i2c_client *client, const char *name)
@@ -89,16 +116,24 @@ int process_sigma_firmware(struct i2c_client *client, const char *name)
/* then verify the header */
ret = -EINVAL;
- if (fw->size < sizeof(*ssfw_head))
+
+ /*
+ * Reject files that are too small or unreasonably large. The upper limit has been
+ * chosen a bit arbitrarily, but it should be enough for all practical
+ * purposes and having the limit makes it easier to avoid integer
+ * overflows later in the loading process.
+ */
+ if (fw->size < sizeof(*ssfw_head) || fw->size >= 0x4000000)
goto done;
ssfw_head = (void *)fw->data;
if (memcmp(ssfw_head->magic, SIGMA_MAGIC, ARRAY_SIZE(ssfw_head->magic)))
goto done;
- crc = crc32(0, fw->data, fw->size);
+ crc = crc32(0, fw->data + sizeof(*ssfw_head),
+ fw->size - sizeof(*ssfw_head));
pr_debug("%s: crc=%x\n", __func__, crc);
- if (crc != ssfw_head->crc)
+ if (crc != le32_to_cpu(ssfw_head->crc))
goto done;
ssfw.pos = sizeof(*ssfw_head);
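
The sigma loader rework above moves all bounds checking into the caller: the image size is capped, each action header must fit before it is dereferenced, payloads are 2-byte aligned, and the CRC now covers only the data after the header and is compared via le32_to_cpu(). A simplified, self-contained sketch of that kind of length-prefixed parsing loop (the record layout is a stand-in for struct sigma_action, and the CRC step is omitted):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define MAX_IMAGE_SIZE 0x4000000u   /* reject absurdly large images */
#define REC_END        0xff

struct record {
    uint8_t  type;
    uint8_t  len;                   /* payload length in bytes */
    uint8_t  payload[];
};

static int process_image(const uint8_t *data, size_t size)
{
    size_t pos = 0;

    if (size < sizeof(struct record) || size >= MAX_IMAGE_SIZE)
        return -1;

    while (pos + sizeof(struct record) <= size) {
        const struct record *rec = (const void *)(data + pos);
        size_t total = sizeof(*rec) + ((rec->len + 1u) & ~1u); /* 2-byte align */

        pos += total;
        if (pos > size)             /* payload would overrun the image */
            return -1;
        if (rec->type == REC_END)
            return 0;
        printf("record type %u, %u payload bytes\n",
               (unsigned)rec->type, (unsigned)rec->len);
    }
    return -1;                      /* ran off the end without REC_END */
}

int main(void)
{
    static const uint8_t image[] = { 0x01, 0x02, 0xaa, 0xbb, REC_END, 0x00 };

    return process_image(image, sizeof(image)) ? 1 : 0;
}
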
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index dbcb0bcfd8d..4e018d6a763 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -18,7 +18,7 @@ obj-$(CONFIG_ARCH_DAVINCI) += gpio-davinci.o
obj-$(CONFIG_GPIO_EP93XX) += gpio-ep93xx.o
obj-$(CONFIG_GPIO_IT8761E) += gpio-it8761e.o
obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o
-obj-$(CONFIG_MACH_KS8695) += gpio-ks8695.o
+obj-$(CONFIG_ARCH_KS8695) += gpio-ks8695.o
obj-$(CONFIG_GPIO_LANGWELL) += gpio-langwell.o
obj-$(CONFIG_ARCH_LPC32XX) += gpio-lpc32xx.o
obj-$(CONFIG_GPIO_MAX730X) += gpio-max730x.o
diff --git a/drivers/gpio/gpio-da9052.c b/drivers/gpio/gpio-da9052.c
index 038f5eb8b13..f8ce29ef9f8 100644
--- a/drivers/gpio/gpio-da9052.c
+++ b/drivers/gpio/gpio-da9052.c
@@ -22,7 +22,6 @@
#include <linux/mfd/da9052/da9052.h>
#include <linux/mfd/da9052/reg.h>
#include <linux/mfd/da9052/pdata.h>
-#include <linux/mfd/da9052/gpio.h>
#define DA9052_INPUT 1
#define DA9052_OUTPUT_OPENDRAIN 2
@@ -43,6 +42,9 @@
#define DA9052_GPIO_MASK_UPPER_NIBBLE 0xF0
#define DA9052_GPIO_MASK_LOWER_NIBBLE 0x0F
#define DA9052_GPIO_NIBBLE_SHIFT 4
+#define DA9052_IRQ_GPI0 16
+#define DA9052_GPIO_ODD_SHIFT 7
+#define DA9052_GPIO_EVEN_SHIFT 3
struct da9052_gpio {
struct da9052 *da9052;
@@ -104,33 +106,26 @@ static int da9052_gpio_get(struct gpio_chip *gc, unsigned offset)
static void da9052_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
{
struct da9052_gpio *gpio = to_da9052_gpio(gc);
- unsigned char register_value = 0;
int ret;
if (da9052_gpio_port_odd(offset)) {
- if (value) {
- register_value = DA9052_GPIO_ODD_PORT_MODE;
ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
DA9052_GPIO_0_1_REG,
DA9052_GPIO_ODD_PORT_MODE,
- register_value);
+ value << DA9052_GPIO_ODD_SHIFT);
if (ret != 0)
dev_err(gpio->da9052->dev,
"Failed to updated gpio odd reg,%d",
ret);
- }
} else {
- if (value) {
- register_value = DA9052_GPIO_EVEN_PORT_MODE;
ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
DA9052_GPIO_0_1_REG,
DA9052_GPIO_EVEN_PORT_MODE,
- register_value);
+ value << DA9052_GPIO_EVEN_SHIFT);
if (ret != 0)
dev_err(gpio->da9052->dev,
"Failed to updated gpio even reg,%d",
ret);
- }
}
}
@@ -201,9 +196,9 @@ static struct gpio_chip reference_gp __devinitdata = {
.direction_input = da9052_gpio_direction_input,
.direction_output = da9052_gpio_direction_output,
.to_irq = da9052_gpio_to_irq,
- .can_sleep = 1;
- .ngpio = 16;
- .base = -1;
+ .can_sleep = 1,
+ .ngpio = 16,
+ .base = -1,
};
static int __devinit da9052_gpio_probe(struct platform_device *pdev)
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index ea8e7386925..461958fc226 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -332,6 +332,34 @@ static void ioh_irq_mask(struct irq_data *d)
&chip->reg->regs[chip->ch].imask);
}
+static void ioh_irq_disable(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct ioh_gpio *chip = gc->private;
+ unsigned long flags;
+ u32 ien;
+
+ spin_lock_irqsave(&chip->spinlock, flags);
+ ien = ioread32(&chip->reg->regs[chip->ch].ien);
+ ien &= ~(1 << (d->irq - chip->irq_base));
+ iowrite32(ien, &chip->reg->regs[chip->ch].ien);
+ spin_unlock_irqrestore(&chip->spinlock, flags);
+}
+
+static void ioh_irq_enable(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct ioh_gpio *chip = gc->private;
+ unsigned long flags;
+ u32 ien;
+
+ spin_lock_irqsave(&chip->spinlock, flags);
+ ien = ioread32(&chip->reg->regs[chip->ch].ien);
+ ien |= 1 << (d->irq - chip->irq_base);
+ iowrite32(ien, &chip->reg->regs[chip->ch].ien);
+ spin_unlock_irqrestore(&chip->spinlock, flags);
+}
+
static irqreturn_t ioh_gpio_handler(int irq, void *dev_id)
{
struct ioh_gpio *chip = dev_id;
@@ -339,7 +367,7 @@ static irqreturn_t ioh_gpio_handler(int irq, void *dev_id)
int i, j;
int ret = IRQ_NONE;
- for (i = 0; i < 8; i++) {
+ for (i = 0; i < 8; i++, chip++) {
reg_val = ioread32(&chip->reg->regs[i].istatus);
for (j = 0; j < num_ports[i]; j++) {
if (reg_val & BIT(j)) {
@@ -370,6 +398,8 @@ static __devinit void ioh_gpio_alloc_generic_chip(struct ioh_gpio *chip,
ct->chip.irq_mask = ioh_irq_mask;
ct->chip.irq_unmask = ioh_irq_unmask;
ct->chip.irq_set_type = ioh_irq_type;
+ ct->chip.irq_disable = ioh_irq_disable;
+ ct->chip.irq_enable = ioh_irq_enable;
irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
IRQ_NOREQUEST | IRQ_NOPROBE, 0);
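
The added ioh_irq_enable()/ioh_irq_disable() callbacks both do a locked read-modify-write of the IEN register so concurrent calls cannot clobber each other's bits. The sketch below shows the same pattern in plain C, with a pthread mutex standing in for the driver's spinlock and a variable standing in for the memory-mapped register:

#include <stdio.h>
#include <stdint.h>
#include <pthread.h>

static uint32_t ien_reg;                         /* fake MMIO register */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void irq_set_enabled(unsigned int irq, unsigned int irq_base, int enable)
{
    uint32_t ien;

    pthread_mutex_lock(&lock);
    ien = ien_reg;                               /* ioread32() in the driver */
    if (enable)
        ien |= 1u << (irq - irq_base);
    else
        ien &= ~(1u << (irq - irq_base));
    ien_reg = ien;                               /* iowrite32() in the driver */
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    irq_set_enabled(37, 32, 1);
    irq_set_enabled(38, 32, 1);
    irq_set_enabled(37, 32, 0);
    printf("ien = %#x\n", ien_reg);              /* expect 0x40 */
    return 0;
}
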
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index ec3fcf0a7e1..5cd04b65c55 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -132,6 +132,15 @@ static int mpc8xxx_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val
return 0;
}
+static int mpc5121_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+ /* GPIO 28..31 are input only on MPC5121 */
+ if (gpio >= 28)
+ return -EINVAL;
+
+ return mpc8xxx_gpio_dir_out(gc, gpio, val);
+}
+
static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
{
struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
@@ -340,11 +349,10 @@ static void __init mpc8xxx_add_controller(struct device_node *np)
mm_gc->save_regs = mpc8xxx_gpio_save_regs;
gc->ngpio = MPC8XXX_GPIO_PINS;
gc->direction_input = mpc8xxx_gpio_dir_in;
- gc->direction_output = mpc8xxx_gpio_dir_out;
- if (of_device_is_compatible(np, "fsl,mpc8572-gpio"))
- gc->get = mpc8572_gpio_get;
- else
- gc->get = mpc8xxx_gpio_get;
+ gc->direction_output = of_device_is_compatible(np, "fsl,mpc5121-gpio") ?
+ mpc5121_gpio_dir_out : mpc8xxx_gpio_dir_out;
+ gc->get = of_device_is_compatible(np, "fsl,mpc8572-gpio") ?
+ mpc8572_gpio_get : mpc8xxx_gpio_get;
gc->set = mpc8xxx_gpio_set;
gc->to_irq = mpc8xxx_gpio_to_irq;
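
The MPC5121 hook above simply rejects direction-output requests for the input-only lines before falling through to the generic helper. A minimal sketch of that guard, with an illustrative stub in place of the real mpc8xxx implementation:

#include <stdio.h>
#include <errno.h>

static int generic_dir_out(unsigned int gpio, int val)
{
    printf("gpio %u driven to %d\n", gpio, val);
    return 0;
}

static int mpc5121_dir_out(unsigned int gpio, int val)
{
    if (gpio >= 28)          /* GPIO 28..31 are input only on MPC5121 */
        return -EINVAL;
    return generic_dir_out(gpio, val);
}

int main(void)
{
    printf("gpio 5: %d\n", mpc5121_dir_out(5, 1));   /* ok */
    printf("gpio 30: %d\n", mpc5121_dir_out(30, 1)); /* rejected */
    return 0;
}
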
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 147df8ae79d..d3f3e8f5456 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -546,7 +546,7 @@ static void pca953x_irq_teardown(struct pca953x_chip *chip)
* Translate OpenFirmware node properties into platform_data
* WARNING: This is DEPRECATED and will be removed eventually!
*/
-void
+static void
pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
{
struct device_node *node;
@@ -574,7 +574,7 @@ pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
*invert = *val;
}
#else
-void
+static void
pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
{
*gpio_base = -1;
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index 093c90bd3c1..4102f63230f 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -238,10 +238,6 @@ static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
int ret, irq, i;
static DECLARE_BITMAP(init_irq, NR_IRQS);
- pdata = dev->dev.platform_data;
- if (pdata == NULL)
- return -ENODEV;
-
chip = kzalloc(sizeof(*chip), GFP_KERNEL);
if (chip == NULL)
return -ENOMEM;
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 3969f7553fe..d2619d72cec 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -456,6 +456,30 @@ done:
EXPORT_SYMBOL(drm_crtc_helper_set_mode);
+static int
+drm_crtc_helper_disable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+
+ /* Decouple all encoders and their attached connectors from this crtc */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc != crtc)
+ continue;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->encoder != encoder)
+ continue;
+
+ connector->encoder = NULL;
+ }
+ }
+
+ drm_helper_disable_unused_functions(dev);
+ return 0;
+}
+
/**
* drm_crtc_helper_set_config - set a new config from userspace
* @crtc: CRTC to setup
@@ -510,8 +534,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
(int)set->num_connectors, set->x, set->y);
} else {
DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
- set->mode = NULL;
- set->num_connectors = 0;
+ return drm_crtc_helper_disable(set->crtc);
}
dev = set->crtc->dev;
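
drm_crtc_helper_disable(), added above, detaches every connector from any encoder still bound to the CRTC being shut down, then lets drm_helper_disable_unused_functions() turn the hardware off. A toy sketch of the decoupling step, using plain arrays instead of the DRM mode_config lists:

#include <stdio.h>
#include <stddef.h>

struct crtc      { int id; };
struct encoder   { struct crtc *crtc; };
struct connector { struct encoder *encoder; };

static void disable_unused(void)
{
    printf("disabling unused encoders/crtcs\n");
}

static void crtc_disable(struct crtc *crtc,
                         struct encoder *encoders, size_t nenc,
                         struct connector *connectors, size_t nconn)
{
    size_t i, j;

    for (i = 0; i < nenc; i++) {
        if (encoders[i].crtc != crtc)
            continue;
        for (j = 0; j < nconn; j++)
            if (connectors[j].encoder == &encoders[i])
                connectors[j].encoder = NULL;   /* decouple connector */
    }
    disable_unused();
}

int main(void)
{
    struct crtc c = { 0 };
    struct encoder e[1] = { { &c } };
    struct connector conn[1] = { { &e[0] } };

    crtc_disable(&c, e, 1, conn, 1);
    printf("connector encoder %p\n", (void *)conn[0].encoder);  /* NULL */
    return 0;
}
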
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index 6f8afea94fc..2bb07bca511 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -27,82 +27,84 @@
#include "drm.h"
#include "exynos_drm_drv.h"
+#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"
-static DEFINE_MUTEX(exynos_drm_buf_lock);
-
static int lowlevel_buffer_allocate(struct drm_device *dev,
- struct exynos_drm_buf_entry *entry)
+ struct exynos_drm_gem_buf *buffer)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
- entry->vaddr = dma_alloc_writecombine(dev->dev, entry->size,
- (dma_addr_t *)&entry->paddr, GFP_KERNEL);
- if (!entry->paddr) {
+ buffer->kvaddr = dma_alloc_writecombine(dev->dev, buffer->size,
+ &buffer->dma_addr, GFP_KERNEL);
+ if (!buffer->kvaddr) {
DRM_ERROR("failed to allocate buffer.\n");
return -ENOMEM;
}
- DRM_DEBUG_KMS("allocated : vaddr(0x%x), paddr(0x%x), size(0x%x)\n",
- (unsigned int)entry->vaddr, entry->paddr, entry->size);
+ DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
+ (unsigned long)buffer->kvaddr,
+ (unsigned long)buffer->dma_addr,
+ buffer->size);
return 0;
}
static void lowlevel_buffer_deallocate(struct drm_device *dev,
- struct exynos_drm_buf_entry *entry)
+ struct exynos_drm_gem_buf *buffer)
{
DRM_DEBUG_KMS("%s.\n", __FILE__);
- if (entry->paddr && entry->vaddr && entry->size)
- dma_free_writecombine(dev->dev, entry->size, entry->vaddr,
- entry->paddr);
+ if (buffer->dma_addr && buffer->size)
+ dma_free_writecombine(dev->dev, buffer->size, buffer->kvaddr,
+ (dma_addr_t)buffer->dma_addr);
else
- DRM_DEBUG_KMS("entry data is null.\n");
+ DRM_DEBUG_KMS("buffer data are invalid.\n");
}
-struct exynos_drm_buf_entry *exynos_drm_buf_create(struct drm_device *dev,
+struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev,
unsigned int size)
{
- struct exynos_drm_buf_entry *entry;
+ struct exynos_drm_gem_buf *buffer;
DRM_DEBUG_KMS("%s.\n", __FILE__);
+ DRM_DEBUG_KMS("desired size = 0x%x\n", size);
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry) {
- DRM_ERROR("failed to allocate exynos_drm_buf_entry.\n");
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer) {
+ DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
return ERR_PTR(-ENOMEM);
}
- entry->size = size;
+ buffer->size = size;
/*
* allocate memory region with size and set the memory information
- * to vaddr and paddr of a entry object.
+ * to vaddr and dma_addr of a buffer object.
*/
- if (lowlevel_buffer_allocate(dev, entry) < 0) {
- kfree(entry);
- entry = NULL;
+ if (lowlevel_buffer_allocate(dev, buffer) < 0) {
+ kfree(buffer);
+ buffer = NULL;
return ERR_PTR(-ENOMEM);
}
- return entry;
+ return buffer;
}
void exynos_drm_buf_destroy(struct drm_device *dev,
- struct exynos_drm_buf_entry *entry)
+ struct exynos_drm_gem_buf *buffer)
{
DRM_DEBUG_KMS("%s.\n", __FILE__);
- if (!entry) {
- DRM_DEBUG_KMS("entry is null.\n");
+ if (!buffer) {
+ DRM_DEBUG_KMS("buffer is null.\n");
return;
}
- lowlevel_buffer_deallocate(dev, entry);
+ lowlevel_buffer_deallocate(dev, buffer);
- kfree(entry);
- entry = NULL;
+ kfree(buffer);
+ buffer = NULL;
}
MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h
index 045d59eab01..6e91f9caa5d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.h
@@ -26,28 +26,15 @@
#ifndef _EXYNOS_DRM_BUF_H_
#define _EXYNOS_DRM_BUF_H_
-/*
- * exynos drm buffer entry structure.
- *
- * @paddr: physical address of allocated memory.
- * @vaddr: kernel virtual address of allocated memory.
- * @size: size of allocated memory.
- */
-struct exynos_drm_buf_entry {
- dma_addr_t paddr;
- void __iomem *vaddr;
- unsigned int size;
-};
-
/* allocate physical memory. */
-struct exynos_drm_buf_entry *exynos_drm_buf_create(struct drm_device *dev,
+struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev,
unsigned int size);
-/* get physical memory information of a drm framebuffer. */
-struct exynos_drm_buf_entry *exynos_drm_fb_get_buf(struct drm_framebuffer *fb);
+/* get memory information of a drm framebuffer. */
+struct exynos_drm_gem_buf *exynos_drm_fb_get_buf(struct drm_framebuffer *fb);
/* remove allocated physical memory. */
void exynos_drm_buf_destroy(struct drm_device *dev,
- struct exynos_drm_buf_entry *entry);
+ struct exynos_drm_gem_buf *buffer);
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index 985d9e76872..d620b078425 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -37,6 +37,8 @@
struct exynos_drm_connector {
struct drm_connector drm_connector;
+ uint32_t encoder_id;
+ struct exynos_drm_manager *manager;
};
/* convert exynos_video_timings to drm_display_mode */
@@ -47,6 +49,7 @@ convert_to_display_mode(struct drm_display_mode *mode,
DRM_DEBUG_KMS("%s\n", __FILE__);
mode->clock = timing->pixclock / 1000;
+ mode->vrefresh = timing->refresh;
mode->hdisplay = timing->xres;
mode->hsync_start = mode->hdisplay + timing->left_margin;
@@ -57,6 +60,12 @@ convert_to_display_mode(struct drm_display_mode *mode,
mode->vsync_start = mode->vdisplay + timing->upper_margin;
mode->vsync_end = mode->vsync_start + timing->vsync_len;
mode->vtotal = mode->vsync_end + timing->lower_margin;
+
+ if (timing->vmode & FB_VMODE_INTERLACED)
+ mode->flags |= DRM_MODE_FLAG_INTERLACE;
+
+ if (timing->vmode & FB_VMODE_DOUBLE)
+ mode->flags |= DRM_MODE_FLAG_DBLSCAN;
}
/* convert drm_display_mode to exynos_video_timings */
@@ -69,7 +78,7 @@ convert_to_video_timing(struct fb_videomode *timing,
memset(timing, 0, sizeof(*timing));
timing->pixclock = mode->clock * 1000;
- timing->refresh = mode->vrefresh;
+ timing->refresh = drm_mode_vrefresh(mode);
timing->xres = mode->hdisplay;
timing->left_margin = mode->hsync_start - mode->hdisplay;
@@ -92,15 +101,16 @@ convert_to_video_timing(struct fb_videomode *timing,
static int exynos_drm_connector_get_modes(struct drm_connector *connector)
{
- struct exynos_drm_manager *manager =
- exynos_drm_get_manager(connector->encoder);
- struct exynos_drm_display *display = manager->display;
+ struct exynos_drm_connector *exynos_connector =
+ to_exynos_connector(connector);
+ struct exynos_drm_manager *manager = exynos_connector->manager;
+ struct exynos_drm_display_ops *display_ops = manager->display_ops;
unsigned int count;
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (!display) {
- DRM_DEBUG_KMS("display is null.\n");
+ if (!display_ops) {
+ DRM_DEBUG_KMS("display_ops is null.\n");
return 0;
}
@@ -112,7 +122,7 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
* P.S. in case of lcd panel, count is always 1 if success
* because lcd panel has only one mode.
*/
- if (display->get_edid) {
+ if (display_ops->get_edid) {
int ret;
void *edid;
@@ -122,7 +132,7 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
return 0;
}
- ret = display->get_edid(manager->dev, connector,
+ ret = display_ops->get_edid(manager->dev, connector,
edid, MAX_EDID);
if (ret < 0) {
DRM_ERROR("failed to get edid data.\n");
@@ -140,8 +150,8 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
struct drm_display_mode *mode = drm_mode_create(connector->dev);
struct fb_videomode *timing;
- if (display->get_timing)
- timing = display->get_timing(manager->dev);
+ if (display_ops->get_timing)
+ timing = display_ops->get_timing(manager->dev);
else {
drm_mode_destroy(connector->dev, mode);
return 0;
@@ -162,9 +172,10 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct exynos_drm_manager *manager =
- exynos_drm_get_manager(connector->encoder);
- struct exynos_drm_display *display = manager->display;
+ struct exynos_drm_connector *exynos_connector =
+ to_exynos_connector(connector);
+ struct exynos_drm_manager *manager = exynos_connector->manager;
+ struct exynos_drm_display_ops *display_ops = manager->display_ops;
struct fb_videomode timing;
int ret = MODE_BAD;
@@ -172,8 +183,8 @@ static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
convert_to_video_timing(&timing, mode);
- if (display && display->check_timing)
- if (!display->check_timing(manager->dev, (void *)&timing))
+ if (display_ops && display_ops->check_timing)
+ if (!display_ops->check_timing(manager->dev, (void *)&timing))
ret = MODE_OK;
return ret;
@@ -181,9 +192,25 @@ static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
struct drm_encoder *exynos_drm_best_encoder(struct drm_connector *connector)
{
+ struct drm_device *dev = connector->dev;
+ struct exynos_drm_connector *exynos_connector =
+ to_exynos_connector(connector);
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+
DRM_DEBUG_KMS("%s\n", __FILE__);
- return connector->encoder;
+ obj = drm_mode_object_find(dev, exynos_connector->encoder_id,
+ DRM_MODE_OBJECT_ENCODER);
+ if (!obj) {
+ DRM_DEBUG_KMS("Unknown ENCODER ID %d\n",
+ exynos_connector->encoder_id);
+ return NULL;
+ }
+
+ encoder = obj_to_encoder(obj);
+
+ return encoder;
}
static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
@@ -196,15 +223,17 @@ static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
static enum drm_connector_status
exynos_drm_connector_detect(struct drm_connector *connector, bool force)
{
- struct exynos_drm_manager *manager =
- exynos_drm_get_manager(connector->encoder);
- struct exynos_drm_display *display = manager->display;
+ struct exynos_drm_connector *exynos_connector =
+ to_exynos_connector(connector);
+ struct exynos_drm_manager *manager = exynos_connector->manager;
+ struct exynos_drm_display_ops *display_ops =
+ manager->display_ops;
enum drm_connector_status status = connector_status_disconnected;
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (display && display->is_connected) {
- if (display->is_connected(manager->dev))
+ if (display_ops && display_ops->is_connected) {
+ if (display_ops->is_connected(manager->dev))
status = connector_status_connected;
else
status = connector_status_disconnected;
@@ -251,9 +280,11 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
connector = &exynos_connector->drm_connector;
- switch (manager->display->type) {
+ switch (manager->display_ops->type) {
case EXYNOS_DISPLAY_TYPE_HDMI:
type = DRM_MODE_CONNECTOR_HDMIA;
+ connector->interlace_allowed = true;
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
break;
default:
type = DRM_MODE_CONNECTOR_Unknown;
@@ -267,7 +298,10 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
if (err)
goto err_connector;
+ exynos_connector->encoder_id = encoder->base.id;
+ exynos_connector->manager = manager;
connector->encoder = encoder;
+
err = drm_mode_connector_attach_encoder(connector, encoder);
if (err) {
DRM_ERROR("failed to attach a connector to a encoder\n");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 9337e5e2dbb..ee43cc22085 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -29,36 +29,17 @@
#include "drmP.h"
#include "drm_crtc_helper.h"
+#include "exynos_drm_crtc.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_encoder.h"
+#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"
#define to_exynos_crtc(x) container_of(x, struct exynos_drm_crtc,\
drm_crtc)
/*
- * Exynos specific crtc postion structure.
- *
- * @fb_x: offset x on a framebuffer to be displyed
- * - the unit is screen coordinates.
- * @fb_y: offset y on a framebuffer to be displayed
- * - the unit is screen coordinates.
- * @crtc_x: offset x on hardware screen.
- * @crtc_y: offset y on hardware screen.
- * @crtc_w: width of hardware screen.
- * @crtc_h: height of hardware screen.
- */
-struct exynos_drm_crtc_pos {
- unsigned int fb_x;
- unsigned int fb_y;
- unsigned int crtc_x;
- unsigned int crtc_y;
- unsigned int crtc_w;
- unsigned int crtc_h;
-};
-
-/*
* Exynos specific crtc structure.
*
* @drm_crtc: crtc object.
@@ -85,30 +66,31 @@ static void exynos_drm_crtc_apply(struct drm_crtc *crtc)
exynos_drm_fn_encoder(crtc, overlay,
exynos_drm_encoder_crtc_mode_set);
- exynos_drm_fn_encoder(crtc, NULL, exynos_drm_encoder_crtc_commit);
+ exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
+ exynos_drm_encoder_crtc_commit);
}
-static int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
- struct drm_framebuffer *fb,
- struct drm_display_mode *mode,
- struct exynos_drm_crtc_pos *pos)
+int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
+ struct drm_framebuffer *fb,
+ struct drm_display_mode *mode,
+ struct exynos_drm_crtc_pos *pos)
{
- struct exynos_drm_buf_entry *entry;
+ struct exynos_drm_gem_buf *buffer;
unsigned int actual_w;
unsigned int actual_h;
- entry = exynos_drm_fb_get_buf(fb);
- if (!entry) {
- DRM_LOG_KMS("entry is null.\n");
+ buffer = exynos_drm_fb_get_buf(fb);
+ if (!buffer) {
+ DRM_LOG_KMS("buffer is null.\n");
return -EFAULT;
}
- overlay->paddr = entry->paddr;
- overlay->vaddr = entry->vaddr;
+ overlay->dma_addr = buffer->dma_addr;
+ overlay->vaddr = buffer->kvaddr;
- DRM_DEBUG_KMS("vaddr = 0x%lx, paddr = 0x%lx\n",
+ DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n",
(unsigned long)overlay->vaddr,
- (unsigned long)overlay->paddr);
+ (unsigned long)overlay->dma_addr);
actual_w = min((mode->hdisplay - pos->crtc_x), pos->crtc_w);
actual_h = min((mode->vdisplay - pos->crtc_y), pos->crtc_h);
@@ -171,9 +153,26 @@ static int exynos_drm_crtc_update(struct drm_crtc *crtc)
static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
{
- DRM_DEBUG_KMS("%s\n", __FILE__);
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
- /* TODO */
+ DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
+ exynos_drm_encoder_crtc_commit);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ /* TODO */
+ exynos_drm_fn_encoder(crtc, NULL,
+ exynos_drm_encoder_crtc_disable);
+ break;
+ default:
+ DRM_DEBUG_KMS("unspecified mode %d\n", mode);
+ break;
+ }
}
static void exynos_drm_crtc_prepare(struct drm_crtc *crtc)
@@ -185,9 +184,12 @@ static void exynos_drm_crtc_prepare(struct drm_crtc *crtc)
static void exynos_drm_crtc_commit(struct drm_crtc *crtc)
{
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+
DRM_DEBUG_KMS("%s\n", __FILE__);
- /* drm framework doesn't check NULL. */
+ exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
+ exynos_drm_encoder_crtc_commit);
}
static bool
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index c584042d6d2..25f72a62cb8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -35,4 +35,29 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr);
int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc);
void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
+/*
+ * Exynos specific crtc position structure.
+ *
+ * @fb_x: offset x on a framebuffer to be displayed
+ * - the unit is screen coordinates.
+ * @fb_y: offset y on a framebuffer to be displayed
+ * - the unit is screen coordinates.
+ * @crtc_x: offset x on hardware screen.
+ * @crtc_y: offset y on hardware screen.
+ * @crtc_w: width of hardware screen.
+ * @crtc_h: height of hardware screen.
+ */
+struct exynos_drm_crtc_pos {
+ unsigned int fb_x;
+ unsigned int fb_y;
+ unsigned int crtc_x;
+ unsigned int crtc_y;
+ unsigned int crtc_w;
+ unsigned int crtc_h;
+};
+
+int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
+ struct drm_framebuffer *fb,
+ struct drm_display_mode *mode,
+ struct exynos_drm_crtc_pos *pos);
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 83810cbe3c1..53e2216de61 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -27,6 +27,7 @@
#include "drmP.h"
#include "drm.h"
+#include "drm_crtc_helper.h"
#include <drm/exynos_drm.h>
@@ -61,6 +62,9 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
drm_mode_config_init(dev);
+ /* init kms poll for handling hpd */
+ drm_kms_helper_poll_init(dev);
+
exynos_drm_mode_config_init(dev);
/*
@@ -116,6 +120,7 @@ static int exynos_drm_unload(struct drm_device *dev)
exynos_drm_fbdev_fini(dev);
exynos_drm_device_unregister(dev);
drm_vblank_cleanup(dev);
+ drm_kms_helper_poll_fini(dev);
drm_mode_config_cleanup(dev);
kfree(dev->dev_private);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index c03683f2ae7..5e02e6ecc2e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -29,6 +29,7 @@
#ifndef _EXYNOS_DRM_DRV_H_
#define _EXYNOS_DRM_DRV_H_
+#include <linux/module.h>
#include "drm.h"
#define MAX_CRTC 2
@@ -79,8 +80,8 @@ struct exynos_drm_overlay_ops {
* @scan_flag: interlace or progressive way.
* (it could be DRM_MODE_FLAG_*)
* @bpp: pixel size.(in bit)
- * @paddr: bus(accessed by dma) physical memory address to this overlay
- * and this is physically continuous.
+ * @dma_addr: bus address (accessed by dma) of the memory region allocated
+ * for an overlay.
* @vaddr: virtual memory addresss to this overlay.
* @default_win: a window to be enabled.
* @color_key: color key on or off.
@@ -108,7 +109,7 @@ struct exynos_drm_overlay {
unsigned int scan_flag;
unsigned int bpp;
unsigned int pitch;
- dma_addr_t paddr;
+ dma_addr_t dma_addr;
void __iomem *vaddr;
bool default_win;
@@ -130,7 +131,7 @@ struct exynos_drm_overlay {
* @check_timing: check if timing is valid or not.
* @power_on: display device on or off.
*/
-struct exynos_drm_display {
+struct exynos_drm_display_ops {
enum exynos_drm_output_type type;
bool (*is_connected)(struct device *dev);
int (*get_edid)(struct device *dev, struct drm_connector *connector,
@@ -146,12 +147,14 @@ struct exynos_drm_display {
* @mode_set: convert drm_display_mode to hw specific display mode and
* would be called by encoder->mode_set().
* @commit: set current hw specific display mode to hw.
+ * @disable: disable hardware specific display mode.
* @enable_vblank: specific driver callback for enabling vblank interrupt.
* @disable_vblank: specific driver callback for disabling vblank interrupt.
*/
struct exynos_drm_manager_ops {
void (*mode_set)(struct device *subdrv_dev, void *mode);
void (*commit)(struct device *subdrv_dev);
+ void (*disable)(struct device *subdrv_dev);
int (*enable_vblank)(struct device *subdrv_dev);
void (*disable_vblank)(struct device *subdrv_dev);
};
@@ -178,7 +181,7 @@ struct exynos_drm_manager {
int pipe;
struct exynos_drm_manager_ops *ops;
struct exynos_drm_overlay_ops *overlay_ops;
- struct exynos_drm_display *display;
+ struct exynos_drm_display_ops *display_ops;
};
/*
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index 7cf6fa86a67..153061415ba 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -53,15 +53,36 @@ static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
struct drm_device *dev = encoder->dev;
struct drm_connector *connector;
struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
+ struct exynos_drm_manager_ops *manager_ops = manager->ops;
DRM_DEBUG_KMS("%s, encoder dpms: %d\n", __FILE__, mode);
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ if (manager_ops && manager_ops->commit)
+ manager_ops->commit(manager->dev);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ /* TODO */
+ if (manager_ops && manager_ops->disable)
+ manager_ops->disable(manager->dev);
+ break;
+ default:
+ DRM_ERROR("unspecified mode %d\n", mode);
+ break;
+ }
+
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (connector->encoder == encoder) {
- struct exynos_drm_display *display = manager->display;
+ struct exynos_drm_display_ops *display_ops =
+ manager->display_ops;
- if (display && display->power_on)
- display->power_on(manager->dev, mode);
+ DRM_DEBUG_KMS("connector[%d] dpms[%d]\n",
+ connector->base.id, mode);
+ if (display_ops && display_ops->power_on)
+ display_ops->power_on(manager->dev, mode);
}
}
}
@@ -116,15 +137,11 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
{
struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
struct exynos_drm_manager_ops *manager_ops = manager->ops;
- struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
DRM_DEBUG_KMS("%s\n", __FILE__);
if (manager_ops && manager_ops->commit)
manager_ops->commit(manager->dev);
-
- if (overlay_ops && overlay_ops->commit)
- overlay_ops->commit(manager->dev);
}
static struct drm_crtc *
@@ -208,10 +225,23 @@ void exynos_drm_fn_encoder(struct drm_crtc *crtc, void *data,
{
struct drm_device *dev = crtc->dev;
struct drm_encoder *encoder;
+ struct exynos_drm_private *private = dev->dev_private;
+ struct exynos_drm_manager *manager;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc != crtc)
- continue;
+ /*
+ * if the crtc is detached from the encoder, match by pipe;
+ * otherwise match against the crtc attached to the encoder
+ */
+ if (!encoder->crtc) {
+ manager = to_exynos_encoder(encoder)->manager;
+ if (manager->pipe < 0 ||
+ private->crtc[manager->pipe] != crtc)
+ continue;
+ } else {
+ if (encoder->crtc != crtc)
+ continue;
+ }
fn(encoder, data);
}
@@ -250,8 +280,18 @@ void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data)
struct exynos_drm_manager *manager =
to_exynos_encoder(encoder)->manager;
struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
+ int crtc = *(int *)data;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ /*
+ * when crtc is detached from encoder, this pipe is used
+ * to select manager operation
+ */
+ manager->pipe = crtc;
- overlay_ops->commit(manager->dev);
+ if (overlay_ops && overlay_ops->commit)
+ overlay_ops->commit(manager->dev);
}
void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data)
@@ -261,7 +301,28 @@ void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data)
struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
struct exynos_drm_overlay *overlay = data;
- overlay_ops->mode_set(manager->dev, overlay);
+ if (overlay_ops && overlay_ops->mode_set)
+ overlay_ops->mode_set(manager->dev, overlay);
+}
+
+void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data)
+{
+ struct exynos_drm_manager *manager =
+ to_exynos_encoder(encoder)->manager;
+ struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
+
+ DRM_DEBUG_KMS("\n");
+
+ if (overlay_ops && overlay_ops->disable)
+ overlay_ops->disable(manager->dev);
+
+ /*
+ * the crtc is already detached from the encoder and the last
+ * detach step has completed, so clear the pipe in the manager
+ * to prevent repeated calls
+ */
+ if (!encoder->crtc)
+ manager->pipe = -1;
}
MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
index 5ecd645d06a..a22acfbf0e4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
@@ -41,5 +41,6 @@ void exynos_drm_enable_vblank(struct drm_encoder *encoder, void *data);
void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data);
void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data);
void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data);
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 48d29cfd524..5bf4a1ac7f8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -29,7 +29,9 @@
#include "drmP.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
+#include "drm_fb_helper.h"
+#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_buf.h"
#include "exynos_drm_gem.h"
@@ -41,14 +43,14 @@
*
* @fb: drm framebuffer obejct.
* @exynos_gem_obj: exynos specific gem object containing a gem object.
- * @entry: pointer to exynos drm buffer entry object.
- * - containing only the information to physically continuous memory
- * region allocated at default framebuffer creation.
+ * @buffer: pointer to exynos_drm_gem_buffer object.
+ * - contain the memory information to memory region allocated
+ * at default framebuffer creation.
*/
struct exynos_drm_fb {
struct drm_framebuffer fb;
struct exynos_drm_gem_obj *exynos_gem_obj;
- struct exynos_drm_buf_entry *entry;
+ struct exynos_drm_gem_buf *buffer;
};
static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
@@ -63,8 +65,8 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
* default framebuffer has no gem object so
* a buffer of the default framebuffer should be released at here.
*/
- if (!exynos_fb->exynos_gem_obj && exynos_fb->entry)
- exynos_drm_buf_destroy(fb->dev, exynos_fb->entry);
+ if (!exynos_fb->exynos_gem_obj && exynos_fb->buffer)
+ exynos_drm_buf_destroy(fb->dev, exynos_fb->buffer);
kfree(exynos_fb);
exynos_fb = NULL;
@@ -143,29 +145,29 @@ exynos_drm_fb_init(struct drm_file *file_priv, struct drm_device *dev,
*/
if (!mode_cmd->handle) {
if (!file_priv) {
- struct exynos_drm_buf_entry *entry;
+ struct exynos_drm_gem_buf *buffer;
/*
* in case that file_priv is NULL, it allocates
* only buffer and this buffer would be used
* for default framebuffer.
*/
- entry = exynos_drm_buf_create(dev, size);
- if (IS_ERR(entry)) {
- ret = PTR_ERR(entry);
+ buffer = exynos_drm_buf_create(dev, size);
+ if (IS_ERR(buffer)) {
+ ret = PTR_ERR(buffer);
goto err_buffer;
}
- exynos_fb->entry = entry;
+ exynos_fb->buffer = buffer;
- DRM_LOG_KMS("default fb: paddr = 0x%lx, size = 0x%x\n",
- (unsigned long)entry->paddr, size);
+ DRM_LOG_KMS("default: dma_addr = 0x%lx, size = 0x%x\n",
+ (unsigned long)buffer->dma_addr, size);
goto out;
} else {
- exynos_gem_obj = exynos_drm_gem_create(file_priv, dev,
- size,
- &mode_cmd->handle);
+ exynos_gem_obj = exynos_drm_gem_create(dev, file_priv,
+ &mode_cmd->handle,
+ size);
if (IS_ERR(exynos_gem_obj)) {
ret = PTR_ERR(exynos_gem_obj);
goto err_buffer;
@@ -189,10 +191,10 @@ exynos_drm_fb_init(struct drm_file *file_priv, struct drm_device *dev,
* so that default framebuffer has no its own gem object,
* only its own buffer object.
*/
- exynos_fb->entry = exynos_gem_obj->entry;
+ exynos_fb->buffer = exynos_gem_obj->buffer;
- DRM_LOG_KMS("paddr = 0x%lx, size = 0x%x, gem object = 0x%x\n",
- (unsigned long)exynos_fb->entry->paddr, size,
+ DRM_LOG_KMS("dma_addr = 0x%lx, size = 0x%x, gem object = 0x%x\n",
+ (unsigned long)exynos_fb->buffer->dma_addr, size,
(unsigned int)&exynos_gem_obj->base);
out:
@@ -220,26 +222,36 @@ struct drm_framebuffer *exynos_drm_fb_create(struct drm_device *dev,
return exynos_drm_fb_init(file_priv, dev, mode_cmd);
}
-struct exynos_drm_buf_entry *exynos_drm_fb_get_buf(struct drm_framebuffer *fb)
+struct exynos_drm_gem_buf *exynos_drm_fb_get_buf(struct drm_framebuffer *fb)
{
struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
- struct exynos_drm_buf_entry *entry;
+ struct exynos_drm_gem_buf *buffer;
DRM_DEBUG_KMS("%s\n", __FILE__);
- entry = exynos_fb->entry;
- if (!entry)
+ buffer = exynos_fb->buffer;
+ if (!buffer)
return NULL;
- DRM_DEBUG_KMS("vaddr = 0x%lx, paddr = 0x%lx\n",
- (unsigned long)entry->vaddr,
- (unsigned long)entry->paddr);
+ DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n",
+ (unsigned long)buffer->kvaddr,
+ (unsigned long)buffer->dma_addr);
- return entry;
+ return buffer;
+}
+
+static void exynos_drm_output_poll_changed(struct drm_device *dev)
+{
+ struct exynos_drm_private *private = dev->dev_private;
+ struct drm_fb_helper *fb_helper = private->fb_helper;
+
+ if (fb_helper)
+ drm_fb_helper_hotplug_event(fb_helper);
}
static struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
.fb_create = exynos_drm_fb_create,
+ .output_poll_changed = exynos_drm_output_poll_changed,
};
void exynos_drm_mode_config_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 1f4b3d1a771..836f4100818 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -33,6 +33,7 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
+#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"
#define MAX_CONNECTOR 4
@@ -85,15 +86,13 @@ static struct fb_ops exynos_drm_fb_ops = {
};
static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
- struct drm_framebuffer *fb,
- unsigned int fb_width,
- unsigned int fb_height)
+ struct drm_framebuffer *fb)
{
struct fb_info *fbi = helper->fbdev;
struct drm_device *dev = helper->dev;
struct exynos_drm_fbdev *exynos_fb = to_exynos_fbdev(helper);
- struct exynos_drm_buf_entry *entry;
- unsigned int size = fb_width * fb_height * (fb->bits_per_pixel >> 3);
+ struct exynos_drm_gem_buf *buffer;
+ unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
unsigned long offset;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -101,20 +100,20 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
exynos_fb->fb = fb;
drm_fb_helper_fill_fix(fbi, fb->pitch, fb->depth);
- drm_fb_helper_fill_var(fbi, helper, fb_width, fb_height);
+ drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
- entry = exynos_drm_fb_get_buf(fb);
- if (!entry) {
- DRM_LOG_KMS("entry is null.\n");
+ buffer = exynos_drm_fb_get_buf(fb);
+ if (!buffer) {
+ DRM_LOG_KMS("buffer is null.\n");
return -EFAULT;
}
offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
offset += fbi->var.yoffset * fb->pitch;
- dev->mode_config.fb_base = entry->paddr;
- fbi->screen_base = entry->vaddr + offset;
- fbi->fix.smem_start = entry->paddr + offset;
+ dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
+ fbi->screen_base = buffer->kvaddr + offset;
+ fbi->fix.smem_start = (unsigned long)(buffer->dma_addr + offset);
fbi->screen_size = size;
fbi->fix.smem_len = size;
@@ -171,8 +170,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
goto out;
}
- ret = exynos_drm_fbdev_update(helper, helper->fb, sizes->fb_width,
- sizes->fb_height);
+ ret = exynos_drm_fbdev_update(helper, helper->fb);
if (ret < 0)
fb_dealloc_cmap(&fbi->cmap);
@@ -235,8 +233,7 @@ static int exynos_drm_fbdev_recreate(struct drm_fb_helper *helper,
}
helper->fb = exynos_fbdev->fb;
- return exynos_drm_fbdev_update(helper, helper->fb, sizes->fb_width,
- sizes->fb_height);
+ return exynos_drm_fbdev_update(helper, helper->fb);
}
static int exynos_drm_fbdev_probe(struct drm_fb_helper *helper,
@@ -405,6 +402,18 @@ int exynos_drm_fbdev_reinit(struct drm_device *dev)
fb_helper = private->fb_helper;
if (fb_helper) {
+ struct list_head temp_list;
+
+ INIT_LIST_HEAD(&temp_list);
+
+ /*
+ * fb_helper is reinitialized but the kernel fb is reused,
+ * so kernel_fb_list needs to be backed up and restored
+ */
+ if (!list_empty(&fb_helper->kernel_fb_list))
+ list_replace_init(&fb_helper->kernel_fb_list,
+ &temp_list);
+
drm_fb_helper_fini(fb_helper);
ret = drm_fb_helper_init(dev, fb_helper,
@@ -414,6 +423,9 @@ int exynos_drm_fbdev_reinit(struct drm_device *dev)
return ret;
}
+ if (!list_empty(&temp_list))
+ list_replace(&temp_list, &fb_helper->kernel_fb_list);
+
ret = drm_fb_helper_single_add_all_connectors(fb_helper);
if (ret < 0) {
DRM_ERROR("failed to add fb helper to connectors\n");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 4659c88cdd9..db3b3d9e731 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -64,7 +64,7 @@ struct fimd_win_data {
unsigned int fb_width;
unsigned int fb_height;
unsigned int bpp;
- dma_addr_t paddr;
+ dma_addr_t dma_addr;
void __iomem *vaddr;
unsigned int buf_offsize;
unsigned int line_size; /* bytes */
@@ -124,7 +124,7 @@ static int fimd_display_power_on(struct device *dev, int mode)
return 0;
}
-static struct exynos_drm_display fimd_display = {
+static struct exynos_drm_display_ops fimd_display_ops = {
.type = EXYNOS_DISPLAY_TYPE_LCD,
.is_connected = fimd_display_is_connected,
.get_timing = fimd_get_timing,
@@ -177,6 +177,40 @@ static void fimd_commit(struct device *dev)
writel(val, ctx->regs + VIDCON0);
}
+static void fimd_disable(struct device *dev)
+{
+ struct fimd_context *ctx = get_fimd_context(dev);
+ struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
+ struct drm_device *drm_dev = subdrv->drm_dev;
+ struct exynos_drm_manager *manager = &subdrv->manager;
+ u32 val;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ /* fimd dma off */
+ val = readl(ctx->regs + VIDCON0);
+ val &= ~(VIDCON0_ENVID | VIDCON0_ENVID_F);
+ writel(val, ctx->regs + VIDCON0);
+
+ /*
+ * if vblank is still enabled while dma is off,
+ * disable the vsync interrupt.
+ */
+ if (drm_dev->vblank_enabled[manager->pipe] &&
+ atomic_read(&drm_dev->vblank_refcount[manager->pipe])) {
+ drm_vblank_put(drm_dev, manager->pipe);
+
+ /*
+ * if vblank_disable_allowed is 0, disable the vsync
+ * interrupt right now; otherwise the vsync interrupt
+ * will be disabled by the drm timer once the current
+ * process gives up ownership of the vblank event.
+ */
+ if (!drm_dev->vblank_disable_allowed)
+ drm_vblank_off(drm_dev, manager->pipe);
+ }
+}
+
static int fimd_enable_vblank(struct device *dev)
{
struct fimd_context *ctx = get_fimd_context(dev);
@@ -220,6 +254,7 @@ static void fimd_disable_vblank(struct device *dev)
static struct exynos_drm_manager_ops fimd_manager_ops = {
.commit = fimd_commit,
+ .disable = fimd_disable,
.enable_vblank = fimd_enable_vblank,
.disable_vblank = fimd_disable_vblank,
};
@@ -251,7 +286,7 @@ static void fimd_win_mode_set(struct device *dev,
win_data->ovl_height = overlay->crtc_height;
win_data->fb_width = overlay->fb_width;
win_data->fb_height = overlay->fb_height;
- win_data->paddr = overlay->paddr + offset;
+ win_data->dma_addr = overlay->dma_addr + offset;
win_data->vaddr = overlay->vaddr + offset;
win_data->bpp = overlay->bpp;
win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
@@ -263,7 +298,7 @@ static void fimd_win_mode_set(struct device *dev,
DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
win_data->ovl_width, win_data->ovl_height);
DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n",
- (unsigned long)win_data->paddr,
+ (unsigned long)win_data->dma_addr,
(unsigned long)win_data->vaddr);
DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
overlay->fb_width, overlay->crtc_width);
@@ -376,16 +411,16 @@ static void fimd_win_commit(struct device *dev)
writel(val, ctx->regs + SHADOWCON);
/* buffer start address */
- val = win_data->paddr;
+ val = (unsigned long)win_data->dma_addr;
writel(val, ctx->regs + VIDWx_BUF_START(win, 0));
/* buffer end address */
size = win_data->fb_width * win_data->ovl_height * (win_data->bpp >> 3);
- val = win_data->paddr + size;
+ val = (unsigned long)(win_data->dma_addr + size);
writel(val, ctx->regs + VIDWx_BUF_END(win, 0));
DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n",
- (unsigned long)win_data->paddr, val, size);
+ (unsigned long)win_data->dma_addr, val, size);
DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
win_data->ovl_width, win_data->ovl_height);
@@ -447,7 +482,6 @@ static void fimd_win_commit(struct device *dev)
static void fimd_win_disable(struct device *dev)
{
struct fimd_context *ctx = get_fimd_context(dev);
- struct fimd_win_data *win_data;
int win = ctx->default_win;
u32 val;
@@ -456,8 +490,6 @@ static void fimd_win_disable(struct device *dev)
if (win < 0 || win > WINDOWS_NR)
return;
- win_data = &ctx->win_data[win];
-
/* protect windows */
val = readl(ctx->regs + SHADOWCON);
val |= SHADOWCON_WINx_PROTECT(win);
@@ -528,6 +560,16 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
/* VSYNC interrupt */
writel(VIDINTCON1_INT_FRAME, ctx->regs + VIDINTCON1);
+ /*
+ * when vblank_disable_allowed is 1, manager->pipe may already be -1
+ * here: the disable callback does not turn the vsync interrupt off
+ * immediately, so a vsync interrupt can still occur at this point.
+ * the vsync interrupt will be disabled by the timer handler later.
+ */
+ if (manager->pipe == -1)
+ return IRQ_HANDLED;
+
drm_handle_vblank(drm_dev, manager->pipe);
fimd_finish_pageflip(drm_dev, manager->pipe);
@@ -548,13 +590,6 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
*/
drm_dev->irq_enabled = 1;
- /*
- * with vblank_disable_allowed = 1, vblank interrupt will be disabled
- * by drm timer once a current process gives up ownership of
- * vblank event.(drm_vblank_put function was called)
- */
- drm_dev->vblank_disable_allowed = 1;
-
return 0;
}
@@ -731,7 +766,7 @@ static int __devinit fimd_probe(struct platform_device *pdev)
subdrv->manager.pipe = -1;
subdrv->manager.ops = &fimd_manager_ops;
subdrv->manager.overlay_ops = &fimd_overlay_ops;
- subdrv->manager.display = &fimd_display;
+ subdrv->manager.display_ops = &fimd_display_ops;
subdrv->manager.dev = dev;
platform_set_drvdata(pdev, ctx);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index a8e7a88906e..aba0fe47f7e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -62,40 +62,28 @@ static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
}
-struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_file *file_priv,
- struct drm_device *dev, unsigned int size,
- unsigned int *handle)
+static struct exynos_drm_gem_obj
+ *exynos_drm_gem_init(struct drm_device *drm_dev,
+ struct drm_file *file_priv, unsigned int *handle,
+ unsigned int size)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
- struct exynos_drm_buf_entry *entry;
struct drm_gem_object *obj;
int ret;
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- size = roundup(size, PAGE_SIZE);
-
exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
if (!exynos_gem_obj) {
DRM_ERROR("failed to allocate exynos gem object.\n");
return ERR_PTR(-ENOMEM);
}
- /* allocate the new buffer object and memory region. */
- entry = exynos_drm_buf_create(dev, size);
- if (!entry) {
- kfree(exynos_gem_obj);
- return ERR_PTR(-ENOMEM);
- }
-
- exynos_gem_obj->entry = entry;
-
obj = &exynos_gem_obj->base;
- ret = drm_gem_object_init(dev, obj, size);
+ ret = drm_gem_object_init(drm_dev, obj, size);
if (ret < 0) {
- DRM_ERROR("failed to initailize gem object.\n");
- goto err_obj_init;
+ DRM_ERROR("failed to initialize gem object.\n");
+ ret = -EINVAL;
+ goto err_object_init;
}
DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
@@ -127,24 +115,50 @@ err_handle_create:
err_create_mmap_offset:
drm_gem_object_release(obj);
-err_obj_init:
- exynos_drm_buf_destroy(dev, exynos_gem_obj->entry);
-
+err_object_init:
kfree(exynos_gem_obj);
return ERR_PTR(ret);
}
+struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
+ struct drm_file *file_priv,
+ unsigned int *handle, unsigned long size)
+{
+
+ struct exynos_drm_gem_obj *exynos_gem_obj = NULL;
+ struct exynos_drm_gem_buf *buffer;
+
+ size = roundup(size, PAGE_SIZE);
+
+ DRM_DEBUG_KMS("%s: size = 0x%lx\n", __FILE__, size);
+
+ buffer = exynos_drm_buf_create(dev, size);
+ if (IS_ERR(buffer)) {
+ return ERR_CAST(buffer);
+ }
+
+ exynos_gem_obj = exynos_drm_gem_init(dev, file_priv, handle, size);
+ if (IS_ERR(exynos_gem_obj)) {
+ exynos_drm_buf_destroy(dev, buffer);
+ return exynos_gem_obj;
+ }
+
+ exynos_gem_obj->buffer = buffer;
+
+ return exynos_gem_obj;
+}
+
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file_priv)
{
struct drm_exynos_gem_create *args = data;
- struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct exynos_drm_gem_obj *exynos_gem_obj = NULL;
- DRM_DEBUG_KMS("%s : size = 0x%x\n", __FILE__, args->size);
+ DRM_DEBUG_KMS("%s\n", __FILE__);
- exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, args->size,
- &args->handle);
+ exynos_gem_obj = exynos_drm_gem_create(dev, file_priv,
+ &args->handle, args->size);
if (IS_ERR(exynos_gem_obj))
return PTR_ERR(exynos_gem_obj);
@@ -175,7 +189,7 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
{
struct drm_gem_object *obj = filp->private_data;
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
- struct exynos_drm_buf_entry *entry;
+ struct exynos_drm_gem_buf *buffer;
unsigned long pfn, vm_size;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -187,20 +201,20 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
vm_size = vma->vm_end - vma->vm_start;
/*
- * a entry contains information to physically continuous memory
+ * a buffer contains information about physically contiguous memory
* allocated by user request or at framebuffer creation.
*/
- entry = exynos_gem_obj->entry;
+ buffer = exynos_gem_obj->buffer;
/* check if user-requested size is valid. */
- if (vm_size > entry->size)
+ if (vm_size > buffer->size)
return -EINVAL;
/*
* get page frame number to physical memory to be mapped
* to user space.
*/
- pfn = exynos_gem_obj->entry->paddr >> PAGE_SHIFT;
+ pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >> PAGE_SHIFT;
DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
@@ -281,7 +295,7 @@ void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj)
exynos_gem_obj = to_exynos_gem_obj(gem_obj);
- exynos_drm_buf_destroy(gem_obj->dev, exynos_gem_obj->entry);
+ exynos_drm_buf_destroy(gem_obj->dev, exynos_gem_obj->buffer);
kfree(exynos_gem_obj);
}
@@ -302,8 +316,8 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
args->pitch = args->width * args->bpp >> 3;
args->size = args->pitch * args->height;
- exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, args->size,
- &args->handle);
+ exynos_gem_obj = exynos_drm_gem_create(dev, file_priv, &args->handle,
+ args->size);
if (IS_ERR(exynos_gem_obj))
return PTR_ERR(exynos_gem_obj);
@@ -360,7 +374,8 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
mutex_lock(&dev->struct_mutex);
- pfn = (exynos_gem_obj->entry->paddr >> PAGE_SHIFT) + page_offset;
+ pfn = (((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
+ PAGE_SHIFT) + page_offset;
ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
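
The reworked exynos_drm_gem_create() above allocates the backing buffer first and only initializes the GEM object afterwards, destroying the buffer again if that second step fails. Below is a minimal user-space sketch of the same allocate-then-init-with-rollback pattern; all types and helper names here are made up for illustration and are not the driver's API.

#include <stdlib.h>

struct buf { unsigned long size; void *mem; };
struct obj { struct buf *buffer; };

static struct buf *buf_create(unsigned long size)
{
	struct buf *b = calloc(1, sizeof(*b));
	if (!b)
		return NULL;
	b->mem = malloc(size);
	if (!b->mem) {
		free(b);
		return NULL;
	}
	b->size = size;
	return b;
}

static void buf_destroy(struct buf *b)
{
	free(b->mem);
	free(b);
}

static struct obj *obj_create(unsigned long size)
{
	struct buf *b;
	struct obj *o;

	b = buf_create(size);		/* step 1: backing storage */
	if (!b)
		return NULL;

	o = calloc(1, sizeof(*o));	/* step 2: the object itself */
	if (!o) {
		buf_destroy(b);		/* roll back step 1 on failure */
		return NULL;
	}
	o->buffer = b;
	return o;
}
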
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index e5fc0148277..ef8797334e6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -30,13 +30,29 @@
struct exynos_drm_gem_obj, base)
/*
+ * exynos drm gem buffer structure.
+ *
+ * @kvaddr: kernel virtual address of the allocated memory region.
+ * @dma_addr: bus address (accessed by DMA) of the allocated memory region.
+ * - this could be a physical address without an IOMMU, or a
+ * device address with an IOMMU.
+ * @size: size of allocated memory region.
+ */
+struct exynos_drm_gem_buf {
+ void __iomem *kvaddr;
+ dma_addr_t dma_addr;
+ unsigned long size;
+};
+
+/*
* exynos drm buffer structure.
*
* @base: a gem object.
* - a new handle to this gem object would be created
* by drm_gem_handle_create().
- * @entry: pointer to exynos drm buffer entry object.
- * - containing the information to physically
+ * @buffer: a pointer to the exynos_drm_gem_buf object.
+ * - contains the information about the memory region allocated
+ * by user request or at framebuffer creation.
* continuous memory region allocated by user request
* or at framebuffer creation.
*
@@ -45,13 +61,13 @@
*/
struct exynos_drm_gem_obj {
struct drm_gem_object base;
- struct exynos_drm_buf_entry *entry;
+ struct exynos_drm_gem_buf *buffer;
};
/* create a new buffer and get a new gem handle. */
-struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_file *file_priv,
- struct drm_device *dev, unsigned int size,
- unsigned int *handle);
+struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
+ struct drm_file *file_priv,
+ unsigned int *handle, unsigned long size);
/*
* request gem object creation and buffer allocation as the size
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index d09a6e02dc9..004b048c519 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -62,6 +62,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
const struct intel_device_info *info = INTEL_INFO(dev);
seq_printf(m, "gen: %d\n", info->gen);
+ seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
B(is_mobile);
B(is_i85x);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index a9533c54c93..a9ae374861e 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1454,6 +1454,14 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
diff1 = now - dev_priv->last_time1;
+ /* Prevent division-by-zero if we are asking too fast.
+ * Also, we don't get interesting results if we are polling
+ * faster than once in 10ms, so just return the saved value
+ * in such cases.
+ */
+ if (diff1 <= 10)
+ return dev_priv->chipset_power;
+
count1 = I915_READ(DMIEC);
count2 = I915_READ(DDREC);
count3 = I915_READ(CSIEC);
@@ -1484,6 +1492,8 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
dev_priv->last_count1 = total_count;
dev_priv->last_time1 = now;
+ dev_priv->chipset_power = ret;
+
return ret;
}
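
The i915_chipset_val() hunk above rate-limits an expensive counter read: if the caller polls again within 10 ms it simply gets the previously computed value back. A small user-space sketch of that caching pattern follows; compute_power() and the 10 ms threshold are placeholders, not the driver's code.

#include <stdint.h>
#include <time.h>

static uint64_t now_ms(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* Pretend this is the expensive counter-based computation. */
static unsigned long compute_power(void) { return 42; }

static unsigned long cached_power;
static uint64_t last_sample_ms;

unsigned long read_power(void)
{
	uint64_t now = now_ms();

	/* Called again within 10 ms: the counters have barely moved and the
	 * delta may even be zero, so hand back the previously computed value. */
	if (now - last_sample_ms <= 10)
		return cached_power;

	cached_power = compute_power();
	last_sample_ms = now;
	return cached_power;
}
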
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 15bfa9145d2..a1103fc6597 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -58,15 +58,15 @@ module_param_named(powersave, i915_powersave, int, 0600);
MODULE_PARM_DESC(powersave,
"Enable powersavings, fbc, downclocking, etc. (default: true)");
-unsigned int i915_semaphores __read_mostly = 0;
+int i915_semaphores __read_mostly = -1;
module_param_named(semaphores, i915_semaphores, int, 0600);
MODULE_PARM_DESC(semaphores,
- "Use semaphores for inter-ring sync (default: false)");
+ "Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
-unsigned int i915_enable_rc6 __read_mostly = 0;
+int i915_enable_rc6 __read_mostly = -1;
module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
MODULE_PARM_DESC(i915_enable_rc6,
- "Enable power-saving render C-state 6 (default: true)");
+ "Enable power-saving render C-state 6 (default: -1 (use per-chip default))");
int i915_enable_fbc __read_mostly = -1;
module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
@@ -328,7 +328,7 @@ void intel_detect_pch(struct drm_device *dev)
}
}
-static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
int count;
@@ -344,6 +344,22 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
udelay(10);
}
+void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
+{
+ int count;
+
+ count = 0;
+ while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
+ udelay(10);
+
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 1);
+ POSTING_READ(FORCEWAKE_MT);
+
+ count = 0;
+ while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0)
+ udelay(10);
+}
+
/*
* Generally this is called implicitly by the register read function. However,
* if some sequence requires the GT to not power down then this function should
@@ -356,15 +372,21 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
/* Forcewake is atomic in case we get in here without the lock */
if (atomic_add_return(1, &dev_priv->forcewake_count) == 1)
- __gen6_gt_force_wake_get(dev_priv);
+ dev_priv->display.force_wake_get(dev_priv);
}
-static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE, 0);
POSTING_READ(FORCEWAKE);
}
+void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0);
+ POSTING_READ(FORCEWAKE_MT);
+}
+
/*
* see gen6_gt_force_wake_get()
*/
@@ -373,7 +395,7 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
if (atomic_dec_and_test(&dev_priv->forcewake_count))
- __gen6_gt_force_wake_put(dev_priv);
+ dev_priv->display.force_wake_put(dev_priv);
}
void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
@@ -903,8 +925,9 @@ MODULE_LICENSE("GPL and additional rights");
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
(((dev_priv)->info->gen >= 6) && \
- ((reg) < 0x40000) && \
- ((reg) != FORCEWAKE))
+ ((reg) < 0x40000) && \
+ ((reg) != FORCEWAKE) && \
+ ((reg) != ECOBUS))
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
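
The forcewake changes above keep an atomic reference count so that only the first get and the last put actually touch the FORCEWAKE registers, while the per-chip register sequence itself sits behind a function pointer. A rough sketch of the refcounting half, using C11 atomics instead of the kernel's atomic_t; hw_force_wake() is a stand-in, not a real register write.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int forcewake_count;

static void hw_force_wake(int on)	/* stand-in for the register writes */
{
	printf("forcewake %s\n", on ? "asserted" : "released");
}

void force_wake_get(void)
{
	/* Only the 0 -> 1 transition touches the hardware. */
	if (atomic_fetch_add(&forcewake_count, 1) == 0)
		hw_force_wake(1);
}

void force_wake_put(void)
{
	/* Only the 1 -> 0 transition releases it again. */
	if (atomic_fetch_sub(&forcewake_count, 1) == 1)
		hw_force_wake(0);
}
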
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4a9c1b97980..554bef7a3b9 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -107,6 +107,7 @@ struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;
+struct drm_i915_private;
struct intel_opregion {
struct opregion_header *header;
@@ -221,6 +222,8 @@ struct drm_i915_display_funcs {
struct drm_i915_gem_object *obj);
int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
int x, int y);
+ void (*force_wake_get)(struct drm_i915_private *dev_priv);
+ void (*force_wake_put)(struct drm_i915_private *dev_priv);
/* clock updates for mode set */
/* cursor updates */
/* render clock increase/decrease */
@@ -710,6 +713,7 @@ typedef struct drm_i915_private {
u64 last_count1;
unsigned long last_time1;
+ unsigned long chipset_power;
u64 last_count2;
struct timespec last_time2;
unsigned long gfx_power;
@@ -998,11 +1002,11 @@ extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc __always_unused;
extern int i915_panel_ignore_lid __read_mostly;
extern unsigned int i915_powersave __read_mostly;
-extern unsigned int i915_semaphores __read_mostly;
+extern int i915_semaphores __read_mostly;
extern unsigned int i915_lvds_downclock __read_mostly;
extern int i915_panel_use_ssc __read_mostly;
extern int i915_vbt_sdvo_panel_type __read_mostly;
-extern unsigned int i915_enable_rc6 __read_mostly;
+extern int i915_enable_rc6 __read_mostly;
extern int i915_enable_fbc __read_mostly;
extern bool i915_enable_hangcheck __read_mostly;
@@ -1308,6 +1312,11 @@ extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
+extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv);
+
/* overlay */
#ifdef CONFIG_DEBUG_FS
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
@@ -1352,8 +1361,9 @@ void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
(((dev_priv)->info->gen >= 6) && \
- ((reg) < 0x40000) && \
- ((reg) != FORCEWAKE))
+ ((reg) < 0x40000) && \
+ ((reg) != FORCEWAKE) && \
+ ((reg) != ECOBUS))
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3693e83a97f..c681dc149d2 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -32,6 +32,7 @@
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
+#include <linux/dma_remapping.h>
struct change_domains {
uint32_t invalidate_domains;
@@ -746,6 +747,22 @@ i915_gem_execbuffer_flush(struct drm_device *dev,
return 0;
}
+static bool
+intel_enable_semaphores(struct drm_device *dev)
+{
+ if (INTEL_INFO(dev)->gen < 6)
+ return 0;
+
+ if (i915_semaphores >= 0)
+ return i915_semaphores;
+
+ /* Enable semaphores on SNB when IO remapping is off */
+ if (INTEL_INFO(dev)->gen == 6)
+ return !intel_iommu_enabled;
+
+ return 1;
+}
+
static int
i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *to)
@@ -758,7 +775,7 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
return 0;
/* XXX gpu semaphores are implicated in various hard hangs on SNB */
- if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores)
+ if (!intel_enable_semaphores(obj->base.dev))
return i915_gem_object_wait_rendering(obj);
idx = intel_ring_sync_index(from, to);
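
intel_enable_semaphores() above turns the module parameter into a tri-state: -1 picks a per-chip default, while 0 and 1 force the feature off or on. A condensed sketch of that decision follows, with gen and iommu_enabled passed in explicitly since this is not the driver's real interface.

#include <stdbool.h>

/* -1 means "use the per-chip default", 0/1 are explicit overrides,
 * mirroring the i915_semaphores / i915_enable_rc6 module parameters. */
static int semaphores_param = -1;

static bool enable_semaphores(int gen, bool iommu_enabled)
{
	if (gen < 6)
		return false;			/* hardware can't do it */

	if (semaphores_param >= 0)
		return semaphores_param;	/* user override wins */

	if (gen == 6)
		return !iommu_enabled;		/* SNB: only safe without DMAR */

	return true;				/* newer chips: on by default */
}
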
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b080cc82400..a26d5b0a369 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3303,10 +3303,10 @@
/* or SDVOB */
#define HDMIB 0xe1140
#define PORT_ENABLE (1 << 31)
-#define TRANSCODER_A (0)
-#define TRANSCODER_B (1 << 30)
-#define TRANSCODER(pipe) ((pipe) << 30)
-#define TRANSCODER_MASK (1 << 30)
+#define TRANSCODER(pipe) ((pipe) << 30)
+#define TRANSCODER_CPT(pipe) ((pipe) << 29)
+#define TRANSCODER_MASK (1 << 30)
+#define TRANSCODER_MASK_CPT (3 << 29)
#define COLOR_FORMAT_8bpc (0)
#define COLOR_FORMAT_12bpc (3 << 26)
#define SDVOB_HOTPLUG_ENABLE (1 << 23)
@@ -3447,8 +3447,30 @@
#define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38<<22)
#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22)
+/* IVB */
+#define EDP_LINK_TRAIN_400MV_0DB_IVB (0x24 <<22)
+#define EDP_LINK_TRAIN_400MV_3_5DB_IVB (0x2a <<22)
+#define EDP_LINK_TRAIN_400MV_6DB_IVB (0x2f <<22)
+#define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 <<22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 <<22)
+#define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 <<22)
+#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x33 <<22)
+
+/* legacy values */
+#define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 <<22)
+#define EDP_LINK_TRAIN_1000MV_0DB_IVB (0x20 <<22)
+#define EDP_LINK_TRAIN_500MV_3_5DB_IVB (0x02 <<22)
+#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB (0x22 <<22)
+#define EDP_LINK_TRAIN_1000MV_6DB_IVB (0x23 <<22)
+
+#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22)
+
#define FORCEWAKE 0xA18C
#define FORCEWAKE_ACK 0x130090
+#define FORCEWAKE_MT 0xa188 /* multi-threaded */
+#define FORCEWAKE_MT_ACK 0x130040
+#define ECOBUS 0xa180
+#define FORCEWAKE_MT_ENABLE (1<<5)
#define GT_FIFO_FREE_ENTRIES 0x120008
#define GT_FIFO_NUM_RESERVED_ENTRIES 20
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e77a863a383..d809b038ca8 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -38,8 +38,8 @@
#include "i915_drv.h"
#include "i915_trace.h"
#include "drm_dp_helper.h"
-
#include "drm_crtc_helper.h"
+#include <linux/dma_remapping.h>
#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
@@ -4670,6 +4670,7 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
/**
* intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
* @crtc: CRTC structure
+ * @mode: requested mode
*
* A pipe may be connected to one or more outputs. Based on the depth of the
* attached framebuffer, choose a good color depth to use on the pipe.
@@ -4681,13 +4682,15 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
* HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
* Displays may support a restricted set as well, check EDID and clamp as
* appropriate.
+ * DP may want to dither down to 6bpc to fit larger modes
*
* RETURNS:
* Dithering requirement (i.e. false if display bpc and pipe bpc match,
* true if they don't match).
*/
static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
- unsigned int *pipe_bpp)
+ unsigned int *pipe_bpp,
+ struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4758,6 +4761,11 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
}
}
+ if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+ DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
+ display_bpc = 6;
+ }
+
/*
* We could just drive the pipe at the highest bpc all the time and
* enable dithering as needed, but that costs bandwidth. So choose
@@ -5019,6 +5027,16 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
pipeconf &= ~PIPECONF_DOUBLE_WIDE;
}
+ /* default to 8bpc */
+ pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
+ if (is_dp) {
+ if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+ pipeconf |= PIPECONF_BPP_6 |
+ PIPECONF_DITHER_EN |
+ PIPECONF_DITHER_TYPE_SP;
+ }
+ }
+
dpll |= DPLL_VCO_ENABLE;
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
@@ -5480,7 +5498,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
/* determine panel color depth */
temp = I915_READ(PIPECONF(pipe));
temp &= ~PIPE_BPC_MASK;
- dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp);
+ dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
switch (pipe_bpp) {
case 18:
temp |= PIPE_6BPC;
@@ -7189,11 +7207,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->old_fb_obj = intel_fb->obj;
INIT_WORK(&work->work, intel_unpin_work_fn);
+ ret = drm_vblank_get(dev, intel_crtc->pipe);
+ if (ret)
+ goto free_work;
+
/* We borrow the event spin lock for protecting unpin_work */
spin_lock_irqsave(&dev->event_lock, flags);
if (intel_crtc->unpin_work) {
spin_unlock_irqrestore(&dev->event_lock, flags);
kfree(work);
+ drm_vblank_put(dev, intel_crtc->pipe);
DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
return -EBUSY;
@@ -7212,10 +7235,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
crtc->fb = fb;
- ret = drm_vblank_get(dev, intel_crtc->pipe);
- if (ret)
- goto cleanup_objs;
-
work->pending_flip_obj = obj;
work->enable_stall_check = true;
@@ -7238,7 +7257,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
cleanup_pending:
atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
-cleanup_objs:
drm_gem_object_unreference(&work->old_fb_obj->base);
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
@@ -7247,6 +7265,8 @@ cleanup_objs:
intel_crtc->unpin_work = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
+ drm_vblank_put(dev, intel_crtc->pipe);
+free_work:
kfree(work);
return ret;
@@ -7887,6 +7907,33 @@ void intel_init_emon(struct drm_device *dev)
dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
}
+static bool intel_enable_rc6(struct drm_device *dev)
+{
+ /*
+ * Respect the kernel parameter if it is set
+ */
+ if (i915_enable_rc6 >= 0)
+ return i915_enable_rc6;
+
+ /*
+ * Disable RC6 on Ironlake
+ */
+ if (INTEL_INFO(dev)->gen == 5)
+ return 0;
+
+ /*
+ * Enable rc6 on Sandybridge if DMA remapping is disabled
+ */
+ if (INTEL_INFO(dev)->gen == 6) {
+ DRM_DEBUG_DRIVER("Sandybridge: intel_iommu_enabled %s -- RC6 %sabled\n",
+ intel_iommu_enabled ? "true" : "false",
+ !intel_iommu_enabled ? "en" : "dis");
+ return !intel_iommu_enabled;
+ }
+ DRM_DEBUG_DRIVER("RC6 enabled\n");
+ return 1;
+}
+
void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
@@ -7923,7 +7970,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
- if (i915_enable_rc6)
+ if (intel_enable_rc6(dev_priv->dev))
rc6_mask = GEN6_RC_CTL_RC6p_ENABLE |
GEN6_RC_CTL_RC6_ENABLE;
@@ -8372,7 +8419,7 @@ void ironlake_enable_rc6(struct drm_device *dev)
/* rc6 disabled by default due to repeated reports of hanging during
* boot and resume.
*/
- if (!i915_enable_rc6)
+ if (!intel_enable_rc6(dev))
return;
mutex_lock(&dev->struct_mutex);
@@ -8491,6 +8538,28 @@ static void intel_init_display(struct drm_device *dev)
/* For FIFO watermark updates */
if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
+ dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;
+
+ /* IVB configs may use multi-threaded forcewake */
+ if (IS_IVYBRIDGE(dev)) {
+ u32 ecobus;
+
+ mutex_lock(&dev->struct_mutex);
+ __gen6_gt_force_wake_mt_get(dev_priv);
+ ecobus = I915_READ(ECOBUS);
+ __gen6_gt_force_wake_mt_put(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (ecobus & FORCEWAKE_MT_ENABLE) {
+ DRM_DEBUG_KMS("Using MT version of forcewake\n");
+ dev_priv->display.force_wake_get =
+ __gen6_gt_force_wake_mt_get;
+ dev_priv->display.force_wake_put =
+ __gen6_gt_force_wake_mt_put;
+ }
+ }
+
if (HAS_PCH_IBX(dev))
dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
else if (HAS_PCH_CPT(dev))
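
The intel_init_display() hunk above installs the legacy forcewake callbacks first and only swaps in the multi-threaded variants once probing ECOBUS on Ivybridge confirms they are in use. A simplified sketch of that probe-then-override setup; the types, names and the read_ecobus() callback are invented for illustration.

#include <stdbool.h>
#include <stdio.h>

struct ops {
	void (*wake_get)(void);
	void (*wake_put)(void);
};

static void legacy_get(void) { puts("legacy get"); }
static void legacy_put(void) { puts("legacy put"); }
static void mt_get(void)     { puts("mt get"); }
static void mt_put(void)     { puts("mt put"); }

#define MT_ENABLE (1 << 5)	/* stand-in for FORCEWAKE_MT_ENABLE in ECOBUS */

static void init_forcewake(struct ops *ops, bool is_ivybridge,
			   unsigned int (*read_ecobus)(void))
{
	/* Safe defaults first, then upgrade if the probe says MT is on. */
	ops->wake_get = legacy_get;
	ops->wake_put = legacy_put;

	if (is_ivybridge && (read_ecobus() & MT_ENABLE)) {
		ops->wake_get = mt_get;
		ops->wake_put = mt_put;
	}
}
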
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 4d0358fad93..92b041b66e4 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -208,13 +208,15 @@ intel_dp_link_clock(uint8_t link_bw)
*/
static int
-intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock)
+intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock, int check_bpp)
{
struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int bpp = 24;
- if (intel_crtc)
+ if (check_bpp)
+ bpp = check_bpp;
+ else if (intel_crtc)
bpp = intel_crtc->bpp;
return (pixel_clock * bpp + 9) / 10;
@@ -233,6 +235,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
struct intel_dp *intel_dp = intel_attached_dp(connector);
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
int max_lanes = intel_dp_max_lane_count(intel_dp);
+ int max_rate, mode_rate;
if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
@@ -242,9 +245,17 @@ intel_dp_mode_valid(struct drm_connector *connector,
return MODE_PANEL;
}
- if (intel_dp_link_required(intel_dp, mode->clock)
- > intel_dp_max_data_rate(max_link_clock, max_lanes))
- return MODE_CLOCK_HIGH;
+ mode_rate = intel_dp_link_required(intel_dp, mode->clock, 0);
+ max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
+
+ if (mode_rate > max_rate) {
+ mode_rate = intel_dp_link_required(intel_dp,
+ mode->clock, 18);
+ if (mode_rate > max_rate)
+ return MODE_CLOCK_HIGH;
+ else
+ mode->private_flags |= INTEL_MODE_DP_FORCE_6BPC;
+ }
if (mode->clock < 10000)
return MODE_CLOCK_LOW;
@@ -362,8 +373,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
* clock divider.
*/
if (is_cpu_edp(intel_dp)) {
- if (IS_GEN6(dev))
- aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */
+ if (IS_GEN6(dev) || IS_GEN7(dev))
+ aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
else
aux_clock_divider = 225; /* eDP input clock at 450Mhz */
} else if (HAS_PCH_SPLIT(dev))
@@ -672,6 +683,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
int lane_count, clock;
int max_lane_count = intel_dp_max_lane_count(intel_dp);
int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
+ int bpp = mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 0;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
@@ -689,7 +701,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
for (clock = 0; clock <= max_clock; clock++) {
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
- if (intel_dp_link_required(intel_dp, mode->clock)
+ if (intel_dp_link_required(intel_dp, mode->clock, bpp)
<= link_avail) {
intel_dp->link_bw = bws[clock];
intel_dp->lane_count = lane_count;
@@ -817,10 +829,11 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
}
/*
- * There are three kinds of DP registers:
+ * There are four kinds of DP registers:
*
* IBX PCH
- * CPU
+ * SNB CPU
+ * IVB CPU
* CPT PCH
*
* IBX PCH and CPU are the same for almost everything,
@@ -873,7 +886,25 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
/* Split out the IBX/CPU vs CPT settings */
- if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
+ if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ intel_dp->DP |= DP_SYNC_HS_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ intel_dp->DP |= DP_SYNC_VS_HIGH;
+ intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
+
+ if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+ intel_dp->DP |= DP_ENHANCED_FRAMING;
+
+ intel_dp->DP |= intel_crtc->pipe << 29;
+
+ /* don't miss the required PLL settings for eDP */
+ intel_dp->DP |= DP_PLL_ENABLE;
+ if (adjusted_mode->clock < 200000)
+ intel_dp->DP |= DP_PLL_FREQ_160MHZ;
+ else
+ intel_dp->DP |= DP_PLL_FREQ_270MHZ;
+ } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
intel_dp->DP |= intel_dp->color_range;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -1375,34 +1406,59 @@ static char *link_train_names[] = {
* These are source-specific values; current Intel hardware supports
* a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
*/
-#define I830_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_800
-#define I830_DP_VOLTAGE_MAX_CPT DP_TRAIN_VOLTAGE_SWING_1200
static uint8_t
-intel_dp_pre_emphasis_max(uint8_t voltage_swing)
+intel_dp_voltage_max(struct intel_dp *intel_dp)
{
- switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
- case DP_TRAIN_VOLTAGE_SWING_400:
- return DP_TRAIN_PRE_EMPHASIS_6;
- case DP_TRAIN_VOLTAGE_SWING_600:
- return DP_TRAIN_PRE_EMPHASIS_6;
- case DP_TRAIN_VOLTAGE_SWING_800:
- return DP_TRAIN_PRE_EMPHASIS_3_5;
- case DP_TRAIN_VOLTAGE_SWING_1200:
- default:
- return DP_TRAIN_PRE_EMPHASIS_0;
+ struct drm_device *dev = intel_dp->base.base.dev;
+
+ if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
+ return DP_TRAIN_VOLTAGE_SWING_800;
+ else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+ return DP_TRAIN_VOLTAGE_SWING_1200;
+ else
+ return DP_TRAIN_VOLTAGE_SWING_800;
+}
+
+static uint8_t
+intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
+{
+ struct drm_device *dev = intel_dp->base.base.dev;
+
+ if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ return DP_TRAIN_PRE_EMPHASIS_3_5;
+ default:
+ return DP_TRAIN_PRE_EMPHASIS_0;
+ }
+ } else {
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ return DP_TRAIN_PRE_EMPHASIS_3_5;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ default:
+ return DP_TRAIN_PRE_EMPHASIS_0;
+ }
}
}
static void
intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
- struct drm_device *dev = intel_dp->base.base.dev;
uint8_t v = 0;
uint8_t p = 0;
int lane;
uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS);
- int voltage_max;
+ uint8_t voltage_max;
+ uint8_t preemph_max;
for (lane = 0; lane < intel_dp->lane_count; lane++) {
uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane);
@@ -1414,15 +1470,13 @@ intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_ST
p = this_p;
}
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
- voltage_max = I830_DP_VOLTAGE_MAX_CPT;
- else
- voltage_max = I830_DP_VOLTAGE_MAX;
+ voltage_max = intel_dp_voltage_max(intel_dp);
if (v >= voltage_max)
v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
- if (p >= intel_dp_pre_emphasis_max(v))
- p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+ preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
+ if (p >= preemph_max)
+ p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
for (lane = 0; lane < 4; lane++)
intel_dp->train_set[lane] = v | p;
@@ -1494,6 +1548,37 @@ intel_gen6_edp_signal_levels(uint8_t train_set)
}
}
+/* Gen7's DP voltage swing and pre-emphasis control */
+static uint32_t
+intel_gen7_edp_signal_levels(uint8_t train_set)
+{
+ int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+ DP_TRAIN_PRE_EMPHASIS_MASK);
+ switch (signal_levels) {
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_400MV_0DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+ return EDP_LINK_TRAIN_400MV_6DB_IVB;
+
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_600MV_0DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
+
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_800MV_0DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
+
+ default:
+ DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
+ "0x%x\n", signal_levels);
+ return EDP_LINK_TRAIN_500MV_0DB_IVB;
+ }
+}
+
static uint8_t
intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
int lane)
@@ -1599,7 +1684,8 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
DP_LINK_CONFIGURATION_SIZE);
DP |= DP_PORT_EN;
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
DP &= ~DP_LINK_TRAIN_MASK_CPT;
else
DP &= ~DP_LINK_TRAIN_MASK;
@@ -1613,7 +1699,11 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
uint8_t link_status[DP_LINK_STATUS_SIZE];
uint32_t signal_levels;
- if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
+
+ if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+ signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
+ DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
+ } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
@@ -1622,7 +1712,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_1;
@@ -1703,7 +1793,10 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
break;
}
- if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
+ if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+ signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
+ DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
+ } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
@@ -1711,7 +1804,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_2;
@@ -1752,7 +1845,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
++tries;
}
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
reg = DP | DP_LINK_TRAIN_OFF_CPT;
else
reg = DP | DP_LINK_TRAIN_OFF;
@@ -1782,7 +1875,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
udelay(100);
}
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) {
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
DP &= ~DP_LINK_TRAIN_MASK_CPT;
I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
} else {
@@ -1794,7 +1887,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
msleep(17);
if (is_edp(intel_dp)) {
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
DP |= DP_LINK_TRAIN_OFF_CPT;
else
DP |= DP_LINK_TRAIN_OFF;
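
The mode_valid change above first checks whether the mode fits the link at the default 24 bits per pixel and, if not, retries at 18 bpp (6 bpc) before rejecting it, flagging the mode so the CRTC code later enables dithering. A small sketch of that two-step bandwidth check, reusing the hunk's (pixel_clock * bpp + 9) / 10 formula but with made-up enum names.

/* Bandwidth needed for a mode, in the same units the driver uses:
 * (pixel clock in kHz * bits per pixel) / 10, rounded up. */
static int link_required(int pixel_clock_khz, int bpp)
{
	return (pixel_clock_khz * bpp + 9) / 10;
}

enum mode_status { MODE_OK, MODE_OK_6BPC, MODE_TOO_FAST };

static enum mode_status check_mode(int pixel_clock_khz, int max_rate)
{
	if (link_required(pixel_clock_khz, 24) <= max_rate)
		return MODE_OK;			/* full 8 bpc fits */

	if (link_required(pixel_clock_khz, 18) <= max_rate)
		return MODE_OK_6BPC;		/* fits if we dither down to 6 bpc */

	return MODE_TOO_FAST;
}
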
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index bd9a604b73d..a1b4343814e 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -110,6 +110,7 @@
/* drm_display_mode->private_flags */
#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
+#define INTEL_MODE_DP_FORCE_6BPC (0x10)
static inline void
intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 42f165a520d..e44191132ac 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -715,6 +715,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "EB1007"),
},
},
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Asus AT5NM10T-I",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "AT5NM10T-I"),
+ },
+ },
{ } /* terminating entry */
};
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 21f60b7d69a..04d79fd1dc9 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -178,13 +178,10 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev)) {
max >>= 16;
} else {
- if (IS_PINEVIEW(dev)) {
+ if (INTEL_INFO(dev)->gen < 4)
max >>= 17;
- } else {
+ else
max >>= 16;
- if (INTEL_INFO(dev)->gen < 4)
- max &= ~1;
- }
if (is_backlight_combination_mode(dev))
max *= 0xff;
@@ -203,13 +200,12 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
} else {
val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
- if (IS_PINEVIEW(dev))
+ if (INTEL_INFO(dev)->gen < 4)
val >>= 1;
if (is_backlight_combination_mode(dev)) {
u8 lbpc;
- val &= ~1;
pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
val *= lbpc;
}
@@ -246,11 +242,9 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
}
tmp = I915_READ(BLC_PWM_CTL);
- if (IS_PINEVIEW(dev)) {
- tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
+ if (INTEL_INFO(dev)->gen < 4)
level <<= 1;
- } else
- tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
+ tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
I915_WRITE(BLC_PWM_CTL, tmp | level);
}
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 3003fb25aef..f7b9268df26 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -50,6 +50,7 @@
#define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK)
#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK)
#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
+#define IS_DIGITAL(c) (c->output_flag & (SDVO_TMDS_MASK | SDVO_LVDS_MASK))
static const char *tv_format_names[] = {
@@ -1086,8 +1087,12 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
}
sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
}
- if (intel_crtc->pipe == 1)
- sdvox |= SDVO_PIPE_B_SELECT;
+
+ if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
+ sdvox |= TRANSCODER_CPT(intel_crtc->pipe);
+ else
+ sdvox |= TRANSCODER(intel_crtc->pipe);
+
if (intel_sdvo->has_hdmi_audio)
sdvox |= SDVO_AUDIO_ENABLE;
@@ -1314,6 +1319,18 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
return status;
}
+static bool
+intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo,
+ struct edid *edid)
+{
+ bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+ bool connector_is_digital = !!IS_DIGITAL(sdvo);
+
+ DRM_DEBUG_KMS("connector_is_digital? %d, monitor_is_digital? %d\n",
+ connector_is_digital, monitor_is_digital);
+ return connector_is_digital == monitor_is_digital;
+}
+
static enum drm_connector_status
intel_sdvo_detect(struct drm_connector *connector, bool force)
{
@@ -1358,10 +1375,12 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
if (edid == NULL)
edid = intel_sdvo_get_analog_edid(connector);
if (edid != NULL) {
- if (edid->input & DRM_EDID_INPUT_DIGITAL)
- ret = connector_status_disconnected;
- else
+ if (intel_sdvo_connector_matches_edid(intel_sdvo_connector,
+ edid))
ret = connector_status_connected;
+ else
+ ret = connector_status_disconnected;
+
connector->display_info.raw_edid = NULL;
kfree(edid);
} else
@@ -1402,11 +1421,8 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
edid = intel_sdvo_get_analog_edid(connector);
if (edid != NULL) {
- struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
- bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
- bool connector_is_digital = !!IS_TMDS(intel_sdvo_connector);
-
- if (connector_is_digital == monitor_is_digital) {
+ if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector),
+ edid)) {
drm_mode_connector_update_edid_property(connector, edid);
drm_add_edid_modes(connector, edid);
}
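
intel_sdvo_connector_matches_edid() above reports a connection only when the connector type and the monitor's EDID agree on digital versus analog. A tiny sketch of that comparison; the EDID_INPUT_DIGITAL value mirrors the top bit of the EDID input byte, but everything else here is illustrative.

#include <stdbool.h>

#define EDID_INPUT_DIGITAL (1 << 7)	/* top bit of the EDID input byte */

/* Report "connected" only when the connector type and the EDID agree:
 * a digital connector needs a digital EDID, an analog one an analog EDID. */
static bool connector_matches_edid(bool connector_is_digital,
				   unsigned char edid_input_byte)
{
	bool monitor_is_digital = !!(edid_input_byte & EDID_INPUT_DIGITAL);

	return connector_is_digital == monitor_is_digital;
}
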
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index ddbabefb427..b12fd2c8081 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -369,3 +369,48 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
spin_unlock_irqrestore(&dev->event_lock, flags);
return 0;
}
+
+int
+nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct nouveau_bo *bo;
+ int ret;
+
+ args->pitch = roundup(args->width * (args->bpp / 8), 256);
+ args->size = args->pitch * args->height;
+ args->size = roundup(args->size, PAGE_SIZE);
+
+ ret = nouveau_gem_new(dev, args->size, 0, TTM_PL_FLAG_VRAM, 0, 0, &bo);
+ if (ret)
+ return ret;
+
+ ret = drm_gem_handle_create(file_priv, bo->gem, &args->handle);
+ drm_gem_object_unreference_unlocked(bo->gem);
+ return ret;
+}
+
+int
+nouveau_display_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
+ uint32_t handle)
+{
+ return drm_gem_handle_delete(file_priv, handle);
+}
+
+int
+nouveau_display_dumb_map_offset(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle, uint64_t *poffset)
+{
+ struct drm_gem_object *gem;
+
+ gem = drm_gem_object_lookup(dev, file_priv, handle);
+ if (gem) {
+ struct nouveau_bo *bo = gem->driver_private;
+ *poffset = bo->bo.addr_space_offset;
+ drm_gem_object_unreference_unlocked(gem);
+ return 0;
+ }
+
+ return -ENOENT;
+}
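
nouveau_display_dumb_create() above derives the pitch and total size for a dumb buffer: the byte pitch is rounded up to 256 bytes and the final size is page-aligned. A standalone sketch of just that arithmetic; the 256-byte alignment comes from the hunk, while the 4 KiB page size and the struct layout are illustrative.

#include <stdint.h>

#define ROUNDUP(x, a)	((((x) + (a) - 1) / (a)) * (a))
#define PAGE_SIZE	4096u

struct dumb_args { uint32_t width, height, bpp, pitch; uint64_t size; };

static void dumb_sizes(struct dumb_args *a)
{
	/* Scanout engines want each line to start on an aligned boundary,
	 * so pad the byte pitch up to 256 bytes, then page-align the total. */
	a->pitch = ROUNDUP(a->width * (a->bpp / 8), 256);
	a->size  = ROUNDUP((uint64_t)a->pitch * a->height, PAGE_SIZE);
}
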
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 9f7bb129526..9791d13c9e3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -433,6 +433,10 @@ static struct drm_driver driver = {
.gem_open_object = nouveau_gem_object_open,
.gem_close_object = nouveau_gem_object_close,
+ .dumb_create = nouveau_display_dumb_create,
+ .dumb_map_offset = nouveau_display_dumb_map_offset,
+ .dumb_destroy = nouveau_display_dumb_destroy,
+
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
#ifdef GIT_REVISION
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 29837da1098..4c0be3a4ed8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -1418,6 +1418,12 @@ int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event);
int nouveau_finish_page_flip(struct nouveau_channel *,
struct nouveau_page_flip_state *);
+int nouveau_display_dumb_create(struct drm_file *, struct drm_device *,
+ struct drm_mode_create_dumb *args);
+int nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *,
+ uint32_t handle, uint64_t *offset);
+int nouveau_display_dumb_destroy(struct drm_file *, struct drm_device *,
+ uint32_t handle);
/* nv10_gpio.c */
int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 02222c540ae..960c0ae0c0c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -680,7 +680,7 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
return ret;
}
- ret = drm_mm_init(&chan->ramin_heap, base, size);
+ ret = drm_mm_init(&chan->ramin_heap, base, size - base);
if (ret) {
NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
nouveau_gpuobj_ref(NULL, &chan->ramin);
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index b75258a9fe4..c8a463b76c8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -67,7 +67,10 @@ nouveau_sgdma_clear(struct ttm_backend *be)
pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
}
+ nvbe->unmap_pages = false;
}
+
+ nvbe->pages = NULL;
}
static void
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index d23ca00e7d6..06de250fe61 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -616,7 +616,7 @@ nv50_display_unk10_handler(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_display *disp = nv50_display(dev);
u32 unk30 = nv_rd32(dev, 0x610030), mc;
- int i, crtc, or, type = OUTPUT_ANY;
+ int i, crtc, or = 0, type = OUTPUT_ANY;
NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
disp->irq.dcb = NULL;
@@ -708,7 +708,7 @@ nv50_display_unk20_handler(struct drm_device *dev)
struct nv50_display *disp = nv50_display(dev);
u32 unk30 = nv_rd32(dev, 0x610030), tmp, pclk, script, mc = 0;
struct dcb_entry *dcb;
- int i, crtc, or, type = OUTPUT_ANY;
+ int i, crtc, or = 0, type = OUTPUT_ANY;
NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
dcb = disp->irq.dcb;
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index a74e501afd2..ecfafd70cf0 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -381,6 +381,8 @@ nvc0_graph_init_gpc_0(struct drm_device *dev)
u8 tpnr[GPC_MAX];
int i, gpc, tpc;
+ nv_wr32(dev, TP_UNIT(0, 0, 0x5c), 1); /* affects TFB offset queries */
+
/*
* TP ROP UNKVAL(magic_not_rop_nr)
* 450: 4/0/0/0 2 3
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
index 23d63b4b3d7..cb006a718e7 100644
--- a/drivers/gpu/drm/nouveau/nvd0_display.c
+++ b/drivers/gpu/drm/nouveau/nvd0_display.c
@@ -780,7 +780,7 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
continue;
if (nv_partner != nv_encoder &&
- nv_partner->dcb->or == nv_encoder->or) {
+ nv_partner->dcb->or == nv_encoder->dcb->or) {
if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
return;
break;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 87631fede1f..2b97262e3ab 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1107,9 +1107,40 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
return -EINVAL;
}
- if (tiling_flags & RADEON_TILING_MACRO)
+ if (tiling_flags & RADEON_TILING_MACRO) {
+ if (rdev->family >= CHIP_CAYMAN)
+ tmp = rdev->config.cayman.tile_config;
+ else
+ tmp = rdev->config.evergreen.tile_config;
+
+ switch ((tmp & 0xf0) >> 4) {
+ case 0: /* 4 banks */
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
+ break;
+ case 1: /* 8 banks */
+ default:
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
+ break;
+ case 2: /* 16 banks */
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
+ break;
+ }
+
+ switch ((tmp & 0xf000) >> 12) {
+ case 0: /* 1KB rows */
+ default:
+ fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB);
+ break;
+ case 1: /* 2KB rows */
+ fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB);
+ break;
+ case 2: /* 4KB rows */
+ fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB);
+ break;
+ }
+
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
- else if (tiling_flags & RADEON_TILING_MICRO)
+ } else if (tiling_flags & RADEON_TILING_MICRO)
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
switch (radeon_crtc->crtc_id) {
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 1d603a3335d..5e00d1670aa 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -82,6 +82,7 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
+ int i;
/* Lock the graphics update lock */
tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
@@ -99,7 +100,11 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
(u32)crtc_base);
/* Wait for update_pending to go high. */
- while (!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)
+ break;
+ udelay(1);
+ }
DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
/* Unlock the lock, so double-buffering can take place inside vblank */
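
The evergreen_page_flip() change above (and the matching r100/rs600/rv770 hunks further down) replaces an unbounded busy-wait on the update-pending bit with a loop capped at rdev->usec_timeout iterations. A user-space sketch of that bounded-polling idiom; flag_is_set() and the timeout constant are placeholders.

#include <stdbool.h>
#include <unistd.h>

#define USEC_TIMEOUT 100000	/* stand-in for rdev->usec_timeout */

/* Poll a status predicate once per microsecond, but give up after the
 * timeout instead of spinning forever if the flag never shows up. */
static bool wait_for_flag(bool (*flag_is_set)(void))
{
	int i;

	for (i = 0; i < USEC_TIMEOUT; i++) {
		if (flag_is_set())
			return true;
		usleep(1);
	}
	return false;
}
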
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 38e1bda73d3..cd4590aae15 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -38,6 +38,7 @@ struct evergreen_cs_track {
u32 group_size;
u32 nbanks;
u32 npipes;
+ u32 row_size;
/* value we track */
u32 nsamples;
u32 cb_color_base_last[12];
@@ -77,6 +78,44 @@ struct evergreen_cs_track {
struct radeon_bo *db_s_write_bo;
};
+static u32 evergreen_cs_get_aray_mode(u32 tiling_flags)
+{
+ if (tiling_flags & RADEON_TILING_MACRO)
+ return ARRAY_2D_TILED_THIN1;
+ else if (tiling_flags & RADEON_TILING_MICRO)
+ return ARRAY_1D_TILED_THIN1;
+ else
+ return ARRAY_LINEAR_GENERAL;
+}
+
+static u32 evergreen_cs_get_num_banks(u32 nbanks)
+{
+ switch (nbanks) {
+ case 2:
+ return ADDR_SURF_2_BANK;
+ case 4:
+ return ADDR_SURF_4_BANK;
+ case 8:
+ default:
+ return ADDR_SURF_8_BANK;
+ case 16:
+ return ADDR_SURF_16_BANK;
+ }
+}
+
+static u32 evergreen_cs_get_tile_split(u32 row_size)
+{
+ switch (row_size) {
+ case 1:
+ default:
+ return ADDR_SURF_TILE_SPLIT_1KB;
+ case 2:
+ return ADDR_SURF_TILE_SPLIT_2KB;
+ case 4:
+ return ADDR_SURF_TILE_SPLIT_4KB;
+ }
+}
+
static void evergreen_cs_track_init(struct evergreen_cs_track *track)
{
int i;
@@ -490,12 +529,11 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
}
ib[idx] &= ~Z_ARRAY_MODE(0xf);
track->db_z_info &= ~Z_ARRAY_MODE(0xf);
+ ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
- ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- } else {
- ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
- track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+ ib[idx] |= DB_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
}
}
break;
@@ -618,13 +656,8 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
"0x%04X\n", reg);
return -EINVAL;
}
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
- ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
- ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
- track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
- }
+ ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
}
break;
case CB_COLOR8_INFO:
@@ -640,13 +673,8 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
"0x%04X\n", reg);
return -EINVAL;
}
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
- ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
- ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
- track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
- }
+ ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
}
break;
case CB_COLOR0_PITCH:
@@ -701,6 +729,16 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR9_ATTRIB:
case CB_COLOR10_ATTRIB:
case CB_COLOR11_ATTRIB:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+ ib[idx] |= CB_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
+ }
break;
case CB_COLOR0_DIM:
case CB_COLOR1_DIM:
@@ -1318,10 +1356,14 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
if (!p->keep_tiling_flags) {
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
- ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
- ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ ib[idx+1+(i*8)+1] |=
+ TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ ib[idx+1+(i*8)+6] |=
+ TEX_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
+ ib[idx+1+(i*8)+7] |=
+ TEX_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+ }
}
texture = reloc->robj;
/* tex mip base */
@@ -1422,6 +1464,7 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
{
struct radeon_cs_packet pkt;
struct evergreen_cs_track *track;
+ u32 tmp;
int r;
if (p->track == NULL) {
@@ -1430,9 +1473,63 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
if (track == NULL)
return -ENOMEM;
evergreen_cs_track_init(track);
- track->npipes = p->rdev->config.evergreen.tiling_npipes;
- track->nbanks = p->rdev->config.evergreen.tiling_nbanks;
- track->group_size = p->rdev->config.evergreen.tiling_group_size;
+ if (p->rdev->family >= CHIP_CAYMAN)
+ tmp = p->rdev->config.cayman.tile_config;
+ else
+ tmp = p->rdev->config.evergreen.tile_config;
+
+ switch (tmp & 0xf) {
+ case 0:
+ track->npipes = 1;
+ break;
+ case 1:
+ default:
+ track->npipes = 2;
+ break;
+ case 2:
+ track->npipes = 4;
+ break;
+ case 3:
+ track->npipes = 8;
+ break;
+ }
+
+ switch ((tmp & 0xf0) >> 4) {
+ case 0:
+ track->nbanks = 4;
+ break;
+ case 1:
+ default:
+ track->nbanks = 8;
+ break;
+ case 2:
+ track->nbanks = 16;
+ break;
+ }
+
+ switch ((tmp & 0xf00) >> 8) {
+ case 0:
+ track->group_size = 256;
+ break;
+ case 1:
+ default:
+ track->group_size = 512;
+ break;
+ }
+
+ switch ((tmp & 0xf000) >> 12) {
+ case 0:
+ track->row_size = 1;
+ break;
+ case 1:
+ default:
+ track->row_size = 2;
+ break;
+ case 2:
+ track->row_size = 4;
+ break;
+ }
+
p->track = track;
}
do {
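
evergreen_cs_parse() above now derives the pipe count, bank count, group size and row size from the packed tile_config word instead of the old per-field config values. A sketch of that nibble-by-nibble decode, following the same case/default mapping as the hunk; the struct and function names are invented.

struct tiling { int npipes, nbanks, group_size, row_size_kb; };

/* Decode the packed tile_config word the way the parser above does:
 * each 4-bit nibble selects a power-of-two setting, with a fallback default. */
static struct tiling decode_tile_config(unsigned int cfg)
{
	struct tiling t;

	switch (cfg & 0xf) {
	case 0:  t.npipes = 1; break;
	case 2:  t.npipes = 4; break;
	case 3:  t.npipes = 8; break;
	default: t.npipes = 2; break;
	}

	switch ((cfg >> 4) & 0xf) {
	case 0:  t.nbanks = 4;  break;
	case 2:  t.nbanks = 16; break;
	default: t.nbanks = 8;  break;
	}

	t.group_size = (((cfg >> 8) & 0xf) == 0) ? 256 : 512;

	switch ((cfg >> 12) & 0xf) {
	case 0:  t.row_size_kb = 1; break;
	case 2:  t.row_size_kb = 4; break;
	default: t.row_size_kb = 2; break;
	}
	return t;
}
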
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index c781c92c345..7d7f2155e34 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -42,6 +42,17 @@
# define EVERGREEN_GRPH_DEPTH_8BPP 0
# define EVERGREEN_GRPH_DEPTH_16BPP 1
# define EVERGREEN_GRPH_DEPTH_32BPP 2
+# define EVERGREEN_GRPH_NUM_BANKS(x) (((x) & 0x3) << 2)
+# define EVERGREEN_ADDR_SURF_2_BANK 0
+# define EVERGREEN_ADDR_SURF_4_BANK 1
+# define EVERGREEN_ADDR_SURF_8_BANK 2
+# define EVERGREEN_ADDR_SURF_16_BANK 3
+# define EVERGREEN_GRPH_Z(x) (((x) & 0x3) << 4)
+# define EVERGREEN_GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6)
+# define EVERGREEN_ADDR_SURF_BANK_WIDTH_1 0
+# define EVERGREEN_ADDR_SURF_BANK_WIDTH_2 1
+# define EVERGREEN_ADDR_SURF_BANK_WIDTH_4 2
+# define EVERGREEN_ADDR_SURF_BANK_WIDTH_8 3
# define EVERGREEN_GRPH_FORMAT(x) (((x) & 0x7) << 8)
/* 8 BPP */
# define EVERGREEN_GRPH_FORMAT_INDEXED 0
@@ -61,6 +72,24 @@
# define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5
# define EVERGREEN_GRPH_FORMAT_RGB111110 6
# define EVERGREEN_GRPH_FORMAT_BGR101111 7
+# define EVERGREEN_GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11)
+# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_1 0
+# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_2 1
+# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_4 2
+# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_8 3
+# define EVERGREEN_GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13)
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_64B 0
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_128B 1
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_256B 2
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_512B 3
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB 4
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB 5
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB 6
+# define EVERGREEN_GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18)
+# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1 0
+# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2 1
+# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4 2
+# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8 3
# define EVERGREEN_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
# define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL 0
# define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED 1
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index b937c49054d..e00039e59a7 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -899,6 +899,10 @@
#define DB_HTILE_DATA_BASE 0x28014
#define DB_Z_INFO 0x28040
# define Z_ARRAY_MODE(x) ((x) << 4)
+# define DB_TILE_SPLIT(x) (((x) & 0x7) << 8)
+# define DB_NUM_BANKS(x) (((x) & 0x3) << 12)
+# define DB_BANK_WIDTH(x) (((x) & 0x3) << 16)
+# define DB_BANK_HEIGHT(x) (((x) & 0x3) << 20)
#define DB_STENCIL_INFO 0x28044
#define DB_Z_READ_BASE 0x28048
#define DB_STENCIL_READ_BASE 0x2804c
@@ -951,6 +955,29 @@
# define CB_SF_EXPORT_FULL 0
# define CB_SF_EXPORT_NORM 1
#define CB_COLOR0_ATTRIB 0x28c74
+# define CB_TILE_SPLIT(x) (((x) & 0x7) << 5)
+# define ADDR_SURF_TILE_SPLIT_64B 0
+# define ADDR_SURF_TILE_SPLIT_128B 1
+# define ADDR_SURF_TILE_SPLIT_256B 2
+# define ADDR_SURF_TILE_SPLIT_512B 3
+# define ADDR_SURF_TILE_SPLIT_1KB 4
+# define ADDR_SURF_TILE_SPLIT_2KB 5
+# define ADDR_SURF_TILE_SPLIT_4KB 6
+# define CB_NUM_BANKS(x) (((x) & 0x3) << 10)
+# define ADDR_SURF_2_BANK 0
+# define ADDR_SURF_4_BANK 1
+# define ADDR_SURF_8_BANK 2
+# define ADDR_SURF_16_BANK 3
+# define CB_BANK_WIDTH(x) (((x) & 0x3) << 13)
+# define ADDR_SURF_BANK_WIDTH_1 0
+# define ADDR_SURF_BANK_WIDTH_2 1
+# define ADDR_SURF_BANK_WIDTH_4 2
+# define ADDR_SURF_BANK_WIDTH_8 3
+# define CB_BANK_HEIGHT(x) (((x) & 0x3) << 16)
+# define ADDR_SURF_BANK_HEIGHT_1 0
+# define ADDR_SURF_BANK_HEIGHT_2 1
+# define ADDR_SURF_BANK_HEIGHT_4 2
+# define ADDR_SURF_BANK_HEIGHT_8 3
#define CB_COLOR0_DIM 0x28c78
/* only CB0-7 blocks have these regs */
#define CB_COLOR0_CMASK 0x28c7c
@@ -1137,7 +1164,11 @@
# define SQ_SEL_1 5
#define SQ_TEX_RESOURCE_WORD5_0 0x30014
#define SQ_TEX_RESOURCE_WORD6_0 0x30018
+# define TEX_TILE_SPLIT(x) (((x) & 0x7) << 29)
#define SQ_TEX_RESOURCE_WORD7_0 0x3001c
+# define TEX_BANK_WIDTH(x) (((x) & 0x3) << 8)
+# define TEX_BANK_HEIGHT(x) (((x) & 0x3) << 10)
+# define TEX_NUM_BANKS(x) (((x) & 0x3) << 16)
#define SQ_VTX_CONSTANT_WORD0_0 0x30000
#define SQ_VTX_CONSTANT_WORD1_0 0x30004
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index ad158ea4990..bfc08f6320f 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -187,13 +187,18 @@ u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
+ int i;
/* Lock the graphics update lock */
/* update the scanout addresses */
WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
/* Wait for update_pending to go high. */
- while (!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)
+ break;
+ udelay(1);
+ }
DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
/* Unlock the lock, so double-buffering can take place inside vblank */
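The r100, rs600 and rv770 page-flip hunks in this series all replace an unbounded while() poll on the update-pending bit with a loop bounded by rdev->usec_timeout, so a wedged display block can no longer hang the CPU. A minimal sketch of the bounded-poll pattern; read_status(), STATUS_BIT and the -ETIMEDOUT return are hypothetical stand-ins, udelay() is the usual kernel delay helper:

static int wait_for_status(unsigned int timeout_us)
{
	unsigned int i;

	for (i = 0; i < timeout_us; i++) {
		if (read_status() & STATUS_BIT)	/* hypothetical register read */
			return 0;		/* bit went high in time */
		udelay(1);			/* back off one microsecond */
	}
	return -ETIMEDOUT;			/* give up instead of spinning forever */
}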
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
index 3f6636bb2d7..3516a6081dc 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.c
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -35,7 +35,8 @@ static int radeon_atif_call(acpi_handle handle)
/* Fail only if calling the method fails and ATIF is supported */
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
- printk(KERN_DEBUG "failed to evaluate ATIF got %s\n", acpi_format_exception(status));
+ DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n",
+ acpi_format_exception(status));
kfree(buffer.pointer);
return 1;
}
@@ -50,13 +51,13 @@ int radeon_acpi_init(struct radeon_device *rdev)
acpi_handle handle;
int ret;
- /* No need to proceed if we're sure that ATIF is not supported */
- if (!ASIC_IS_AVIVO(rdev) || !rdev->bios)
- return 0;
-
/* Get the device handle */
handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
+ /* No need to proceed if we're sure that ATIF is not supported */
+ if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle)
+ return 0;
+
/* Call the ATIF method */
ret = radeon_atif_call(handle);
if (ret)
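The radeon_acpi_init() change fetches the ACPI handle first and folds a NULL-handle test into the early bail-out, so the ATIF method is never evaluated against a missing handle. A small sketch of that guard-clause ordering; get_handle(), device_supported() and call_method() are hypothetical:

static int init_feature_sketch(struct ctx *c)
{
	void *handle = get_handle(c);		/* resolve the handle up front */

	if (!device_supported(c) || !handle)	/* bail out early; absence is not an error */
		return 0;

	return call_method(handle);		/* only reached with a valid handle */
}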
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 06e413e6a92..4b27efa4405 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -233,13 +233,12 @@ u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder)
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_TRAVIS:
case ENCODER_OBJECT_ID_NUTMEG:
- return true;
+ return radeon_encoder->encoder_id;
default:
- return false;
+ return ENCODER_OBJECT_ID_NONE;
}
}
-
- return false;
+ return ENCODER_OBJECT_ID_NONE;
}
void radeon_panel_mode_fixup(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 481b99e89f6..b1053d64042 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -62,6 +62,7 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+ int i;
/* Lock the graphics update lock */
tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
@@ -74,7 +75,11 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
(u32)crtc_base);
/* Wait for update_pending to go high. */
- while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
+ break;
+ udelay(1);
+ }
DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
/* Unlock the lock, so double-buffering can take place inside vblank */
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index a983f410ab8..23ae1c60ab3 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -47,6 +47,7 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+ int i;
/* Lock the graphics update lock */
tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
@@ -66,7 +67,11 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
(u32)crtc_base);
/* Wait for update_pending to go high. */
- while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
+ break;
+ udelay(1);
+ }
DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
/* Unlock the lock, so double-buffering can take place inside vblank */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 8cca91a93bd..dc279706ca7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -390,6 +390,11 @@ extern int vmw_context_check(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
int id,
struct vmw_resource **p_res);
+extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle,
+ struct vmw_surface **out_surf,
+ struct vmw_dma_buffer **out_buf);
extern void vmw_surface_res_free(struct vmw_resource *res);
extern int vmw_surface_init(struct vmw_private *dev_priv,
struct vmw_surface *srf,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 03bbc2a6f9a..a0c2f12b1e1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -33,6 +33,7 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t fifo_min, hwversion;
+ const struct vmw_fifo_state *fifo = &dev_priv->fifo;
if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
return false;
@@ -41,7 +42,12 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
return false;
- hwversion = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION);
+ hwversion = ioread32(fifo_mem +
+ ((fifo->capabilities &
+ SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
+ SVGA_FIFO_3D_HWVERSION_REVISED :
+ SVGA_FIFO_3D_HWVERSION));
+
if (hwversion == 0)
return false;
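Both vmwgfx hunks that touch SVGA_FIFO_3D_HWVERSION now derive the register offset from the FIFO capability bits, so hosts that advertise the revised layout are read at the new location while older hosts keep the legacy one. A sketch of that capability-driven selection; struct fifo_state, CAP_3D_HWVERSION_REVISED and the REG_* names are stand-ins:

static uint32_t read_hwversion(const struct fifo_state *fifo,
			       const uint32_t __iomem *fifo_mem)
{
	unsigned int reg = (fifo->capabilities & CAP_3D_HWVERSION_REVISED) ?
			   REG_3D_HWVERSION_REVISED : REG_3D_HWVERSION;

	return ioread32(fifo_mem + reg);	/* one code path for both layouts */
}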
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 3f6343502d1..66917c6c381 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -58,8 +58,14 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
case DRM_VMW_PARAM_FIFO_HW_VERSION:
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
-
- param->value = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION);
+ const struct vmw_fifo_state *fifo = &dev_priv->fifo;
+
+ param->value =
+ ioread32(fifo_mem +
+ ((fifo->capabilities &
+ SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
+ SVGA_FIFO_3D_HWVERSION_REVISED :
+ SVGA_FIFO_3D_HWVERSION));
break;
}
default:
@@ -140,7 +146,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
goto out_clips;
}
- clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+ clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
if (clips == NULL) {
DRM_ERROR("Failed to allocate clip rect list.\n");
ret = -ENOMEM;
@@ -166,13 +172,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
ret = -EINVAL;
goto out_no_fb;
}
-
vfb = vmw_framebuffer_to_vfb(obj_to_fb(obj));
- if (!vfb->dmabuf) {
- DRM_ERROR("Framebuffer not dmabuf backed.\n");
- ret = -EINVAL;
- goto out_no_fb;
- }
ret = ttm_read_lock(&vmaster->lock, true);
if (unlikely(ret != 0))
@@ -232,7 +232,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
goto out_clips;
}
- clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+ clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
if (clips == NULL) {
DRM_ERROR("Failed to allocate clip rect list.\n");
ret = -ENOMEM;
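The two hunks above replace kzalloc(num_clips * sizeof(*clips), ...) with kcalloc(num_clips, sizeof(*clips), ...). kcalloc checks the multiplication for overflow before allocating, whereas the open-coded multiply would silently wrap for a huge user-supplied num_clips and hand back an undersized buffer. The userspace analogue of the safe call is calloc():

#include <stdio.h>
#include <stdlib.h>

struct clip { int x1, y1, x2, y2; };

int main(void)
{
	size_t num_clips = 4;	/* imagine this value came from an ioctl argument */

	/* calloc(), like kcalloc(), rejects n * size products that overflow;
	 * malloc(num_clips * sizeof(*clips)) would not. */
	struct clip *clips = calloc(num_clips, sizeof(*clips));
	if (!clips) {
		perror("calloc");
		return 1;
	}

	free(clips);
	return 0;
}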
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 880e285d757..8aa1dbb45c6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -31,6 +31,44 @@
/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
+
+struct vmw_clip_rect {
+ int x1, x2, y1, y2;
+};
+
+/**
+ * Clip @num_rects number of @rects against @clip, storing the
+ * results in @out_rects and the number of passed rects in @out_num.
+ */
+void vmw_clip_cliprects(struct drm_clip_rect *rects,
+ int num_rects,
+ struct vmw_clip_rect clip,
+ SVGASignedRect *out_rects,
+ int *out_num)
+{
+ int i, k;
+
+ for (i = 0, k = 0; i < num_rects; i++) {
+ int x1 = max_t(int, clip.x1, rects[i].x1);
+ int y1 = max_t(int, clip.y1, rects[i].y1);
+ int x2 = min_t(int, clip.x2, rects[i].x2);
+ int y2 = min_t(int, clip.y2, rects[i].y2);
+
+ if (x1 >= x2)
+ continue;
+ if (y1 >= y2)
+ continue;
+
+ out_rects[k].left = x1;
+ out_rects[k].top = y1;
+ out_rects[k].right = x2;
+ out_rects[k].bottom = y2;
+ k++;
+ }
+
+ *out_num = k;
+}
+
void vmw_display_unit_cleanup(struct vmw_display_unit *du)
{
if (du->cursor_surface)
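The new vmw_clip_cliprects() helper above is plain integer math: each dirty rect is intersected with a per-CRTC clip and empty intersections are dropped. The same logic in a standalone, compilable form (types simplified, standard C):

#include <stdio.h>

struct rect { int x1, y1, x2, y2; };

/* Intersect src with clip; return 1 and fill *out when the intersection
 * is non-empty, 0 when the rect is clipped away entirely. */
static int clip_rect(struct rect src, struct rect clip, struct rect *out)
{
	int x1 = src.x1 > clip.x1 ? src.x1 : clip.x1;
	int y1 = src.y1 > clip.y1 ? src.y1 : clip.y1;
	int x2 = src.x2 < clip.x2 ? src.x2 : clip.x2;
	int y2 = src.y2 < clip.y2 ? src.y2 : clip.y2;

	if (x1 >= x2 || y1 >= y2)
		return 0;

	out->x1 = x1; out->y1 = y1; out->x2 = x2; out->y2 = y2;
	return 1;
}

int main(void)
{
	struct rect crtc = { 0, 0, 1024, 768 };
	struct rect dirty = { 900, 700, 1200, 900 };	/* hangs off the right/bottom edge */
	struct rect out;

	if (clip_rect(dirty, crtc, &out))
		printf("clipped: %d,%d - %d,%d\n", out.x1, out.y1, out.x2, out.y2);
	return 0;
}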
@@ -82,6 +120,43 @@ int vmw_cursor_update_image(struct vmw_private *dev_priv,
return 0;
}
+int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *dmabuf,
+ u32 width, u32 height,
+ u32 hotspotX, u32 hotspotY)
+{
+ struct ttm_bo_kmap_obj map;
+ unsigned long kmap_offset;
+ unsigned long kmap_num;
+ void *virtual;
+ bool dummy;
+ int ret;
+
+ kmap_offset = 0;
+ kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+ ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("reserve failed\n");
+ return -EINVAL;
+ }
+
+ ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
+ if (unlikely(ret != 0))
+ goto err_unreserve;
+
+ virtual = ttm_kmap_obj_virtual(&map, &dummy);
+ ret = vmw_cursor_update_image(dev_priv, virtual, width, height,
+ hotspotX, hotspotY);
+
+ ttm_bo_kunmap(&map);
+err_unreserve:
+ ttm_bo_unreserve(&dmabuf->base);
+
+ return ret;
+}
+
+
void vmw_cursor_update_position(struct vmw_private *dev_priv,
bool show, int x, int y)
{
@@ -110,24 +185,21 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
return -EINVAL;
if (handle) {
- ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
- handle, &surface);
- if (!ret) {
- if (!surface->snooper.image) {
- DRM_ERROR("surface not suitable for cursor\n");
- vmw_surface_unreference(&surface);
- return -EINVAL;
- }
- } else {
- ret = vmw_user_dmabuf_lookup(tfile,
- handle, &dmabuf);
- if (ret) {
- DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
- return -EINVAL;
- }
+ ret = vmw_user_lookup_handle(dev_priv, tfile,
+ handle, &surface, &dmabuf);
+ if (ret) {
+ DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
+ return -EINVAL;
}
}
+ /* need to do this before taking down old image */
+ if (surface && !surface->snooper.image) {
+ DRM_ERROR("surface not suitable for cursor\n");
+ vmw_surface_unreference(&surface);
+ return -EINVAL;
+ }
+
/* takedown old cursor */
if (du->cursor_surface) {
du->cursor_surface->snooper.crtc = NULL;
@@ -146,36 +218,11 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
vmw_cursor_update_image(dev_priv, surface->snooper.image,
64, 64, du->hotspot_x, du->hotspot_y);
} else if (dmabuf) {
- struct ttm_bo_kmap_obj map;
- unsigned long kmap_offset;
- unsigned long kmap_num;
- void *virtual;
- bool dummy;
-
/* vmw_user_surface_lookup takes one reference */
du->cursor_dmabuf = dmabuf;
- kmap_offset = 0;
- kmap_num = (64*64*4) >> PAGE_SHIFT;
-
- ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
- if (unlikely(ret != 0)) {
- DRM_ERROR("reserve failed\n");
- return -EINVAL;
- }
-
- ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
- if (unlikely(ret != 0))
- goto err_unreserve;
-
- virtual = ttm_kmap_obj_virtual(&map, &dummy);
- vmw_cursor_update_image(dev_priv, virtual, 64, 64,
- du->hotspot_x, du->hotspot_y);
-
- ttm_bo_kunmap(&map);
-err_unreserve:
- ttm_bo_unreserve(&dmabuf->base);
-
+ ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, width, height,
+ du->hotspot_x, du->hotspot_y);
} else {
vmw_cursor_update_position(dev_priv, false, 0, 0);
return 0;
@@ -377,8 +424,9 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
struct drm_clip_rect *clips,
unsigned num_clips, int inc)
{
- struct drm_clip_rect *clips_ptr;
struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+ struct drm_clip_rect *clips_ptr;
+ struct drm_clip_rect *tmp;
struct drm_crtc *crtc;
size_t fifo_size;
int i, num_units;
@@ -391,7 +439,6 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
} *cmd;
SVGASignedRect *blits;
-
num_units = 0;
list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
head) {
@@ -402,13 +449,24 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
BUG_ON(!clips || !num_clips);
+ tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
+ if (unlikely(tmp == NULL)) {
+ DRM_ERROR("Temporary cliprect memory alloc failed.\n");
+ return -ENOMEM;
+ }
+
fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
cmd = kzalloc(fifo_size, GFP_KERNEL);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Temporary fifo memory alloc failed.\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_free_tmp;
}
+ /* setup blits pointer */
+ blits = (SVGASignedRect *)&cmd[1];
+
+ /* initial clip region */
left = clips->x1;
right = clips->x2;
top = clips->y1;
@@ -434,45 +492,60 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
cmd->body.srcRect.bottom = bottom;
clips_ptr = clips;
- blits = (SVGASignedRect *)&cmd[1];
for (i = 0; i < num_clips; i++, clips_ptr += inc) {
- blits[i].left = clips_ptr->x1 - left;
- blits[i].right = clips_ptr->x2 - left;
- blits[i].top = clips_ptr->y1 - top;
- blits[i].bottom = clips_ptr->y2 - top;
+ tmp[i].x1 = clips_ptr->x1 - left;
+ tmp[i].x2 = clips_ptr->x2 - left;
+ tmp[i].y1 = clips_ptr->y1 - top;
+ tmp[i].y2 = clips_ptr->y2 - top;
}
/* do per unit writing, reuse fifo for each */
for (i = 0; i < num_units; i++) {
struct vmw_display_unit *unit = units[i];
- int clip_x1 = left - unit->crtc.x;
- int clip_y1 = top - unit->crtc.y;
- int clip_x2 = right - unit->crtc.x;
- int clip_y2 = bottom - unit->crtc.y;
+ struct vmw_clip_rect clip;
+ int num;
+
+ clip.x1 = left - unit->crtc.x;
+ clip.y1 = top - unit->crtc.y;
+ clip.x2 = right - unit->crtc.x;
+ clip.y2 = bottom - unit->crtc.y;
/* skip any crtcs that misses the clip region */
- if (clip_x1 >= unit->crtc.mode.hdisplay ||
- clip_y1 >= unit->crtc.mode.vdisplay ||
- clip_x2 <= 0 || clip_y2 <= 0)
+ if (clip.x1 >= unit->crtc.mode.hdisplay ||
+ clip.y1 >= unit->crtc.mode.vdisplay ||
+ clip.x2 <= 0 || clip.y2 <= 0)
continue;
+ /*
+ * In order for the clip rects to be correctly scaled
+ * the src and dest rects need to be the same size.
+ */
+ cmd->body.destRect.left = clip.x1;
+ cmd->body.destRect.right = clip.x2;
+ cmd->body.destRect.top = clip.y1;
+ cmd->body.destRect.bottom = clip.y2;
+
+ /* create a clip rect of the crtc in dest coords */
+ clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
+ clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
+ clip.x1 = 0 - clip.x1;
+ clip.y1 = 0 - clip.y1;
+
/* need to reset sid as it is changed by execbuf */
cmd->body.srcImage.sid = cpu_to_le32(framebuffer->user_handle);
-
cmd->body.destScreenId = unit->unit;
- /*
- * The blit command is a lot more resilient then the
- * readback command when it comes to clip rects. So its
- * okay to go out of bounds.
- */
+ /* clip and write blits to cmd stream */
+ vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
- cmd->body.destRect.left = clip_x1;
- cmd->body.destRect.right = clip_x2;
- cmd->body.destRect.top = clip_y1;
- cmd->body.destRect.bottom = clip_y2;
+ /* if no cliprects hit skip this */
+ if (num == 0)
+ continue;
+ /* recalculate package length */
+ fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
+ cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
fifo_size, 0, NULL);
@@ -480,7 +553,10 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
break;
}
+
kfree(cmd);
+out_free_tmp:
+ kfree(tmp);
return ret;
}
@@ -556,6 +632,10 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
* Sanity checks.
*/
+ /* Surface must be marked as a scanout. */
+ if (unlikely(!surface->scanout))
+ return -EINVAL;
+
if (unlikely(surface->mip_levels[0] != 1 ||
surface->num_sizes != 1 ||
surface->sizes[0].width < mode_cmd->width ||
@@ -782,6 +862,7 @@ static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
int clip_y1 = clips_ptr->y1 - unit->crtc.y;
int clip_x2 = clips_ptr->x2 - unit->crtc.x;
int clip_y2 = clips_ptr->y2 - unit->crtc.y;
+ int move_x, move_y;
/* skip any crtcs that misses the clip region */
if (clip_x1 >= unit->crtc.mode.hdisplay ||
@@ -789,12 +870,21 @@ static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
clip_x2 <= 0 || clip_y2 <= 0)
continue;
+ /* clip size to crtc size */
+ clip_x2 = min_t(int, clip_x2, unit->crtc.mode.hdisplay);
+ clip_y2 = min_t(int, clip_y2, unit->crtc.mode.vdisplay);
+
+ /* translate both src and dest to bring clip into screen */
+ move_x = min_t(int, clip_x1, 0);
+ move_y = min_t(int, clip_y1, 0);
+
+ /* actual translate done here */
blits[hit_num].header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
blits[hit_num].body.destScreenId = unit->unit;
- blits[hit_num].body.srcOrigin.x = clips_ptr->x1;
- blits[hit_num].body.srcOrigin.y = clips_ptr->y1;
- blits[hit_num].body.destRect.left = clip_x1;
- blits[hit_num].body.destRect.top = clip_y1;
+ blits[hit_num].body.srcOrigin.x = clips_ptr->x1 - move_x;
+ blits[hit_num].body.srcOrigin.y = clips_ptr->y1 - move_y;
+ blits[hit_num].body.destRect.left = clip_x1 - move_x;
+ blits[hit_num].body.destRect.top = clip_y1 - move_y;
blits[hit_num].body.destRect.right = clip_x2;
blits[hit_num].body.destRect.bottom = clip_y2;
hit_num++;
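In the GMRFB blit path above, a clip that starts off-screen (negative clip_x1/clip_y1) is handled by clamping its far edge to the CRTC size and then shifting both the source origin and the destination rect by the same negative amount, so the visible portion still maps to the right pixels. A small arithmetic illustration with made-up numbers:

#include <stdio.h>

int main(void)
{
	int hdisplay = 1024;
	int src_x = 300;			/* hypothetical source origin */
	int clip_x1 = -20, clip_x2 = 1100;	/* clip starts left of and ends past the crtc */

	/* clamp the far edge to the crtc size */
	if (clip_x2 > hdisplay)
		clip_x2 = hdisplay;

	/* translate src and dest together so the clip begins on screen */
	int move_x = clip_x1 < 0 ? clip_x1 : 0;
	src_x -= move_x;			/* 300 - (-20) = 320 */
	clip_x1 -= move_x;			/* becomes 0 */

	printf("src_x=%d dest=%d..%d\n", src_x, clip_x1, clip_x2);
	return 0;
}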
@@ -1033,46 +1123,29 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
return ERR_PTR(-ENOENT);
}
- /**
- * End conditioned code.
- */
-
- ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
- mode_cmd->handle, &surface);
+ /* returns either a dmabuf or surface */
+ ret = vmw_user_lookup_handle(dev_priv, tfile,
+ mode_cmd->handle,
+ &surface, &bo);
if (ret)
- goto try_dmabuf;
-
- if (!surface->scanout)
- goto err_not_scanout;
-
- ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv, surface,
- &vfb, mode_cmd);
-
- /* vmw_user_surface_lookup takes one ref so does new_fb */
- vmw_surface_unreference(&surface);
-
- if (ret) {
- DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
- ttm_base_object_unref(&user_obj);
- return ERR_PTR(ret);
- } else
- vfb->user_obj = user_obj;
- return &vfb->base;
-
-try_dmabuf:
- DRM_INFO("%s: trying buffer\n", __func__);
-
- ret = vmw_user_dmabuf_lookup(tfile, mode_cmd->handle, &bo);
- if (ret) {
- DRM_ERROR("failed to find buffer: %i\n", ret);
- return ERR_PTR(-ENOENT);
- }
-
- ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
- mode_cmd);
+ goto err_out;
+
+ /* Create the new framebuffer depending on what we got back */
+ if (bo)
+ ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
+ mode_cmd);
+ else if (surface)
+ ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv,
+ surface, &vfb, mode_cmd);
+ else
+ BUG();
- /* vmw_user_dmabuf_lookup takes one ref so does new_fb */
- vmw_dmabuf_unreference(&bo);
+err_out:
+ /* vmw_user_lookup_handle takes one ref so does new_fb */
+ if (bo)
+ vmw_dmabuf_unreference(&bo);
+ if (surface)
+ vmw_surface_unreference(&surface);
if (ret) {
DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
@@ -1082,14 +1155,6 @@ try_dmabuf:
vfb->user_obj = user_obj;
return &vfb->base;
-
-err_not_scanout:
- DRM_ERROR("surface not marked as scanout\n");
- /* vmw_user_surface_lookup takes one ref */
- vmw_surface_unreference(&surface);
- ttm_base_object_unref(&user_obj);
-
- return ERR_PTR(-EINVAL);
}
static struct drm_mode_config_funcs vmw_kms_funcs = {
@@ -1106,10 +1171,12 @@ int vmw_kms_present(struct vmw_private *dev_priv,
uint32_t num_clips)
{
struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+ struct drm_clip_rect *tmp;
struct drm_crtc *crtc;
size_t fifo_size;
int i, k, num_units;
int ret = 0; /* silence warning */
+ int left, right, top, bottom;
struct {
SVGA3dCmdHeader header;
@@ -1127,60 +1194,95 @@ int vmw_kms_present(struct vmw_private *dev_priv,
BUG_ON(surface == NULL);
BUG_ON(!clips || !num_clips);
+ tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
+ if (unlikely(tmp == NULL)) {
+ DRM_ERROR("Temporary cliprect memory alloc failed.\n");
+ return -ENOMEM;
+ }
+
fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
cmd = kmalloc(fifo_size, GFP_KERNEL);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed to allocate temporary fifo memory.\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_free_tmp;
+ }
+
+ left = clips->x;
+ right = clips->x + clips->w;
+ top = clips->y;
+ bottom = clips->y + clips->h;
+
+ for (i = 1; i < num_clips; i++) {
+ left = min_t(int, left, (int)clips[i].x);
+ right = max_t(int, right, (int)clips[i].x + clips[i].w);
+ top = min_t(int, top, (int)clips[i].y);
+ bottom = max_t(int, bottom, (int)clips[i].y + clips[i].h);
}
/* only need to do this once */
memset(cmd, 0, fifo_size);
cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
- cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
-
- cmd->body.srcRect.left = 0;
- cmd->body.srcRect.right = surface->sizes[0].width;
- cmd->body.srcRect.top = 0;
- cmd->body.srcRect.bottom = surface->sizes[0].height;
blits = (SVGASignedRect *)&cmd[1];
+
+ cmd->body.srcRect.left = left;
+ cmd->body.srcRect.right = right;
+ cmd->body.srcRect.top = top;
+ cmd->body.srcRect.bottom = bottom;
+
for (i = 0; i < num_clips; i++) {
- blits[i].left = clips[i].x;
- blits[i].right = clips[i].x + clips[i].w;
- blits[i].top = clips[i].y;
- blits[i].bottom = clips[i].y + clips[i].h;
+ tmp[i].x1 = clips[i].x - left;
+ tmp[i].x2 = clips[i].x + clips[i].w - left;
+ tmp[i].y1 = clips[i].y - top;
+ tmp[i].y2 = clips[i].y + clips[i].h - top;
}
for (k = 0; k < num_units; k++) {
struct vmw_display_unit *unit = units[k];
- int clip_x1 = destX - unit->crtc.x;
- int clip_y1 = destY - unit->crtc.y;
- int clip_x2 = clip_x1 + surface->sizes[0].width;
- int clip_y2 = clip_y1 + surface->sizes[0].height;
+ struct vmw_clip_rect clip;
+ int num;
+
+ clip.x1 = left + destX - unit->crtc.x;
+ clip.y1 = top + destY - unit->crtc.y;
+ clip.x2 = right + destX - unit->crtc.x;
+ clip.y2 = bottom + destY - unit->crtc.y;
/* skip any crtcs that misses the clip region */
- if (clip_x1 >= unit->crtc.mode.hdisplay ||
- clip_y1 >= unit->crtc.mode.vdisplay ||
- clip_x2 <= 0 || clip_y2 <= 0)
+ if (clip.x1 >= unit->crtc.mode.hdisplay ||
+ clip.y1 >= unit->crtc.mode.vdisplay ||
+ clip.x2 <= 0 || clip.y2 <= 0)
continue;
+ /*
+ * In order for the clip rects to be correctly scaled
+ * the src and dest rects need to be the same size.
+ */
+ cmd->body.destRect.left = clip.x1;
+ cmd->body.destRect.right = clip.x2;
+ cmd->body.destRect.top = clip.y1;
+ cmd->body.destRect.bottom = clip.y2;
+
+ /* create a clip rect of the crtc in dest coords */
+ clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
+ clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
+ clip.x1 = 0 - clip.x1;
+ clip.y1 = 0 - clip.y1;
+
/* need to reset sid as it is changed by execbuf */
cmd->body.srcImage.sid = sid;
-
cmd->body.destScreenId = unit->unit;
- /*
- * The blit command is a lot more resilient then the
- * readback command when it comes to clip rects. So its
- * okay to go out of bounds.
- */
+ /* clip and write blits to cmd stream */
+ vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
- cmd->body.destRect.left = clip_x1;
- cmd->body.destRect.right = clip_x2;
- cmd->body.destRect.top = clip_y1;
- cmd->body.destRect.bottom = clip_y2;
+ /* if no cliprects hit skip this */
+ if (num == 0)
+ continue;
+ /* recalculate package length */
+ fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
+ cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
fifo_size, 0, NULL);
@@ -1189,6 +1291,8 @@ int vmw_kms_present(struct vmw_private *dev_priv,
}
kfree(cmd);
+out_free_tmp:
+ kfree(tmp);
return ret;
}
@@ -1809,7 +1913,8 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
}
rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
- rects = kzalloc(rects_size, GFP_KERNEL);
+ rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
+ GFP_KERNEL);
if (unlikely(!rects)) {
ret = -ENOMEM;
goto out_unlock;
@@ -1824,10 +1929,10 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
}
for (i = 0; i < arg->num_outputs; ++i) {
- if (rects->x < 0 ||
- rects->y < 0 ||
- rects->x + rects->w > mode_config->max_width ||
- rects->y + rects->h > mode_config->max_height) {
+ if (rects[i].x < 0 ||
+ rects[i].y < 0 ||
+ rects[i].x + rects[i].w > mode_config->max_width ||
+ rects[i].y + rects[i].h > mode_config->max_height) {
DRM_ERROR("Invalid GUI layout.\n");
ret = -EINVAL;
goto out_free;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index af8e6e5bd96..e1cb8556355 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -62,9 +62,14 @@ struct vmw_framebuffer {
int vmw_cursor_update_image(struct vmw_private *dev_priv,
u32 *image, u32 width, u32 height,
u32 hotspotX, u32 hotspotY);
+int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *dmabuf,
+ u32 width, u32 height,
+ u32 hotspotX, u32 hotspotY);
void vmw_cursor_update_position(struct vmw_private *dev_priv,
bool show, int x, int y);
+
/**
* Base class display unit.
*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 90c5e392849..8f8dbd43c33 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -74,9 +74,10 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
{
struct vmw_legacy_display *lds = dev_priv->ldu_priv;
struct vmw_legacy_display_unit *entry;
+ struct vmw_display_unit *du = NULL;
struct drm_framebuffer *fb = NULL;
struct drm_crtc *crtc = NULL;
- int i = 0;
+ int i = 0, ret;
/* If there is no display topology the host just assumes
* that the guest will set the same layout as the host.
@@ -129,6 +130,25 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
lds->last_num_active = lds->num_active;
+
+ /* Find the first du with a cursor. */
+ list_for_each_entry(entry, &lds->active, active) {
+ du = &entry->base;
+
+ if (!du->cursor_dmabuf)
+ continue;
+
+ ret = vmw_cursor_update_dmabuf(dev_priv,
+ du->cursor_dmabuf,
+ 64, 64,
+ du->hotspot_x,
+ du->hotspot_y);
+ if (ret == 0)
+ break;
+
+ DRM_ERROR("Could not update cursor image\n");
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 86c5e4cceb3..1c7f09e2681 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1190,6 +1190,29 @@ void vmw_resource_unreserve(struct list_head *list)
write_unlock(lock);
}
+/**
+ * Helper function that looks up either a surface or a dmabuf.
+ *
+ * The pointers pointed at by out_surf and out_buf need to be NULL.
+ */
+int vmw_user_lookup_handle(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle,
+ struct vmw_surface **out_surf,
+ struct vmw_dma_buffer **out_buf)
+{
+ int ret;
+
+ BUG_ON(*out_surf || *out_buf);
+
+ ret = vmw_user_surface_lookup_handle(dev_priv, tfile, handle, out_surf);
+ if (!ret)
+ return 0;
+
+ ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
+ return ret;
+}
+
int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
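The vmw_user_lookup_handle() helper added here tries the handle as a surface first and only falls back to a dmabuf lookup on failure; exactly one of the two out-pointers is filled in on success. Callers such as vmw_kms_fb_create() and the cursor ioctl then follow the pattern sketched below; use_surface() and use_dmabuf() are hypothetical consumers:

	struct vmw_surface *surface = NULL;	/* both must start out NULL */
	struct vmw_dma_buffer *bo = NULL;
	int ret;

	ret = vmw_user_lookup_handle(dev_priv, tfile, handle, &surface, &bo);
	if (ret)
		return ret;			/* handle is neither a surface nor a dmabuf */

	if (bo)
		ret = use_dmabuf(bo);		/* hypothetical */
	else
		ret = use_surface(surface);	/* hypothetical */

	/* the lookup took a reference on whichever object was found */
	if (bo)
		vmw_dmabuf_unreference(&bo);
	if (surface)
		vmw_surface_unreference(&surface);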
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 848a56c0279..af353842f75 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1771,8 +1771,8 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0001) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0002) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0003) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0004) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_4_PHIDGETSERVO_30) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_1_PHIDGETSERVO_30) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 06ce996b8b6..4a441a6f996 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -266,7 +266,7 @@
#define USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR 0x0002
#define USB_VENDOR_ID_GENERAL_TOUCH 0x0dfc
-#define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0001
+#define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0003
#define USB_VENDOR_ID_GLAB 0x06c2
#define USB_DEVICE_ID_4_PHIDGETSERVO_30 0x0038
diff --git a/drivers/hwmon/ad7314.c b/drivers/hwmon/ad7314.c
index 318e38e8537..5d760f3d21c 100644
--- a/drivers/hwmon/ad7314.c
+++ b/drivers/hwmon/ad7314.c
@@ -160,7 +160,6 @@ MODULE_DEVICE_TABLE(spi, ad7314_id);
static struct spi_driver ad7314_driver = {
.driver = {
.name = "ad7314",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ad7314_probe,
diff --git a/drivers/hwmon/ads7871.c b/drivers/hwmon/ads7871.c
index 52319340e18..04450f8bf5d 100644
--- a/drivers/hwmon/ads7871.c
+++ b/drivers/hwmon/ads7871.c
@@ -227,7 +227,6 @@ static int __devexit ads7871_remove(struct spi_device *spi)
static struct spi_driver ads7871_driver = {
.driver = {
.name = DEVICE_NAME,
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
diff --git a/drivers/hwmon/exynos4_tmu.c b/drivers/hwmon/exynos4_tmu.c
index faa0884f61f..f2359a0093b 100644
--- a/drivers/hwmon/exynos4_tmu.c
+++ b/drivers/hwmon/exynos4_tmu.c
@@ -506,17 +506,7 @@ static struct platform_driver exynos4_tmu_driver = {
.resume = exynos4_tmu_resume,
};
-static int __init exynos4_tmu_driver_init(void)
-{
- return platform_driver_register(&exynos4_tmu_driver);
-}
-module_init(exynos4_tmu_driver_init);
-
-static void __exit exynos4_tmu_driver_exit(void)
-{
- platform_driver_unregister(&exynos4_tmu_driver);
-}
-module_exit(exynos4_tmu_driver_exit);
+module_platform_driver(exynos4_tmu_driver);
MODULE_DESCRIPTION("EXYNOS4 TMU Driver");
MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index 89aa9fb743a..9ba38f318ff 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -539,18 +539,7 @@ static struct platform_driver gpio_fan_driver = {
},
};
-static int __init gpio_fan_init(void)
-{
- return platform_driver_register(&gpio_fan_driver);
-}
-
-static void __exit gpio_fan_exit(void)
-{
- platform_driver_unregister(&gpio_fan_driver);
-}
-
-module_init(gpio_fan_init);
-module_exit(gpio_fan_exit);
+module_platform_driver(gpio_fan_driver);
MODULE_AUTHOR("Simon Guinot <sguinot@lacie.com>");
MODULE_DESCRIPTION("GPIO FAN driver");
diff --git a/drivers/hwmon/jz4740-hwmon.c b/drivers/hwmon/jz4740-hwmon.c
index fea292d4340..5253d23361d 100644
--- a/drivers/hwmon/jz4740-hwmon.c
+++ b/drivers/hwmon/jz4740-hwmon.c
@@ -59,7 +59,7 @@ static ssize_t jz4740_hwmon_read_adcin(struct device *dev,
{
struct jz4740_hwmon *hwmon = dev_get_drvdata(dev);
struct completion *completion = &hwmon->read_completion;
- unsigned long t;
+ long t;
unsigned long val;
int ret;
@@ -203,7 +203,7 @@ static int __devexit jz4740_hwmon_remove(struct platform_device *pdev)
return 0;
}
-struct platform_driver jz4740_hwmon_driver = {
+static struct platform_driver jz4740_hwmon_driver = {
.probe = jz4740_hwmon_probe,
.remove = __devexit_p(jz4740_hwmon_remove),
.driver = {
@@ -212,17 +212,7 @@ struct platform_driver jz4740_hwmon_driver = {
},
};
-static int __init jz4740_hwmon_init(void)
-{
- return platform_driver_register(&jz4740_hwmon_driver);
-}
-module_init(jz4740_hwmon_init);
-
-static void __exit jz4740_hwmon_exit(void)
-{
- platform_driver_unregister(&jz4740_hwmon_driver);
-}
-module_exit(jz4740_hwmon_exit);
+module_platform_driver(jz4740_hwmon_driver);
MODULE_DESCRIPTION("JZ4740 SoC HWMON driver");
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index eab11615dce..9b382ec2c3b 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -432,19 +432,7 @@ static struct platform_driver ntc_thermistor_driver = {
.id_table = ntc_thermistor_id,
};
-static int __init ntc_thermistor_init(void)
-{
- return platform_driver_register(&ntc_thermistor_driver);
-}
-
-module_init(ntc_thermistor_init);
-
-static void __exit ntc_thermistor_cleanup(void)
-{
- platform_driver_unregister(&ntc_thermistor_driver);
-}
-
-module_exit(ntc_thermistor_cleanup);
+module_platform_driver(ntc_thermistor_driver);
MODULE_DESCRIPTION("NTC Thermistor Driver");
MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
diff --git a/drivers/hwmon/s3c-hwmon.c b/drivers/hwmon/s3c-hwmon.c
index b39f52e2752..f6c26d19f52 100644
--- a/drivers/hwmon/s3c-hwmon.c
+++ b/drivers/hwmon/s3c-hwmon.c
@@ -393,18 +393,7 @@ static struct platform_driver s3c_hwmon_driver = {
.remove = __devexit_p(s3c_hwmon_remove),
};
-static int __init s3c_hwmon_init(void)
-{
- return platform_driver_register(&s3c_hwmon_driver);
-}
-
-static void __exit s3c_hwmon_exit(void)
-{
- platform_driver_unregister(&s3c_hwmon_driver);
-}
-
-module_init(s3c_hwmon_init);
-module_exit(s3c_hwmon_exit);
+module_platform_driver(s3c_hwmon_driver);
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_DESCRIPTION("S3C ADC HWMon driver");
diff --git a/drivers/hwmon/sch5627.c b/drivers/hwmon/sch5627.c
index e3b5c6039c2..79b6dabe316 100644
--- a/drivers/hwmon/sch5627.c
+++ b/drivers/hwmon/sch5627.c
@@ -590,19 +590,8 @@ static struct platform_driver sch5627_driver = {
.remove = sch5627_remove,
};
-static int __init sch5627_init(void)
-{
- return platform_driver_register(&sch5627_driver);
-}
-
-static void __exit sch5627_exit(void)
-{
- platform_driver_unregister(&sch5627_driver);
-}
+module_platform_driver(sch5627_driver);
MODULE_DESCRIPTION("SMSC SCH5627 Hardware Monitoring Driver");
MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_LICENSE("GPL");
-
-module_init(sch5627_init);
-module_exit(sch5627_exit);
diff --git a/drivers/hwmon/sch5636.c b/drivers/hwmon/sch5636.c
index 244407aa79f..9d5236fb09b 100644
--- a/drivers/hwmon/sch5636.c
+++ b/drivers/hwmon/sch5636.c
@@ -521,19 +521,8 @@ static struct platform_driver sch5636_driver = {
.remove = sch5636_remove,
};
-static int __init sch5636_init(void)
-{
- return platform_driver_register(&sch5636_driver);
-}
-
-static void __exit sch5636_exit(void)
-{
- platform_driver_unregister(&sch5636_driver);
-}
+module_platform_driver(sch5636_driver);
MODULE_DESCRIPTION("SMSC SCH5636 Hardware Monitoring Driver");
MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_LICENSE("GPL");
-
-module_init(sch5636_init);
-module_exit(sch5636_exit);
diff --git a/drivers/hwmon/twl4030-madc-hwmon.c b/drivers/hwmon/twl4030-madc-hwmon.c
index 57240740b16..0018c7dd009 100644
--- a/drivers/hwmon/twl4030-madc-hwmon.c
+++ b/drivers/hwmon/twl4030-madc-hwmon.c
@@ -136,19 +136,7 @@ static struct platform_driver twl4030_madc_hwmon_driver = {
},
};
-static int __init twl4030_madc_hwmon_init(void)
-{
- return platform_driver_register(&twl4030_madc_hwmon_driver);
-}
-
-module_init(twl4030_madc_hwmon_init);
-
-static void __exit twl4030_madc_hwmon_exit(void)
-{
- platform_driver_unregister(&twl4030_madc_hwmon_driver);
-}
-
-module_exit(twl4030_madc_hwmon_exit);
+module_platform_driver(twl4030_madc_hwmon_driver);
MODULE_DESCRIPTION("TWL4030 ADC Hwmon driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/ultra45_env.c b/drivers/hwmon/ultra45_env.c
index 3cd07bf42dc..b9a87e89bab 100644
--- a/drivers/hwmon/ultra45_env.c
+++ b/drivers/hwmon/ultra45_env.c
@@ -309,15 +309,4 @@ static struct platform_driver env_driver = {
.remove = __devexit_p(env_remove),
};
-static int __init env_init(void)
-{
- return platform_driver_register(&env_driver);
-}
-
-static void __exit env_exit(void)
-{
- platform_driver_unregister(&env_driver);
-}
-
-module_init(env_init);
-module_exit(env_exit);
+module_platform_driver(env_driver);
diff --git a/drivers/hwmon/wm831x-hwmon.c b/drivers/hwmon/wm831x-hwmon.c
index 97b1f834a47..9b598ed2602 100644
--- a/drivers/hwmon/wm831x-hwmon.c
+++ b/drivers/hwmon/wm831x-hwmon.c
@@ -209,17 +209,7 @@ static struct platform_driver wm831x_hwmon_driver = {
},
};
-static int __init wm831x_hwmon_init(void)
-{
- return platform_driver_register(&wm831x_hwmon_driver);
-}
-module_init(wm831x_hwmon_init);
-
-static void __exit wm831x_hwmon_exit(void)
-{
- platform_driver_unregister(&wm831x_hwmon_driver);
-}
-module_exit(wm831x_hwmon_exit);
+module_platform_driver(wm831x_hwmon_driver);
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("WM831x Hardware Monitoring");
diff --git a/drivers/hwmon/wm8350-hwmon.c b/drivers/hwmon/wm8350-hwmon.c
index 13290595ca8..3ff67edbdc4 100644
--- a/drivers/hwmon/wm8350-hwmon.c
+++ b/drivers/hwmon/wm8350-hwmon.c
@@ -133,17 +133,7 @@ static struct platform_driver wm8350_hwmon_driver = {
},
};
-static int __init wm8350_hwmon_init(void)
-{
- return platform_driver_register(&wm8350_hwmon_driver);
-}
-module_init(wm8350_hwmon_init);
-
-static void __exit wm8350_hwmon_exit(void)
-{
- platform_driver_unregister(&wm8350_hwmon_driver);
-}
-module_exit(wm8350_hwmon_exit);
+module_platform_driver(wm8350_hwmon_driver);
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("WM8350 Hardware Monitoring");
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index 8cebef49aea..18936ac9d51 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -893,6 +893,13 @@ static int __devinit pch_i2c_probe(struct pci_dev *pdev,
/* Set the number of I2C channel instance */
adap_info->ch_num = id->driver_data;
+ ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
+ KBUILD_MODNAME, adap_info);
+ if (ret) {
+ pch_pci_err(pdev, "request_irq FAILED\n");
+ goto err_request_irq;
+ }
+
for (i = 0; i < adap_info->ch_num; i++) {
pch_adap = &adap_info->pch_data[i].pch_adapter;
adap_info->pch_i2c_suspended = false;
@@ -910,28 +917,23 @@ static int __devinit pch_i2c_probe(struct pci_dev *pdev,
pch_adap->dev.parent = &pdev->dev;
+ pch_i2c_init(&adap_info->pch_data[i]);
ret = i2c_add_adapter(pch_adap);
if (ret) {
pch_pci_err(pdev, "i2c_add_adapter[ch:%d] FAILED\n", i);
- goto err_i2c_add_adapter;
+ goto err_add_adapter;
}
-
- pch_i2c_init(&adap_info->pch_data[i]);
- }
- ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
- KBUILD_MODNAME, adap_info);
- if (ret) {
- pch_pci_err(pdev, "request_irq FAILED\n");
- goto err_i2c_add_adapter;
}
pci_set_drvdata(pdev, adap_info);
pch_pci_dbg(pdev, "returns %d.\n", ret);
return 0;
-err_i2c_add_adapter:
+err_add_adapter:
for (j = 0; j < i; j++)
i2c_del_adapter(&adap_info->pch_data[j].pch_adapter);
+ free_irq(pdev->irq, adap_info);
+err_request_irq:
pci_iounmap(pdev, base_addr);
err_pci_iomap:
pci_release_regions(pdev);
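The i2c-eg20t fix acquires the IRQ before registering any adapters and mirrors that order in the error path: each failure jumps to a label that undoes only the steps that already succeeded, releasing them in reverse order of acquisition. The general goto-unwind shape, with hypothetical acquire/release helpers:

static int probe_sketch(void)
{
	int ret;

	ret = acquire_irq();		/* e.g. request_irq() */
	if (ret)
		goto err_irq;

	ret = register_adapters();	/* e.g. the i2c_add_adapter() loop */
	if (ret)
		goto err_adapters;

	return 0;

err_adapters:
	release_irq();			/* undo in reverse order of acquisition */
err_irq:
	return ret;
}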
diff --git a/drivers/i2c/busses/i2c-nuc900.c b/drivers/i2c/busses/i2c-nuc900.c
index 835e47b39bc..03b61577888 100644
--- a/drivers/i2c/busses/i2c-nuc900.c
+++ b/drivers/i2c/busses/i2c-nuc900.c
@@ -593,7 +593,7 @@ static int __devinit nuc900_i2c_probe(struct platform_device *pdev)
i2c->adap.algo_data = i2c;
i2c->adap.dev.parent = &pdev->dev;
- mfp_set_groupg(&pdev->dev);
+ mfp_set_groupg(&pdev->dev, NULL);
clk_get_rate(i2c->clk);
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index a43d0023446..fa23faa20f0 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1047,13 +1047,14 @@ omap_i2c_probe(struct platform_device *pdev)
* size. This is to ensure that we can handle the status on int
* call back latencies.
*/
- if (dev->rev >= OMAP_I2C_REV_ON_3530_4430) {
- dev->fifo_size = 0;
+
+ dev->fifo_size = (dev->fifo_size / 2);
+
+ if (dev->rev >= OMAP_I2C_REV_ON_3530_4430)
dev->b_hw = 0; /* Disable hardware fixes */
- } else {
- dev->fifo_size = (dev->fifo_size / 2);
+ else
dev->b_hw = 1; /* Enable hardware fixes */
- }
+
/* calculate wakeup latency constraint for MPU */
if (dev->set_mpu_wkup_lat != NULL)
dev->latency = (1000000 * dev->fifo_size) /
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 2754cef86a0..4c171808168 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -534,6 +534,7 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
/* first, try busy waiting briefly */
do {
+ cpu_relax();
iicstat = readl(i2c->regs + S3C2410_IICSTAT);
} while ((iicstat & S3C2410_IICSTAT_START) && --spins);
@@ -786,7 +787,7 @@ static void s3c24xx_i2c_dt_gpio_free(struct s3c24xx_i2c *i2c)
#else
static int s3c24xx_i2c_parse_dt_gpio(struct s3c24xx_i2c *i2c)
{
- return -EINVAL;
+ return 0;
}
static void s3c24xx_i2c_dt_gpio_free(struct s3c24xx_i2c *i2c)
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 691276bafd7..e9cf51b1343 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -216,7 +216,9 @@ static int addr4_resolve(struct sockaddr_in *src_in,
neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->dst.dev);
if (!neigh || !(neigh->nud_state & NUD_VALID)) {
+ rcu_read_lock();
neigh_event_send(dst_get_neighbour(&rt->dst), NULL);
+ rcu_read_unlock();
ret = -ENODATA;
if (neigh)
goto release;
@@ -274,15 +276,16 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
goto put;
}
+ rcu_read_lock();
neigh = dst_get_neighbour(dst);
if (!neigh || !(neigh->nud_state & NUD_VALID)) {
if (neigh)
neigh_event_send(neigh, NULL);
ret = -ENODATA;
- goto put;
+ } else {
+ ret = rdma_copy_addr(addr, dst->dev, neigh->ha);
}
-
- ret = rdma_copy_addr(addr, dst->dev, neigh->ha);
+ rcu_read_unlock();
put:
dst_release(dst);
return ret;
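This addr.c change, and the matching cxgb3/cxgb4/nes hunks further down, bracket dst_get_neighbour() and every use of the returned neighbour with rcu_read_lock()/rcu_read_unlock(): the neighbour is RCU-protected, so it may be freed as soon as execution leaves the read-side section. The shape of the pattern; copy_from_neigh() is a hypothetical consumer:

	rcu_read_lock();
	neigh = dst_get_neighbour(dst);
	if (neigh && (neigh->nud_state & NUD_VALID))
		ret = copy_from_neigh(addr, neigh);	/* use it only under the lock */
	else
		ret = -ENODATA;
	rcu_read_unlock();
	/* do not touch neigh past this point */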
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 75ff821c0af..d0d4aa9f480 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2513,6 +2513,9 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
req.private_data_len = sizeof(struct cma_hdr) +
conn_param->private_data_len;
+ if (req.private_data_len < conn_param->private_data_len)
+ return -EINVAL;
+
req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
if (!req.private_data)
return -ENOMEM;
@@ -2562,6 +2565,9 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
memset(&req, 0, sizeof req);
offset = cma_user_data_offset(id_priv->id.ps);
req.private_data_len = offset + conn_param->private_data_len;
+ if (req.private_data_len < conn_param->private_data_len)
+ return -EINVAL;
+
private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
if (!private_data)
return -ENOMEM;
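Both cma.c hunks add the same guard: after computing private_data_len as the header size plus the caller's length, a result smaller than the caller's length can only mean the addition wrapped in the narrow destination field, so the request is rejected with -EINVAL instead of allocating and copying with a truncated length. A self-contained demonstration with an 8-bit field and made-up lengths:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t hdr_len = 36;			/* hypothetical header size */
	uint8_t user_len = 230;			/* caller-controlled length */
	uint8_t total = hdr_len + user_len;	/* 266 wraps to 10 in 8 bits */

	/* same test as the patch: a sum below one of its addends means overflow */
	if (total < user_len)
		puts("overflow detected, reject with -EINVAL");
	else
		printf("total=%u\n", total);
	return 0;
}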
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index de6d0774e60..c88b12beef2 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1375,8 +1375,10 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
goto reject;
}
dst = &rt->dst;
+ rcu_read_lock();
neigh = dst_get_neighbour(dst);
l2t = t3_l2t_get(tdev, neigh, neigh->dev);
+ rcu_read_unlock();
if (!l2t) {
printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
__func__);
@@ -1946,10 +1948,12 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
}
ep->dst = &rt->dst;
+ rcu_read_lock();
neigh = dst_get_neighbour(ep->dst);
/* get a l2t entry */
ep->l2t = t3_l2t_get(ep->com.tdev, neigh, neigh->dev);
+ rcu_read_unlock();
if (!ep->l2t) {
printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
err = -ENOMEM;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index b36cdac9c55..0747004313a 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -542,8 +542,10 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
(mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
mpa->private_data_size = htons(ep->plen);
mpa->revision = mpa_rev_to_use;
- if (mpa_rev_to_use == 1)
+ if (mpa_rev_to_use == 1) {
ep->tried_with_mpa_v1 = 1;
+ ep->retry_with_mpa_v1 = 0;
+ }
if (mpa_rev_to_use == 2) {
mpa->private_data_size +=
@@ -1594,6 +1596,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
goto reject;
}
dst = &rt->dst;
+ rcu_read_lock();
neigh = dst_get_neighbour(dst);
if (neigh->dev->flags & IFF_LOOPBACK) {
pdev = ip_dev_find(&init_net, peer_ip);
@@ -1620,6 +1623,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
rss_qid = dev->rdev.lldi.rxq_ids[
cxgb4_port_idx(neigh->dev) * step];
}
+ rcu_read_unlock();
if (!l2t) {
printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
__func__);
@@ -1820,6 +1824,7 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
}
ep->dst = &rt->dst;
+ rcu_read_lock();
neigh = dst_get_neighbour(ep->dst);
/* get a l2t entry */
@@ -1856,6 +1861,7 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
cxgb4_port_idx(neigh->dev) * step];
}
+ rcu_read_unlock();
if (!ep->l2t) {
printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
err = -ENOMEM;
@@ -2301,6 +2307,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
}
ep->dst = &rt->dst;
+ rcu_read_lock();
neigh = dst_get_neighbour(ep->dst);
/* get a l2t entry */
@@ -2339,6 +2346,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
ep->retry_with_mpa_v1 = 0;
ep->tried_with_mpa_v1 = 0;
}
+ rcu_read_unlock();
if (!ep->l2t) {
printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
err = -ENOMEM;
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index f35a935267e..0f1607c8325 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -311,7 +311,7 @@ void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
while (ptr != cq->sw_pidx) {
cqe = &cq->sw_queue[ptr];
if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
- (CQE_QPID(cqe) == wq->rq.qid) && cqe_completes_wr(cqe, wq))
+ (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
(*count)++;
if (++ptr == cq->size)
ptr = 0;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 77f3dbc0aaa..18836cdf1e1 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1244,7 +1244,8 @@ err_reg:
err_counter:
for (; i; --i)
- mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);
+ if (ibdev->counters[i - 1] != -1)
+ mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);
err_map:
iounmap(ibdev->uar_map);
@@ -1275,7 +1276,8 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
}
iounmap(ibdev->uar_map);
for (p = 0; p < ibdev->num_ports; ++p)
- mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
+ if (ibdev->counters[p] != -1)
+ mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
mlx4_CLOSE_PORT(dev, p);
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index dfce9ea98a3..0a52d72371e 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1377,9 +1377,11 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
neigh_release(neigh);
}
- if ((neigh == NULL) || (!(neigh->nud_state & NUD_VALID)))
+ if ((neigh == NULL) || (!(neigh->nud_state & NUD_VALID))) {
+ rcu_read_lock();
neigh_event_send(dst_get_neighbour(&rt->dst), NULL);
-
+ rcu_read_unlock();
+ }
ip_rt_put(rt);
return rc;
}
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 574600ef5b4..a7403248d83 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -1285,7 +1285,7 @@ static int setup_ctxt(struct qib_pportdata *ppd, int ctxt,
strlcpy(rcd->comm, current->comm, sizeof(rcd->comm));
ctxt_fp(fp) = rcd;
qib_stats.sps_ctxts++;
- dd->freectxts++;
+ dd->freectxts--;
ret = 0;
goto bail;
@@ -1794,7 +1794,7 @@ static int qib_close(struct inode *in, struct file *fp)
if (dd->pageshadow)
unlock_expected_tids(rcd);
qib_stats.sps_ctxts--;
- dd->freectxts--;
+ dd->freectxts++;
}
mutex_unlock(&qib_mutex);
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 5bd2162b95d..1d5895941e1 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -2307,19 +2307,11 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
SYM_LSB(IBCCtrlA_0, MaxPktLen);
ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */
- /* initially come up waiting for TS1, without sending anything. */
- val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
- QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
-
- ppd->cpspec->ibcctrl_a = val;
/*
* Reset the PCS interface to the serdes (and also ibc, which is still
* in reset from above). Writes new value of ibcctrl_a as last step.
*/
qib_7322_mini_pcs_reset(ppd);
- qib_write_kreg(dd, kr_scratch, 0ULL);
- /* clear the linkinit cmds */
- ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
if (!ppd->cpspec->ibcctrl_b) {
unsigned lse = ppd->link_speed_enabled;
@@ -2385,6 +2377,14 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
set_vls(ppd);
+ /* initially come up DISABLED, without sending anything. */
+ val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
+ QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
+ qib_write_kreg_port(ppd, krp_ibcctrl_a, val);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+ /* clear the linkinit cmds */
+ ppd->cpspec->ibcctrl_a = val & ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
+
/* be paranoid against later code motion, etc. */
spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
@@ -5241,7 +5241,7 @@ static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
off */
if (ppd->dd->flags & QIB_HAS_QSFP) {
qd->t_insert = get_jiffies_64();
- schedule_work(&qd->work);
+ queue_work(ib_wq, &qd->work);
}
spin_lock_irqsave(&ppd->sdma_lock, flags);
if (__qib_sdma_running(ppd))
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c
index e06c4ed383f..fa71b1e666c 100644
--- a/drivers/infiniband/hw/qib/qib_qsfp.c
+++ b/drivers/infiniband/hw/qib/qib_qsfp.c
@@ -480,18 +480,6 @@ void qib_qsfp_init(struct qib_qsfp_data *qd,
udelay(20); /* Generous RST dwell */
dd->f_gpio_mod(dd, mask, mask, mask);
- /* Spec says module can take up to two seconds! */
- mask = QSFP_GPIO_MOD_PRS_N;
- if (qd->ppd->hw_pidx)
- mask <<= QSFP_GPIO_PORT2_SHIFT;
-
- /* Do not try to wait here. Better to let event handle it */
- if (!qib_qsfp_mod_present(qd->ppd))
- goto bail;
- /* We see a module, but it may be unwise to look yet. Just schedule */
- qd->t_insert = get_jiffies_64();
- queue_work(ib_wq, &qd->work);
-bail:
return;
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 0ef9af94997..4115be54ba3 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -57,21 +57,24 @@ struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
struct ib_pd *pd, struct ib_ah_attr *attr)
{
struct ipoib_ah *ah;
+ struct ib_ah *vah;
ah = kmalloc(sizeof *ah, GFP_KERNEL);
if (!ah)
- return NULL;
+ return ERR_PTR(-ENOMEM);
ah->dev = dev;
ah->last_send = 0;
kref_init(&ah->ref);
- ah->ah = ib_create_ah(pd, attr);
- if (IS_ERR(ah->ah)) {
+ vah = ib_create_ah(pd, attr);
+ if (IS_ERR(vah)) {
kfree(ah);
- ah = NULL;
- } else
+ ah = (struct ipoib_ah *)vah;
+ } else {
+ ah->ah = vah;
ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah);
+ }
return ah;
}
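ipoib_create_ah() now reports why allocation failed by returning ERR_PTR(-ENOMEM) or the error pointer from ib_create_ah() instead of NULL; callers switch to IS_ERR()/IS_ERR_OR_NULL(), and the multicast path below logs and propagates the original errno via PTR_ERR(). A compilable userspace sketch of the idiom, with minimal stand-ins for the kernel helpers:

#include <errno.h>
#include <stdio.h>

/* Minimal stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers. */
static void *ERR_PTR(long err) { return (void *)err; }
static long PTR_ERR(const void *p) { return (long)p; }
static int IS_ERR(const void *p) { return (unsigned long)p >= (unsigned long)-4095; }

static int object;

static void *create_object(int fail)
{
	if (fail)
		return ERR_PTR(-ENOMEM);	/* carry the reason, not just NULL */
	return &object;
}

int main(void)
{
	void *obj = create_object(1);

	if (IS_ERR(obj))
		printf("create failed: %ld\n", PTR_ERR(obj));
	return 0;
}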
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 7567b600023..83695b48b01 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -432,7 +432,7 @@ static void path_rec_completion(int status,
spin_lock_irqsave(&priv->lock, flags);
- if (ah) {
+ if (!IS_ERR_OR_NULL(ah)) {
path->pathrec = *pathrec;
old_ah = path->ah;
@@ -555,6 +555,7 @@ static int path_rec_start(struct net_device *dev,
return 0;
}
+/* called with rcu_read_lock */
static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -636,6 +637,7 @@ err_drop:
spin_unlock_irqrestore(&priv->lock, flags);
}
+/* called with rcu_read_lock */
static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(skb->dev);
@@ -720,13 +722,14 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct neighbour *n = NULL;
unsigned long flags;
+ rcu_read_lock();
if (likely(skb_dst(skb)))
n = dst_get_neighbour(skb_dst(skb));
if (likely(n)) {
if (unlikely(!*to_ipoib_neigh(n))) {
ipoib_path_lookup(skb, dev);
- return NETDEV_TX_OK;
+ goto unlock;
}
neigh = *to_ipoib_neigh(n);
@@ -749,17 +752,17 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
ipoib_neigh_free(dev, neigh);
spin_unlock_irqrestore(&priv->lock, flags);
ipoib_path_lookup(skb, dev);
- return NETDEV_TX_OK;
+ goto unlock;
}
if (ipoib_cm_get(neigh)) {
if (ipoib_cm_up(neigh)) {
ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
- return NETDEV_TX_OK;
+ goto unlock;
}
} else if (neigh->ah) {
ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(n->ha));
- return NETDEV_TX_OK;
+ goto unlock;
}
if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
@@ -793,13 +796,14 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
phdr->hwaddr + 4);
dev_kfree_skb_any(skb);
++dev->stats.tx_dropped;
- return NETDEV_TX_OK;
+ goto unlock;
}
unicast_arp_send(skb, dev, phdr);
}
}
-
+unlock:
+ rcu_read_unlock();
return NETDEV_TX_OK;
}
@@ -837,7 +841,7 @@ static int ipoib_hard_header(struct sk_buff *skb,
dst = skb_dst(skb);
n = NULL;
if (dst)
- n = dst_get_neighbour(dst);
+ n = dst_get_neighbour_raw(dst);
if ((!dst || !n) && daddr) {
struct ipoib_pseudoheader *phdr =
(struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 1b7a9768635..873bff97e69 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -240,8 +240,11 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
av.grh.dgid = mcast->mcmember.mgid;
ah = ipoib_create_ah(dev, priv->pd, &av);
- if (!ah) {
- ipoib_warn(priv, "ib_address_create failed\n");
+ if (IS_ERR(ah)) {
+ ipoib_warn(priv, "ib_address_create failed %ld\n",
+ -PTR_ERR(ah));
+ /* use original error */
+ return PTR_ERR(ah);
} else {
spin_lock_irq(&priv->lock);
mcast->ah = ah;
@@ -266,7 +269,7 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
skb->dev = dev;
if (dst)
- n = dst_get_neighbour(dst);
+ n = dst_get_neighbour_raw(dst);
if (!dst || !n) {
/* put pseudoheader back on for next time */
skb_push(skb, sizeof (struct ipoib_pseudoheader));
@@ -722,6 +725,8 @@ out:
if (mcast && mcast->ah) {
struct dst_entry *dst = skb_dst(skb);
struct neighbour *n = NULL;
+
+ rcu_read_lock();
if (dst)
n = dst_get_neighbour(dst);
if (n && !*to_ipoib_neigh(n)) {
@@ -734,7 +739,7 @@ out:
list_add_tail(&neigh->list, &mcast->neigh_list);
}
}
-
+ rcu_read_unlock();
spin_unlock_irqrestore(&priv->lock, flags);
ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
return;
diff --git a/drivers/input/misc/cma3000_d0x.c b/drivers/input/misc/cma3000_d0x.c
index 80793f1608e..06517e60e50 100644
--- a/drivers/input/misc/cma3000_d0x.c
+++ b/drivers/input/misc/cma3000_d0x.c
@@ -115,8 +115,8 @@ static void decode_mg(struct cma3000_accl_data *data, int *datax,
static irqreturn_t cma3000_thread_irq(int irq, void *dev_id)
{
struct cma3000_accl_data *data = dev_id;
- int datax, datay, dataz;
- u8 ctrl, mode, range, intr_status;
+ int datax, datay, dataz, intr_status;
+ u8 ctrl, mode, range;
intr_status = CMA3000_READ(data, CMA3000_INTSTATUS, "interrupt status");
if (intr_status < 0)
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index c080b828e5d..a6dcd18e9ad 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -24,6 +24,7 @@
*/
#include <linux/module.h>
+#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/input/mt.h>
#include <linux/serio.h>
@@ -1220,6 +1221,16 @@ static int synaptics_reconnect(struct psmouse *psmouse)
do {
psmouse_reset(psmouse);
+ if (retry) {
+ /*
+ * On some boxes, right after resuming, the touchpad
+ * needs some time to finish initializing (I assume
+ * it needs time to calibrate) and start responding
+ * to Synaptics-specific queries, so let's wait a
+ * bit.
+ */
+ ssleep(1);
+ }
error = synaptics_detect(psmouse, 0);
} while (error && ++retry < 3);
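The synaptics_reconnect hunk adds a one-second settle delay before retrying Synaptics detection after resume. A simplified sketch of that retry shape (the reset step and the real probe call are omitted; detect() stands in for synaptics_detect()):

    #include <linux/delay.h>

    static int example_reconnect(int (*detect)(void))
    {
            int error, retry = 0;

            do {
                    if (retry)
                            ssleep(1);      /* give the touchpad time to finish calibrating */
                    error = detect();
            } while (error && ++retry < 3);

            return error;
    }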
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index da0d8761e77..2ee47d01a3b 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -1470,6 +1470,9 @@ static const struct wacom_features wacom_features_0xE3 =
static const struct wacom_features wacom_features_0xE6 =
{ "Wacom ISDv4 E6", WACOM_PKGLEN_TPC2FG, 27760, 15694, 255,
0, TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0xEC =
+ { "Wacom ISDv4 EC", WACOM_PKGLEN_GRAPHIRE, 25710, 14500, 255,
+ 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x47 =
{ "Wacom Intuos2 6x8", WACOM_PKGLEN_INTUOS, 20320, 16240, 1023,
31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -1611,6 +1614,7 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0xE2) },
{ USB_DEVICE_WACOM(0xE3) },
{ USB_DEVICE_WACOM(0xE6) },
+ { USB_DEVICE_WACOM(0xEC) },
{ USB_DEVICE_WACOM(0x47) },
{ USB_DEVICE_LENOVO(0x6004) },
{ }
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index c0c7820d4c4..bdc447fd476 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -405,6 +405,9 @@ int dmar_disabled = 0;
int dmar_disabled = 1;
#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
+int intel_iommu_enabled = 0;
+EXPORT_SYMBOL_GPL(intel_iommu_enabled);
+
static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
@@ -3524,7 +3527,7 @@ found:
return 0;
}
-int dmar_parse_rmrr_atsr_dev(void)
+int __init dmar_parse_rmrr_atsr_dev(void)
{
struct dmar_rmrr_unit *rmrr, *rmrr_n;
struct dmar_atsr_unit *atsr, *atsr_n;
@@ -3647,6 +3650,8 @@ int __init intel_iommu_init(void)
bus_register_notifier(&pci_bus_type, &device_nb);
+ intel_iommu_enabled = 1;
+
return 0;
}
diff --git a/drivers/iommu/intr_remapping.c b/drivers/iommu/intr_remapping.c
index 07c9f189f31..6777ca04947 100644
--- a/drivers/iommu/intr_remapping.c
+++ b/drivers/iommu/intr_remapping.c
@@ -773,7 +773,7 @@ int __init parse_ioapics_under_ir(void)
return ir_supported;
}
-int ir_dev_scope_init(void)
+int __init ir_dev_scope_init(void)
{
if (!intr_remapping_enabled)
return 0;
diff --git a/drivers/isdn/divert/divert_procfs.c b/drivers/isdn/divert/divert_procfs.c
index 33ec9e46777..9021182c4b7 100644
--- a/drivers/isdn/divert/divert_procfs.c
+++ b/drivers/isdn/divert/divert_procfs.c
@@ -242,6 +242,12 @@ static int isdn_divert_ioctl_unlocked(struct file *file, uint cmd, ulong arg)
case IIOCDOCFINT:
if (!divert_if.drv_to_name(dioctl.cf_ctrl.drvid))
return (-EINVAL); /* invalid driver */
+ if (strnlen(dioctl.cf_ctrl.msn, sizeof(dioctl.cf_ctrl.msn)) ==
+ sizeof(dioctl.cf_ctrl.msn))
+ return -EINVAL;
+ if (strnlen(dioctl.cf_ctrl.fwd_nr, sizeof(dioctl.cf_ctrl.fwd_nr)) ==
+ sizeof(dioctl.cf_ctrl.fwd_nr))
+ return -EINVAL;
if ((i = cf_command(dioctl.cf_ctrl.drvid,
(cmd == IIOCDOCFACT) ? 1 : (cmd == IIOCDOCFDIS) ? 0 : 2,
dioctl.cf_ctrl.cfproc,
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index 1f73d7f7e02..2339d7396b9 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -2756,6 +2756,9 @@ isdn_net_setcfg(isdn_net_ioctl_cfg * cfg)
char *c,
*e;
+ if (strnlen(cfg->drvid, sizeof(cfg->drvid)) ==
+ sizeof(cfg->drvid))
+ return -EINVAL;
drvidx = -1;
chidx = -1;
strcpy(drvid, cfg->drvid);
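Both isdn hunks apply the same defensive check: a string copied out of an ioctl argument must be NUL-terminated within its declared buffer, otherwise the later strcpy() would overrun the destination. A self-contained sketch of the check, with an illustrative buffer size:

    #include <errno.h>
    #include <string.h>

    #define ID_LEN 25       /* illustrative size, not the driver's */

    static int example_set_id(char dst[ID_LEN], const char src[ID_LEN])
    {
            /* strnlen() == ID_LEN means no NUL inside the buffer: reject it. */
            if (strnlen(src, ID_LEN) == ID_LEN)
                    return -EINVAL;
            strcpy(dst, src);       /* safe: src is known to be terminated */
            return 0;
    }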
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 7878712721b..b6907118283 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1106,10 +1106,12 @@ void bitmap_write_all(struct bitmap *bitmap)
*/
int i;
+ spin_lock_irq(&bitmap->lock);
for (i = 0; i < bitmap->file_pages; i++)
set_page_attr(bitmap, bitmap->filemap[i],
BITMAP_PAGE_NEEDWRITE);
bitmap->allclean = 0;
+ spin_unlock_irq(&bitmap->lock);
}
static void bitmap_count_page(struct bitmap *bitmap, sector_t offset, int inc)
@@ -1605,7 +1607,9 @@ void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
for (chunk = s; chunk <= e; chunk++) {
sector_t sec = (sector_t)chunk << CHUNK_BLOCK_SHIFT(bitmap);
bitmap_set_memory_bits(bitmap, sec, 1);
+ spin_lock_irq(&bitmap->lock);
bitmap_file_set_bit(bitmap, sec);
+ spin_unlock_irq(&bitmap->lock);
if (sec < bitmap->mddev->recovery_cp)
/* We are asserting that the array is dirty,
* so move the recovery_cp address back so
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 84acfe7d10e..ee981737edf 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -570,7 +570,7 @@ static void mddev_put(struct mddev *mddev)
mddev->ctime == 0 && !mddev->hold_active) {
/* Array is not configured at all, and not held active,
* so destroy it */
- list_del(&mddev->all_mddevs);
+ list_del_init(&mddev->all_mddevs);
bs = mddev->bio_set;
mddev->bio_set = NULL;
if (mddev->gendisk) {
@@ -2546,7 +2546,8 @@ state_show(struct md_rdev *rdev, char *page)
sep = ",";
}
if (test_bit(Blocked, &rdev->flags) ||
- rdev->badblocks.unacked_exist) {
+ (rdev->badblocks.unacked_exist
+ && !test_bit(Faulty, &rdev->flags))) {
len += sprintf(page+len, "%sblocked", sep);
sep = ",";
}
@@ -3788,6 +3789,8 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
if (err)
return err;
else {
+ if (mddev->hold_active == UNTIL_IOCTL)
+ mddev->hold_active = 0;
sysfs_notify_dirent_safe(mddev->sysfs_state);
return len;
}
@@ -4487,11 +4490,20 @@ md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
if (!entry->show)
return -EIO;
+ spin_lock(&all_mddevs_lock);
+ if (list_empty(&mddev->all_mddevs)) {
+ spin_unlock(&all_mddevs_lock);
+ return -EBUSY;
+ }
+ mddev_get(mddev);
+ spin_unlock(&all_mddevs_lock);
+
rv = mddev_lock(mddev);
if (!rv) {
rv = entry->show(mddev, page);
mddev_unlock(mddev);
}
+ mddev_put(mddev);
return rv;
}
@@ -4507,13 +4519,19 @@ md_attr_store(struct kobject *kobj, struct attribute *attr,
return -EIO;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
+ spin_lock(&all_mddevs_lock);
+ if (list_empty(&mddev->all_mddevs)) {
+ spin_unlock(&all_mddevs_lock);
+ return -EBUSY;
+ }
+ mddev_get(mddev);
+ spin_unlock(&all_mddevs_lock);
rv = mddev_lock(mddev);
- if (mddev->hold_active == UNTIL_IOCTL)
- mddev->hold_active = 0;
if (!rv) {
rv = entry->store(mddev, page, length);
mddev_unlock(mddev);
}
+ mddev_put(mddev);
return rv;
}
@@ -7840,6 +7858,7 @@ int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
s + rdev->data_offset, sectors, acknowledged);
if (rv) {
/* Make sure they get written out promptly */
+ sysfs_notify_dirent_safe(rdev->sysfs_state);
set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
md_wakeup_thread(rdev->mddev->thread);
}
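Several of the md.c hunks close the same race: a sysfs show/store can run while the mddev is being torn down, so the callbacks now check, under all_mddevs_lock, that the device is still on the global list (made detectable by the switch to list_del_init()) and pin it with mddev_get()/mddev_put() around the attribute call. A rough sketch of the shape, with stand-in names:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_all_lock);

    struct example_dev {
            struct list_head all;   /* removed with list_del_init() on teardown */
    };

    static int example_attr_access(struct example_dev *dev)
    {
            int rv;

            spin_lock(&example_all_lock);
            if (list_empty(&dev->all)) {            /* already half torn down */
                    spin_unlock(&example_all_lock);
                    return -EBUSY;
            }
            /* pin the device while the lock still guarantees it is alive,
             * e.g. example_get(dev) */
            spin_unlock(&example_all_lock);

            rv = 0;         /* ... call the real show/store handler here ... */

            /* drop the pin afterwards, e.g. example_put(dev) */
            return rv;
    }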
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 297e2609217..31670f8d6b6 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3036,6 +3036,8 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
if (dev->written)
s->written++;
rdev = rcu_dereference(conf->disks[i].rdev);
+ if (rdev && test_bit(Faulty, &rdev->flags))
+ rdev = NULL;
if (rdev) {
is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
&first_bad, &bad_sectors);
@@ -3063,12 +3065,12 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
}
} else if (test_bit(In_sync, &rdev->flags))
set_bit(R5_Insync, &dev->flags);
- else if (!test_bit(Faulty, &rdev->flags)) {
+ else {
/* in sync if before recovery_offset */
if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
set_bit(R5_Insync, &dev->flags);
}
- if (test_bit(R5_WriteError, &dev->flags)) {
+ if (rdev && test_bit(R5_WriteError, &dev->flags)) {
clear_bit(R5_Insync, &dev->flags);
if (!test_bit(Faulty, &rdev->flags)) {
s->handle_bad_blocks = 1;
@@ -3076,7 +3078,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
} else
clear_bit(R5_WriteError, &dev->flags);
}
- if (test_bit(R5_MadeGood, &dev->flags)) {
+ if (rdev && test_bit(R5_MadeGood, &dev->flags)) {
if (!test_bit(Faulty, &rdev->flags)) {
s->handle_bad_blocks = 1;
atomic_inc(&rdev->nr_pending);
diff --git a/drivers/media/common/tuners/mxl5007t.c b/drivers/media/common/tuners/mxl5007t.c
index 7eb1bf75cd0..5d02221e99d 100644
--- a/drivers/media/common/tuners/mxl5007t.c
+++ b/drivers/media/common/tuners/mxl5007t.c
@@ -488,9 +488,10 @@ static int mxl5007t_write_regs(struct mxl5007t_state *state,
static int mxl5007t_read_reg(struct mxl5007t_state *state, u8 reg, u8 *val)
{
+ u8 buf[2] = { 0xfb, reg };
struct i2c_msg msg[] = {
{ .addr = state->i2c_props.addr, .flags = 0,
- .buf = &reg, .len = 1 },
+ .buf = buf, .len = 2 },
{ .addr = state->i2c_props.addr, .flags = I2C_M_RD,
.buf = val, .len = 1 },
};
diff --git a/drivers/media/common/tuners/tda18218.c b/drivers/media/common/tuners/tda18218.c
index aacfe2387e2..4fc29730a12 100644
--- a/drivers/media/common/tuners/tda18218.c
+++ b/drivers/media/common/tuners/tda18218.c
@@ -141,7 +141,7 @@ static int tda18218_set_params(struct dvb_frontend *fe,
switch (params->u.ofdm.bandwidth) {
case BANDWIDTH_6_MHZ:
LP_Fc = 0;
- LO_Frac = params->frequency + 4000000;
+ LO_Frac = params->frequency + 3000000;
break;
case BANDWIDTH_7_MHZ:
LP_Fc = 1;
diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c
index 303f22ea04c..01bb8daf4b0 100644
--- a/drivers/media/rc/ati_remote.c
+++ b/drivers/media/rc/ati_remote.c
@@ -189,7 +189,7 @@ struct ati_remote {
dma_addr_t inbuf_dma;
dma_addr_t outbuf_dma;
- unsigned char old_data[2]; /* Detect duplicate events */
+ unsigned char old_data; /* Detect duplicate events */
unsigned long old_jiffies;
unsigned long acc_jiffies; /* handle acceleration */
unsigned long first_jiffies;
@@ -221,35 +221,35 @@ struct ati_remote {
/* Translation table from hardware messages to input events. */
static const struct {
short kind;
- unsigned char data1, data2;
+ unsigned char data;
int type;
unsigned int code;
int value;
} ati_remote_tbl[] = {
/* Directional control pad axes */
- {KIND_ACCEL, 0x35, 0x70, EV_REL, REL_X, -1}, /* left */
- {KIND_ACCEL, 0x36, 0x71, EV_REL, REL_X, 1}, /* right */
- {KIND_ACCEL, 0x37, 0x72, EV_REL, REL_Y, -1}, /* up */
- {KIND_ACCEL, 0x38, 0x73, EV_REL, REL_Y, 1}, /* down */
+ {KIND_ACCEL, 0x70, EV_REL, REL_X, -1}, /* left */
+ {KIND_ACCEL, 0x71, EV_REL, REL_X, 1}, /* right */
+ {KIND_ACCEL, 0x72, EV_REL, REL_Y, -1}, /* up */
+ {KIND_ACCEL, 0x73, EV_REL, REL_Y, 1}, /* down */
/* Directional control pad diagonals */
- {KIND_LU, 0x39, 0x74, EV_REL, 0, 0}, /* left up */
- {KIND_RU, 0x3a, 0x75, EV_REL, 0, 0}, /* right up */
- {KIND_LD, 0x3c, 0x77, EV_REL, 0, 0}, /* left down */
- {KIND_RD, 0x3b, 0x76, EV_REL, 0, 0}, /* right down */
+ {KIND_LU, 0x74, EV_REL, 0, 0}, /* left up */
+ {KIND_RU, 0x75, EV_REL, 0, 0}, /* right up */
+ {KIND_LD, 0x77, EV_REL, 0, 0}, /* left down */
+ {KIND_RD, 0x76, EV_REL, 0, 0}, /* right down */
/* "Mouse button" buttons */
- {KIND_LITERAL, 0x3d, 0x78, EV_KEY, BTN_LEFT, 1}, /* left btn down */
- {KIND_LITERAL, 0x3e, 0x79, EV_KEY, BTN_LEFT, 0}, /* left btn up */
- {KIND_LITERAL, 0x41, 0x7c, EV_KEY, BTN_RIGHT, 1},/* right btn down */
- {KIND_LITERAL, 0x42, 0x7d, EV_KEY, BTN_RIGHT, 0},/* right btn up */
+ {KIND_LITERAL, 0x78, EV_KEY, BTN_LEFT, 1}, /* left btn down */
+ {KIND_LITERAL, 0x79, EV_KEY, BTN_LEFT, 0}, /* left btn up */
+ {KIND_LITERAL, 0x7c, EV_KEY, BTN_RIGHT, 1},/* right btn down */
+ {KIND_LITERAL, 0x7d, EV_KEY, BTN_RIGHT, 0},/* right btn up */
/* Artificial "doubleclick" events are generated by the hardware.
* They are mapped to the "side" and "extra" mouse buttons here. */
- {KIND_FILTERED, 0x3f, 0x7a, EV_KEY, BTN_SIDE, 1}, /* left dblclick */
- {KIND_FILTERED, 0x43, 0x7e, EV_KEY, BTN_EXTRA, 1},/* right dblclick */
+ {KIND_FILTERED, 0x7a, EV_KEY, BTN_SIDE, 1}, /* left dblclick */
+ {KIND_FILTERED, 0x7e, EV_KEY, BTN_EXTRA, 1},/* right dblclick */
/* Non-mouse events are handled by rc-core */
- {KIND_END, 0x00, 0x00, EV_MAX + 1, 0, 0}
+ {KIND_END, 0x00, EV_MAX + 1, 0, 0}
};
/* Local function prototypes */
@@ -397,25 +397,6 @@ static int ati_remote_sendpacket(struct ati_remote *ati_remote, u16 cmd, unsigne
}
/*
- * ati_remote_event_lookup
- */
-static int ati_remote_event_lookup(int rem, unsigned char d1, unsigned char d2)
-{
- int i;
-
- for (i = 0; ati_remote_tbl[i].kind != KIND_END; i++) {
- /*
- * Decide if the table entry matches the remote input.
- */
- if (ati_remote_tbl[i].data1 == d1 &&
- ati_remote_tbl[i].data2 == d2)
- return i;
-
- }
- return -1;
-}
-
-/*
* ati_remote_compute_accel
*
* Implements acceleration curve for directional control pad
@@ -463,7 +444,15 @@ static void ati_remote_input_report(struct urb *urb)
int index = -1;
int acc;
int remote_num;
- unsigned char scancode[2];
+ unsigned char scancode;
+ int i;
+
+ /*
+ * data[0] = 0x14
+ * data[1] = data[2] + data[3] + 0xd5 (a checksum byte)
+ * data[2] = the key code (with toggle bit in MSB with some models)
+ * data[3] = channel << 4 (the low 4 bits must be zero)
+ */
/* Deal with strange looking inputs */
if ( (urb->actual_length != 4) || (data[0] != 0x14) ||
@@ -472,6 +461,13 @@ static void ati_remote_input_report(struct urb *urb)
return;
}
+ if (data[1] != ((data[2] + data[3] + 0xd5) & 0xff)) {
+ dbginfo(&ati_remote->interface->dev,
+ "wrong checksum in input: %02x %02x %02x %02x\n",
+ data[0], data[1], data[2], data[3]);
+ return;
+ }
+
/* Mask unwanted remote channels. */
/* note: remote_num is 0-based, channel 1 on remote == 0 here */
remote_num = (data[3] >> 4) & 0x0f;
@@ -482,31 +478,30 @@ static void ati_remote_input_report(struct urb *urb)
return;
}
- scancode[0] = (((data[1] - ((remote_num + 1) << 4)) & 0xf0) | (data[1] & 0x0f));
-
/*
- * Some devices (e.g. SnapStream Firefly) use 8080 as toggle code,
- * so we have to clear them. The first bit is a bit tricky as the
- * "non-toggled" state depends on remote_num, so we xor it with the
- * second bit which is only used for toggle.
+ * MSB is a toggle code, though only used by some devices
+ * (e.g. SnapStream Firefly)
*/
- scancode[0] ^= (data[2] & 0x80);
-
- scancode[1] = data[2] & ~0x80;
+ scancode = data[2] & 0x7f;
- /* Look up event code index in mouse translation table. */
- index = ati_remote_event_lookup(remote_num, scancode[0], scancode[1]);
+ /* Look up event code index in the mouse translation table. */
+ for (i = 0; ati_remote_tbl[i].kind != KIND_END; i++) {
+ if (scancode == ati_remote_tbl[i].data) {
+ index = i;
+ break;
+ }
+ }
if (index >= 0) {
dbginfo(&ati_remote->interface->dev,
- "channel 0x%02x; mouse data %02x,%02x; index %d; keycode %d\n",
- remote_num, data[1], data[2], index, ati_remote_tbl[index].code);
+ "channel 0x%02x; mouse data %02x; index %d; keycode %d\n",
+ remote_num, data[2], index, ati_remote_tbl[index].code);
if (!dev)
return; /* no mouse device */
} else
dbginfo(&ati_remote->interface->dev,
- "channel 0x%02x; key data %02x,%02x, scancode %02x,%02x\n",
- remote_num, data[1], data[2], scancode[0], scancode[1]);
+ "channel 0x%02x; key data %02x, scancode %02x\n",
+ remote_num, data[2], scancode);
if (index >= 0 && ati_remote_tbl[index].kind == KIND_LITERAL) {
@@ -523,8 +518,7 @@ static void ati_remote_input_report(struct urb *urb)
unsigned long now = jiffies;
/* Filter duplicate events which happen "too close" together. */
- if (ati_remote->old_data[0] == data[1] &&
- ati_remote->old_data[1] == data[2] &&
+ if (ati_remote->old_data == data[2] &&
time_before(now, ati_remote->old_jiffies +
msecs_to_jiffies(repeat_filter))) {
ati_remote->repeat_count++;
@@ -533,8 +527,7 @@ static void ati_remote_input_report(struct urb *urb)
ati_remote->first_jiffies = now;
}
- ati_remote->old_data[0] = data[1];
- ati_remote->old_data[1] = data[2];
+ ati_remote->old_data = data[2];
ati_remote->old_jiffies = now;
/* Ensure we skip at least the 4 first duplicate events (generated
@@ -549,14 +542,13 @@ static void ati_remote_input_report(struct urb *urb)
if (index < 0) {
/* Not a mouse event, hand it to rc-core. */
- u32 rc_code = (scancode[0] << 8) | scancode[1];
/*
* We don't use the rc-core repeat handling yet as
* it would cause ghost repeats which would be a
* regression for this driver.
*/
- rc_keydown_notimeout(ati_remote->rdev, rc_code,
+ rc_keydown_notimeout(ati_remote->rdev, scancode,
data[2]);
rc_keyup(ati_remote->rdev);
return;
@@ -607,8 +599,7 @@ static void ati_remote_input_report(struct urb *urb)
input_sync(dev);
ati_remote->old_jiffies = jiffies;
- ati_remote->old_data[0] = data[1];
- ati_remote->old_data[1] = data[2];
+ ati_remote->old_data = data[2];
}
}
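The rewritten ati_remote_input_report() documents the 4-byte packet layout and rejects frames whose checksum byte does not match. A standalone validator built from that comment (the constants come straight from the hunk above):

    #include <stdbool.h>
    #include <stdint.h>

    static bool ati_packet_ok(const uint8_t data[4])
    {
            if (data[0] != 0x14)                            /* fixed header byte */
                    return false;
            if (data[1] != ((data[2] + data[3] + 0xd5) & 0xff))
                    return false;                           /* checksum mismatch */
            if (data[3] & 0x0f)                             /* low nibble must be zero */
                    return false;
            return true;
    }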
diff --git a/drivers/media/rc/keymaps/rc-ati-x10.c b/drivers/media/rc/keymaps/rc-ati-x10.c
index e1b8b2605c4..81506440ede 100644
--- a/drivers/media/rc/keymaps/rc-ati-x10.c
+++ b/drivers/media/rc/keymaps/rc-ati-x10.c
@@ -27,55 +27,55 @@
#include <media/rc-map.h>
static struct rc_map_table ati_x10[] = {
- { 0xd20d, KEY_1 },
- { 0xd30e, KEY_2 },
- { 0xd40f, KEY_3 },
- { 0xd510, KEY_4 },
- { 0xd611, KEY_5 },
- { 0xd712, KEY_6 },
- { 0xd813, KEY_7 },
- { 0xd914, KEY_8 },
- { 0xda15, KEY_9 },
- { 0xdc17, KEY_0 },
- { 0xc500, KEY_A },
- { 0xc601, KEY_B },
- { 0xde19, KEY_C },
- { 0xe01b, KEY_D },
- { 0xe621, KEY_E },
- { 0xe823, KEY_F },
+ { 0x0d, KEY_1 },
+ { 0x0e, KEY_2 },
+ { 0x0f, KEY_3 },
+ { 0x10, KEY_4 },
+ { 0x11, KEY_5 },
+ { 0x12, KEY_6 },
+ { 0x13, KEY_7 },
+ { 0x14, KEY_8 },
+ { 0x15, KEY_9 },
+ { 0x17, KEY_0 },
+ { 0x00, KEY_A },
+ { 0x01, KEY_B },
+ { 0x19, KEY_C },
+ { 0x1b, KEY_D },
+ { 0x21, KEY_E },
+ { 0x23, KEY_F },
- { 0xdd18, KEY_KPENTER }, /* "check" */
- { 0xdb16, KEY_MENU }, /* "menu" */
- { 0xc702, KEY_POWER }, /* Power */
- { 0xc803, KEY_TV }, /* TV */
- { 0xc904, KEY_DVD }, /* DVD */
- { 0xca05, KEY_WWW }, /* WEB */
- { 0xcb06, KEY_BOOKMARKS }, /* "book" */
- { 0xcc07, KEY_EDIT }, /* "hand" */
- { 0xe11c, KEY_COFFEE }, /* "timer" */
- { 0xe520, KEY_FRONT }, /* "max" */
- { 0xe21d, KEY_LEFT }, /* left */
- { 0xe41f, KEY_RIGHT }, /* right */
- { 0xe722, KEY_DOWN }, /* down */
- { 0xdf1a, KEY_UP }, /* up */
- { 0xe31e, KEY_OK }, /* "OK" */
- { 0xce09, KEY_VOLUMEDOWN }, /* VOL + */
- { 0xcd08, KEY_VOLUMEUP }, /* VOL - */
- { 0xcf0a, KEY_MUTE }, /* MUTE */
- { 0xd00b, KEY_CHANNELUP }, /* CH + */
- { 0xd10c, KEY_CHANNELDOWN },/* CH - */
- { 0xec27, KEY_RECORD }, /* ( o) red */
- { 0xea25, KEY_PLAY }, /* ( >) */
- { 0xe924, KEY_REWIND }, /* (<<) */
- { 0xeb26, KEY_FORWARD }, /* (>>) */
- { 0xed28, KEY_STOP }, /* ([]) */
- { 0xee29, KEY_PAUSE }, /* ('') */
- { 0xf02b, KEY_PREVIOUS }, /* (<-) */
- { 0xef2a, KEY_NEXT }, /* (>+) */
- { 0xf22d, KEY_INFO }, /* PLAYING */
- { 0xf32e, KEY_HOME }, /* TOP */
- { 0xf42f, KEY_END }, /* END */
- { 0xf530, KEY_SELECT }, /* SELECT */
+ { 0x18, KEY_KPENTER }, /* "check" */
+ { 0x16, KEY_MENU }, /* "menu" */
+ { 0x02, KEY_POWER }, /* Power */
+ { 0x03, KEY_TV }, /* TV */
+ { 0x04, KEY_DVD }, /* DVD */
+ { 0x05, KEY_WWW }, /* WEB */
+ { 0x06, KEY_BOOKMARKS }, /* "book" */
+ { 0x07, KEY_EDIT }, /* "hand" */
+ { 0x1c, KEY_COFFEE }, /* "timer" */
+ { 0x20, KEY_FRONT }, /* "max" */
+ { 0x1d, KEY_LEFT }, /* left */
+ { 0x1f, KEY_RIGHT }, /* right */
+ { 0x22, KEY_DOWN }, /* down */
+ { 0x1a, KEY_UP }, /* up */
+ { 0x1e, KEY_OK }, /* "OK" */
+ { 0x09, KEY_VOLUMEDOWN }, /* VOL + */
+ { 0x08, KEY_VOLUMEUP }, /* VOL - */
+ { 0x0a, KEY_MUTE }, /* MUTE */
+ { 0x0b, KEY_CHANNELUP }, /* CH + */
+ { 0x0c, KEY_CHANNELDOWN },/* CH - */
+ { 0x27, KEY_RECORD }, /* ( o) red */
+ { 0x25, KEY_PLAY }, /* ( >) */
+ { 0x24, KEY_REWIND }, /* (<<) */
+ { 0x26, KEY_FORWARD }, /* (>>) */
+ { 0x28, KEY_STOP }, /* ([]) */
+ { 0x29, KEY_PAUSE }, /* ('') */
+ { 0x2b, KEY_PREVIOUS }, /* (<-) */
+ { 0x2a, KEY_NEXT }, /* (>+) */
+ { 0x2d, KEY_INFO }, /* PLAYING */
+ { 0x2e, KEY_HOME }, /* TOP */
+ { 0x2f, KEY_END }, /* END */
+ { 0x30, KEY_SELECT }, /* SELECT */
};
static struct rc_map_list ati_x10_map = {
diff --git a/drivers/media/rc/keymaps/rc-medion-x10.c b/drivers/media/rc/keymaps/rc-medion-x10.c
index 09e2cc01d11..479cdb89781 100644
--- a/drivers/media/rc/keymaps/rc-medion-x10.c
+++ b/drivers/media/rc/keymaps/rc-medion-x10.c
@@ -25,70 +25,70 @@
#include <media/rc-map.h>
static struct rc_map_table medion_x10[] = {
- { 0xf12c, KEY_TV }, /* TV */
- { 0xf22d, KEY_VCR }, /* VCR */
- { 0xc904, KEY_DVD }, /* DVD */
- { 0xcb06, KEY_AUDIO }, /* MUSIC */
-
- { 0xf32e, KEY_RADIO }, /* RADIO */
- { 0xca05, KEY_DIRECTORY }, /* PHOTO */
- { 0xf42f, KEY_INFO }, /* TV-PREVIEW */
- { 0xf530, KEY_LIST }, /* CHANNEL-LST */
-
- { 0xe01b, KEY_SETUP }, /* SETUP */
- { 0xf631, KEY_VIDEO }, /* VIDEO DESKTOP */
-
- { 0xcd08, KEY_VOLUMEDOWN }, /* VOL - */
- { 0xce09, KEY_VOLUMEUP }, /* VOL + */
- { 0xd00b, KEY_CHANNELUP }, /* CHAN + */
- { 0xd10c, KEY_CHANNELDOWN }, /* CHAN - */
- { 0xc500, KEY_MUTE }, /* MUTE */
-
- { 0xf732, KEY_RED }, /* red */
- { 0xf833, KEY_GREEN }, /* green */
- { 0xf934, KEY_YELLOW }, /* yellow */
- { 0xfa35, KEY_BLUE }, /* blue */
- { 0xdb16, KEY_TEXT }, /* TXT */
-
- { 0xd20d, KEY_1 },
- { 0xd30e, KEY_2 },
- { 0xd40f, KEY_3 },
- { 0xd510, KEY_4 },
- { 0xd611, KEY_5 },
- { 0xd712, KEY_6 },
- { 0xd813, KEY_7 },
- { 0xd914, KEY_8 },
- { 0xda15, KEY_9 },
- { 0xdc17, KEY_0 },
- { 0xe11c, KEY_SEARCH }, /* TV/RAD, CH SRC */
- { 0xe520, KEY_DELETE }, /* DELETE */
-
- { 0xfb36, KEY_KEYBOARD }, /* RENAME */
- { 0xdd18, KEY_SCREEN }, /* SNAPSHOT */
-
- { 0xdf1a, KEY_UP }, /* up */
- { 0xe722, KEY_DOWN }, /* down */
- { 0xe21d, KEY_LEFT }, /* left */
- { 0xe41f, KEY_RIGHT }, /* right */
- { 0xe31e, KEY_OK }, /* OK */
-
- { 0xfc37, KEY_SELECT }, /* ACQUIRE IMAGE */
- { 0xfd38, KEY_EDIT }, /* EDIT IMAGE */
-
- { 0xe924, KEY_REWIND }, /* rewind (<<) */
- { 0xea25, KEY_PLAY }, /* play ( >) */
- { 0xeb26, KEY_FORWARD }, /* forward (>>) */
- { 0xec27, KEY_RECORD }, /* record ( o) */
- { 0xed28, KEY_STOP }, /* stop ([]) */
- { 0xee29, KEY_PAUSE }, /* pause ('') */
-
- { 0xe621, KEY_PREVIOUS }, /* prev */
- { 0xfe39, KEY_SWITCHVIDEOMODE }, /* F SCR */
- { 0xe823, KEY_NEXT }, /* next */
- { 0xde19, KEY_MENU }, /* MENU */
- { 0xff3a, KEY_LANGUAGE }, /* AUDIO */
-
- { 0xc702, KEY_POWER }, /* POWER */
+ { 0x2c, KEY_TV }, /* TV */
+ { 0x2d, KEY_VCR }, /* VCR */
+ { 0x04, KEY_DVD }, /* DVD */
+ { 0x06, KEY_AUDIO }, /* MUSIC */
+
+ { 0x2e, KEY_RADIO }, /* RADIO */
+ { 0x05, KEY_DIRECTORY }, /* PHOTO */
+ { 0x2f, KEY_INFO }, /* TV-PREVIEW */
+ { 0x30, KEY_LIST }, /* CHANNEL-LST */
+
+ { 0x1b, KEY_SETUP }, /* SETUP */
+ { 0x31, KEY_VIDEO }, /* VIDEO DESKTOP */
+
+ { 0x08, KEY_VOLUMEDOWN }, /* VOL - */
+ { 0x09, KEY_VOLUMEUP }, /* VOL + */
+ { 0x0b, KEY_CHANNELUP }, /* CHAN + */
+ { 0x0c, KEY_CHANNELDOWN }, /* CHAN - */
+ { 0x00, KEY_MUTE }, /* MUTE */
+
+ { 0x32, KEY_RED }, /* red */
+ { 0x33, KEY_GREEN }, /* green */
+ { 0x34, KEY_YELLOW }, /* yellow */
+ { 0x35, KEY_BLUE }, /* blue */
+ { 0x16, KEY_TEXT }, /* TXT */
+
+ { 0x0d, KEY_1 },
+ { 0x0e, KEY_2 },
+ { 0x0f, KEY_3 },
+ { 0x10, KEY_4 },
+ { 0x11, KEY_5 },
+ { 0x12, KEY_6 },
+ { 0x13, KEY_7 },
+ { 0x14, KEY_8 },
+ { 0x15, KEY_9 },
+ { 0x17, KEY_0 },
+ { 0x1c, KEY_SEARCH }, /* TV/RAD, CH SRC */
+ { 0x20, KEY_DELETE }, /* DELETE */
+
+ { 0x36, KEY_KEYBOARD }, /* RENAME */
+ { 0x18, KEY_SCREEN }, /* SNAPSHOT */
+
+ { 0x1a, KEY_UP }, /* up */
+ { 0x22, KEY_DOWN }, /* down */
+ { 0x1d, KEY_LEFT }, /* left */
+ { 0x1f, KEY_RIGHT }, /* right */
+ { 0x1e, KEY_OK }, /* OK */
+
+ { 0x37, KEY_SELECT }, /* ACQUIRE IMAGE */
+ { 0x38, KEY_EDIT }, /* EDIT IMAGE */
+
+ { 0x24, KEY_REWIND }, /* rewind (<<) */
+ { 0x25, KEY_PLAY }, /* play ( >) */
+ { 0x26, KEY_FORWARD }, /* forward (>>) */
+ { 0x27, KEY_RECORD }, /* record ( o) */
+ { 0x28, KEY_STOP }, /* stop ([]) */
+ { 0x29, KEY_PAUSE }, /* pause ('') */
+
+ { 0x21, KEY_PREVIOUS }, /* prev */
+ { 0x39, KEY_SWITCHVIDEOMODE }, /* F SCR */
+ { 0x23, KEY_NEXT }, /* next */
+ { 0x19, KEY_MENU }, /* MENU */
+ { 0x3a, KEY_LANGUAGE }, /* AUDIO */
+
+ { 0x02, KEY_POWER }, /* POWER */
};
static struct rc_map_list medion_x10_map = {
diff --git a/drivers/media/rc/keymaps/rc-snapstream-firefly.c b/drivers/media/rc/keymaps/rc-snapstream-firefly.c
index ef146520931..c7f33ec719b 100644
--- a/drivers/media/rc/keymaps/rc-snapstream-firefly.c
+++ b/drivers/media/rc/keymaps/rc-snapstream-firefly.c
@@ -22,63 +22,63 @@
#include <media/rc-map.h>
static struct rc_map_table snapstream_firefly[] = {
- { 0xf12c, KEY_ZOOM }, /* Maximize */
- { 0xc702, KEY_CLOSE },
-
- { 0xd20d, KEY_1 },
- { 0xd30e, KEY_2 },
- { 0xd40f, KEY_3 },
- { 0xd510, KEY_4 },
- { 0xd611, KEY_5 },
- { 0xd712, KEY_6 },
- { 0xd813, KEY_7 },
- { 0xd914, KEY_8 },
- { 0xda15, KEY_9 },
- { 0xdc17, KEY_0 },
- { 0xdb16, KEY_BACK },
- { 0xdd18, KEY_KPENTER }, /* ent */
-
- { 0xce09, KEY_VOLUMEUP },
- { 0xcd08, KEY_VOLUMEDOWN },
- { 0xcf0a, KEY_MUTE },
- { 0xd00b, KEY_CHANNELUP },
- { 0xd10c, KEY_CHANNELDOWN },
- { 0xc500, KEY_VENDOR }, /* firefly */
-
- { 0xf32e, KEY_INFO },
- { 0xf42f, KEY_OPTION },
-
- { 0xe21d, KEY_LEFT },
- { 0xe41f, KEY_RIGHT },
- { 0xe722, KEY_DOWN },
- { 0xdf1a, KEY_UP },
- { 0xe31e, KEY_OK },
-
- { 0xe11c, KEY_MENU },
- { 0xe520, KEY_EXIT },
-
- { 0xec27, KEY_RECORD },
- { 0xea25, KEY_PLAY },
- { 0xed28, KEY_STOP },
- { 0xe924, KEY_REWIND },
- { 0xeb26, KEY_FORWARD },
- { 0xee29, KEY_PAUSE },
- { 0xf02b, KEY_PREVIOUS },
- { 0xef2a, KEY_NEXT },
-
- { 0xcb06, KEY_AUDIO }, /* Music */
- { 0xca05, KEY_IMAGES }, /* Photos */
- { 0xc904, KEY_DVD },
- { 0xc803, KEY_TV },
- { 0xcc07, KEY_VIDEO },
-
- { 0xc601, KEY_HELP },
- { 0xf22d, KEY_MODE }, /* Mouse */
-
- { 0xde19, KEY_A },
- { 0xe01b, KEY_B },
- { 0xe621, KEY_C },
- { 0xe823, KEY_D },
+ { 0x2c, KEY_ZOOM }, /* Maximize */
+ { 0x02, KEY_CLOSE },
+
+ { 0x0d, KEY_1 },
+ { 0x0e, KEY_2 },
+ { 0x0f, KEY_3 },
+ { 0x10, KEY_4 },
+ { 0x11, KEY_5 },
+ { 0x12, KEY_6 },
+ { 0x13, KEY_7 },
+ { 0x14, KEY_8 },
+ { 0x15, KEY_9 },
+ { 0x17, KEY_0 },
+ { 0x16, KEY_BACK },
+ { 0x18, KEY_KPENTER }, /* ent */
+
+ { 0x09, KEY_VOLUMEUP },
+ { 0x08, KEY_VOLUMEDOWN },
+ { 0x0a, KEY_MUTE },
+ { 0x0b, KEY_CHANNELUP },
+ { 0x0c, KEY_CHANNELDOWN },
+ { 0x00, KEY_VENDOR }, /* firefly */
+
+ { 0x2e, KEY_INFO },
+ { 0x2f, KEY_OPTION },
+
+ { 0x1d, KEY_LEFT },
+ { 0x1f, KEY_RIGHT },
+ { 0x22, KEY_DOWN },
+ { 0x1a, KEY_UP },
+ { 0x1e, KEY_OK },
+
+ { 0x1c, KEY_MENU },
+ { 0x20, KEY_EXIT },
+
+ { 0x27, KEY_RECORD },
+ { 0x25, KEY_PLAY },
+ { 0x28, KEY_STOP },
+ { 0x24, KEY_REWIND },
+ { 0x26, KEY_FORWARD },
+ { 0x29, KEY_PAUSE },
+ { 0x2b, KEY_PREVIOUS },
+ { 0x2a, KEY_NEXT },
+
+ { 0x06, KEY_AUDIO }, /* Music */
+ { 0x05, KEY_IMAGES }, /* Photos */
+ { 0x04, KEY_DVD },
+ { 0x03, KEY_TV },
+ { 0x07, KEY_VIDEO },
+
+ { 0x01, KEY_HELP },
+ { 0x2d, KEY_MODE }, /* Mouse */
+
+ { 0x19, KEY_A },
+ { 0x1b, KEY_B },
+ { 0x21, KEY_C },
+ { 0x23, KEY_D },
};
static struct rc_map_list snapstream_firefly_map = {
diff --git a/drivers/media/video/au0828/au0828-cards.c b/drivers/media/video/au0828/au0828-cards.c
index 39fc923fc46..1c6015a04f9 100644
--- a/drivers/media/video/au0828/au0828-cards.c
+++ b/drivers/media/video/au0828/au0828-cards.c
@@ -162,11 +162,14 @@ static void hauppauge_eeprom(struct au0828_dev *dev, u8 *eeprom_data)
switch (tv.model) {
case 72000: /* WinTV-HVR950q (Retail, IR, ATSC/QAM */
case 72001: /* WinTV-HVR950q (Retail, IR, ATSC/QAM and analog video */
+ case 72101: /* WinTV-HVR950q (Retail, IR, ATSC/QAM and analog video */
+ case 72201: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
case 72211: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
case 72221: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
case 72231: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
case 72241: /* WinTV-HVR950q (OEM, No IR, ATSC/QAM and analog video */
case 72251: /* WinTV-HVR950q (Retail, IR, ATSC/QAM and analog video */
+ case 72261: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
case 72301: /* WinTV-HVR850 (Retail, IR, ATSC and analog video */
case 72500: /* WinTV-HVR950q (OEM, No IR, ATSC/QAM */
break;
@@ -324,6 +327,10 @@ struct usb_device_id au0828_usb_id_table[] = {
.driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q_MXL },
{ USB_DEVICE(0x2040, 0x8200),
.driver_info = AU0828_BOARD_HAUPPAUGE_WOODBURY },
+ { USB_DEVICE(0x2040, 0x7260),
+ .driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q },
+ { USB_DEVICE(0x2040, 0x7213),
+ .driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q },
{ },
};
diff --git a/drivers/media/video/m5mols/m5mols.h b/drivers/media/video/m5mols/m5mols.h
index 89d09a8914f..82c8817bd32 100644
--- a/drivers/media/video/m5mols/m5mols.h
+++ b/drivers/media/video/m5mols/m5mols.h
@@ -162,7 +162,6 @@ struct m5mols_version {
* @pad: media pad
* @ffmt: current fmt according to resolution type
* @res_type: current resolution type
- * @code: current code
* @irq_waitq: waitqueue for the capture
* @work_irq: workqueue for the IRQ
* @flags: state variable for the interrupt handler
@@ -192,7 +191,6 @@ struct m5mols_info {
struct media_pad pad;
struct v4l2_mbus_framefmt ffmt[M5MOLS_RESTYPE_MAX];
int res_type;
- enum v4l2_mbus_pixelcode code;
wait_queue_head_t irq_waitq;
struct work_struct work_irq;
unsigned long flags;
diff --git a/drivers/media/video/m5mols/m5mols_core.c b/drivers/media/video/m5mols/m5mols_core.c
index 05ab3700647..e0f09e53180 100644
--- a/drivers/media/video/m5mols/m5mols_core.c
+++ b/drivers/media/video/m5mols/m5mols_core.c
@@ -334,7 +334,7 @@ int m5mols_mode(struct m5mols_info *info, u8 mode)
int ret = -EINVAL;
u8 reg;
- if (mode < REG_PARAMETER && mode > REG_CAPTURE)
+ if (mode < REG_PARAMETER || mode > REG_CAPTURE)
return ret;
ret = m5mols_read_u8(sd, SYSTEM_SYSMODE, &reg);
@@ -511,9 +511,6 @@ static int m5mols_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
struct m5mols_info *info = to_m5mols(sd);
struct v4l2_mbus_framefmt *format;
- if (fmt->pad != 0)
- return -EINVAL;
-
format = __find_format(info, fh, fmt->which, info->res_type);
if (!format)
return -EINVAL;
@@ -532,9 +529,6 @@ static int m5mols_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
u32 resolution = 0;
int ret;
- if (fmt->pad != 0)
- return -EINVAL;
-
ret = __find_resolution(sd, format, &type, &resolution);
if (ret < 0)
return ret;
@@ -543,13 +537,14 @@ static int m5mols_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
if (!sfmt)
return 0;
- *sfmt = m5mols_default_ffmt[type];
- sfmt->width = format->width;
- sfmt->height = format->height;
+
+ format->code = m5mols_default_ffmt[type].code;
+ format->colorspace = V4L2_COLORSPACE_JPEG;
+ format->field = V4L2_FIELD_NONE;
if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ *sfmt = *format;
info->resolution = resolution;
- info->code = format->code;
info->res_type = type;
}
@@ -626,13 +621,14 @@ static int m5mols_start_monitor(struct m5mols_info *info)
static int m5mols_s_stream(struct v4l2_subdev *sd, int enable)
{
struct m5mols_info *info = to_m5mols(sd);
+ u32 code = info->ffmt[info->res_type].code;
if (enable) {
int ret = -EINVAL;
- if (is_code(info->code, M5MOLS_RESTYPE_MONITOR))
+ if (is_code(code, M5MOLS_RESTYPE_MONITOR))
ret = m5mols_start_monitor(info);
- if (is_code(info->code, M5MOLS_RESTYPE_CAPTURE))
+ if (is_code(code, M5MOLS_RESTYPE_CAPTURE))
ret = m5mols_start_capture(info);
return ret;
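The one-character m5mols_mode() fix is worth spelling out: with &&, no mode can be simultaneously below REG_PARAMETER and above REG_CAPTURE, so the range check rejected nothing; || is the correct out-of-range test. A tiny standalone illustration (the two constants are illustrative, not the driver's values):

    #include <stdio.h>

    #define LO 1    /* stands in for REG_PARAMETER */
    #define HI 3    /* stands in for REG_CAPTURE */

    int main(void)
    {
            int mode = 99;  /* clearly out of range */

            printf("&& rejects out-of-range: %d\n", mode < LO && mode > HI); /* 0 */
            printf("|| rejects out-of-range: %d\n", mode < LO || mode > HI); /* 1 */
            return 0;
    }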
diff --git a/drivers/media/video/mt9m111.c b/drivers/media/video/mt9m111.c
index cf2c0fb95f2..398f96ffd35 100644
--- a/drivers/media/video/mt9m111.c
+++ b/drivers/media/video/mt9m111.c
@@ -955,6 +955,7 @@ static int mt9m111_probe(struct i2c_client *client,
mt9m111->rect.height = MT9M111_MAX_HEIGHT;
mt9m111->fmt = &mt9m111_colour_fmts[0];
mt9m111->lastpage = -1;
+ mutex_init(&mt9m111->power_lock);
ret = mt9m111_video_probe(client);
if (ret) {
diff --git a/drivers/media/video/mt9t112.c b/drivers/media/video/mt9t112.c
index 32114a3c0ca..7b34b11daf2 100644
--- a/drivers/media/video/mt9t112.c
+++ b/drivers/media/video/mt9t112.c
@@ -1083,8 +1083,10 @@ static int mt9t112_probe(struct i2c_client *client,
v4l2_i2c_subdev_init(&priv->subdev, client, &mt9t112_subdev_ops);
ret = mt9t112_camera_probe(client);
- if (ret)
+ if (ret) {
kfree(priv);
+ return ret;
+ }
/* Cannot fail: using the default supported pixel code */
mt9t112_set_params(priv, &rect, V4L2_MBUS_FMT_UYVY8_2X8);
diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
index 9c5c19f142d..ee0d0b39cd1 100644
--- a/drivers/media/video/omap/omap_vout.c
+++ b/drivers/media/video/omap/omap_vout.c
@@ -38,6 +38,7 @@
#include <linux/irq.h>
#include <linux/videodev2.h>
#include <linux/dma-mapping.h>
+#include <linux/slab.h>
#include <media/videobuf-dma-contig.h>
#include <media/v4l2-device.h>
@@ -2169,6 +2170,14 @@ static int __init omap_vout_probe(struct platform_device *pdev)
vid_dev->num_displays = 0;
for_each_dss_dev(dssdev) {
omap_dss_get_device(dssdev);
+
+ if (!dssdev->driver) {
+ dev_warn(&pdev->dev, "no driver for display: %s\n",
+ dssdev->name);
+ omap_dss_put_device(dssdev);
+ continue;
+ }
+
vid_dev->displays[vid_dev->num_displays++] = dssdev;
}
diff --git a/drivers/media/video/omap1_camera.c b/drivers/media/video/omap1_camera.c
index e87ae2f634b..6a6cf388bae 100644
--- a/drivers/media/video/omap1_camera.c
+++ b/drivers/media/video/omap1_camera.c
@@ -24,6 +24,7 @@
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/media/video/omap24xxcam-dma.c b/drivers/media/video/omap24xxcam-dma.c
index 1d54b86c936..3ea38a8def8 100644
--- a/drivers/media/video/omap24xxcam-dma.c
+++ b/drivers/media/video/omap24xxcam-dma.c
@@ -506,7 +506,7 @@ int omap24xxcam_sgdma_queue(struct omap24xxcam_sgdma *sgdma,
unsigned long flags;
struct sgdma_state *sg_state;
- if ((sglen < 0) || ((sglen > 0) & !sglist))
+ if ((sglen < 0) || ((sglen > 0) && !sglist))
return -EINVAL;
spin_lock_irqsave(&sgdma->lock, flags);
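The omap24xxcam change swaps a bitwise & for the intended logical &&. Here both operands were already 0 or 1, so the computed value happened to be the same, but && states the intent and short-circuits; with unnormalized operands the two diverge, as this standalone snippet shows:

    #include <stdio.h>

    int main(void)
    {
            int len = 4, ptr_ok = 2;        /* both "true", but neither equals 1 */

            printf("bitwise &  : %d\n", (len & ptr_ok) != 0);       /* 4 & 2 == 0 */
            printf("logical && : %d\n", (len && ptr_ok) != 0);      /* 1 */
            return 0;
    }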
diff --git a/drivers/media/video/omap3isp/ispvideo.c b/drivers/media/video/omap3isp/ispvideo.c
index d1000723c5a..f2290578448 100644
--- a/drivers/media/video/omap3isp/ispvideo.c
+++ b/drivers/media/video/omap3isp/ispvideo.c
@@ -26,6 +26,7 @@
#include <asm/cacheflush.h>
#include <linux/clk.h>
#include <linux/mm.h>
+#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
diff --git a/drivers/media/video/ov6650.c b/drivers/media/video/ov6650.c
index 9f2d26b1d4c..6806345ec2f 100644
--- a/drivers/media/video/ov6650.c
+++ b/drivers/media/video/ov6650.c
@@ -540,7 +540,7 @@ static u8 to_clkrc(struct v4l2_fract *timeperframe,
static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_device *icd = (struct soc_camera_device *)sd->grp_id;
+ struct soc_camera_device *icd = v4l2_get_subdev_hostdata(sd);
struct soc_camera_sense *sense = icd->sense;
struct ov6650 *priv = to_ov6650(client);
bool half_scale = !is_unscaled_ok(mf->width, mf->height, &priv->rect);
diff --git a/drivers/media/video/s5p-fimc/fimc-capture.c b/drivers/media/video/s5p-fimc/fimc-capture.c
index c8d91b0cd9b..2cc3b916672 100644
--- a/drivers/media/video/s5p-fimc/fimc-capture.c
+++ b/drivers/media/video/s5p-fimc/fimc-capture.c
@@ -98,6 +98,10 @@ static int fimc_capture_state_cleanup(struct fimc_dev *fimc, bool suspend)
vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
}
set_bit(ST_CAPT_SUSPENDED, &fimc->state);
+
+ fimc_hw_reset(fimc);
+ cap->buf_index = 0;
+
spin_unlock_irqrestore(&fimc->slock, flags);
if (streaming)
@@ -137,7 +141,7 @@ int fimc_capture_config_update(struct fimc_ctx *ctx)
struct fimc_dev *fimc = ctx->fimc_dev;
int ret;
- if (test_bit(ST_CAPT_APPLY_CFG, &fimc->state))
+ if (!test_bit(ST_CAPT_APPLY_CFG, &fimc->state))
return 0;
spin_lock(&ctx->slock);
@@ -150,7 +154,7 @@ int fimc_capture_config_update(struct fimc_ctx *ctx)
fimc_hw_set_rotation(ctx);
fimc_prepare_dma_offset(ctx, &ctx->d_frame);
fimc_hw_set_out_dma(ctx);
- set_bit(ST_CAPT_APPLY_CFG, &fimc->state);
+ clear_bit(ST_CAPT_APPLY_CFG, &fimc->state);
}
spin_unlock(&ctx->slock);
return ret;
@@ -164,7 +168,6 @@ static int start_streaming(struct vb2_queue *q, unsigned int count)
int min_bufs;
int ret;
- fimc_hw_reset(fimc);
vid_cap->frame_count = 0;
ret = fimc_init_capture(fimc);
@@ -523,7 +526,7 @@ static struct fimc_fmt *fimc_capture_try_format(struct fimc_ctx *ctx,
max_w = rotation ? pl->out_rot_en_w : pl->out_rot_dis_w;
min_w = ctx->state & FIMC_DST_CROP ? dst->width : var->min_out_pixsize;
min_h = ctx->state & FIMC_DST_CROP ? dst->height : var->min_out_pixsize;
- if (fimc->id == 1 && var->pix_hoff)
+ if (var->min_vsize_align == 1 && !rotation)
align_h = fimc_fmt_is_rgb(ffmt->color) ? 0 : 1;
depth = fimc_get_format_depth(ffmt);
@@ -1239,6 +1242,7 @@ static int fimc_subdev_set_fmt(struct v4l2_subdev *sd,
mutex_lock(&fimc->lock);
set_frame_bounds(ff, mf->width, mf->height);
+ fimc->vid_cap.mf = *mf;
ff->fmt = ffmt;
/* Reset the crop rectangle if required. */
@@ -1375,7 +1379,7 @@ static void fimc_destroy_capture_subdev(struct fimc_dev *fimc)
media_entity_cleanup(&sd->entity);
v4l2_device_unregister_subdev(sd);
kfree(sd);
- sd = NULL;
+ fimc->vid_cap.subdev = NULL;
}
/* Set default format at the sensor and host interface */
diff --git a/drivers/media/video/s5p-fimc/fimc-core.c b/drivers/media/video/s5p-fimc/fimc-core.c
index 19ca6db38b2..07c6254faee 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.c
+++ b/drivers/media/video/s5p-fimc/fimc-core.c
@@ -37,7 +37,7 @@ static char *fimc_clocks[MAX_FIMC_CLOCKS] = {
static struct fimc_fmt fimc_formats[] = {
{
.name = "RGB565",
- .fourcc = V4L2_PIX_FMT_RGB565X,
+ .fourcc = V4L2_PIX_FMT_RGB565,
.depth = { 16 },
.color = S5P_FIMC_RGB565,
.memplanes = 1,
@@ -1038,12 +1038,11 @@ static int fimc_try_fmt_mplane(struct fimc_ctx *ctx, struct v4l2_format *f)
mod_x = 6; /* 64 x 32 pixels tile */
mod_y = 5;
} else {
- if (fimc->id == 1 && variant->pix_hoff)
+ if (variant->min_vsize_align == 1)
mod_y = fimc_fmt_is_rgb(fmt->color) ? 0 : 1;
else
- mod_y = mod_x;
+ mod_y = ffs(variant->min_vsize_align) - 1;
}
- dbg("mod_x: %d, mod_y: %d, max_w: %d", mod_x, mod_y, max_w);
v4l_bound_align_image(&pix->width, 16, max_w, mod_x,
&pix->height, 8, variant->pix_limit->scaler_dis_w, mod_y, 0);
@@ -1226,10 +1225,10 @@ static int fimc_m2m_try_crop(struct fimc_ctx *ctx, struct v4l2_crop *cr)
fimc->variant->min_inp_pixsize : fimc->variant->min_out_pixsize;
/* Get pixel alignment constraints. */
- if (fimc->id == 1 && fimc->variant->pix_hoff)
+ if (fimc->variant->min_vsize_align == 1)
halign = fimc_fmt_is_rgb(f->fmt->color) ? 0 : 1;
else
- halign = ffs(min_size) - 1;
+ halign = ffs(fimc->variant->min_vsize_align) - 1;
for (i = 0; i < f->fmt->colplanes; i++)
depth += f->fmt->depth[i];
@@ -1615,7 +1614,6 @@ static int fimc_probe(struct platform_device *pdev)
pdata = pdev->dev.platform_data;
fimc->pdata = pdata;
- set_bit(ST_LPM, &fimc->state);
init_waitqueue_head(&fimc->irq_queue);
spin_lock_init(&fimc->slock);
@@ -1707,8 +1705,6 @@ static int fimc_runtime_resume(struct device *dev)
 /* Enable clocks and perform basic initialization */
clk_enable(fimc->clock[CLK_GATE]);
fimc_hw_reset(fimc);
- if (fimc->variant->out_buf_count > 4)
- fimc_hw_set_dma_seq(fimc, 0xF);
/* Resume the capture or mem-to-mem device */
if (fimc_capture_busy(fimc))
@@ -1750,8 +1746,6 @@ static int fimc_resume(struct device *dev)
return 0;
}
fimc_hw_reset(fimc);
- if (fimc->variant->out_buf_count > 4)
- fimc_hw_set_dma_seq(fimc, 0xF);
spin_unlock_irqrestore(&fimc->slock, flags);
if (fimc_capture_busy(fimc))
@@ -1780,7 +1774,6 @@ static int __devexit fimc_remove(struct platform_device *pdev)
struct fimc_dev *fimc = platform_get_drvdata(pdev);
pm_runtime_disable(&pdev->dev);
- fimc_runtime_suspend(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
vb2_dma_contig_cleanup_ctx(fimc->alloc_ctx);
@@ -1840,6 +1833,7 @@ static struct samsung_fimc_variant fimc0_variant_s5p = {
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 8,
+ .min_vsize_align = 16,
.out_buf_count = 4,
.pix_limit = &s5p_pix_limit[0],
};
@@ -1849,6 +1843,7 @@ static struct samsung_fimc_variant fimc2_variant_s5p = {
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 8,
+ .min_vsize_align = 16,
.out_buf_count = 4,
.pix_limit = &s5p_pix_limit[1],
};
@@ -1861,6 +1856,7 @@ static struct samsung_fimc_variant fimc0_variant_s5pv210 = {
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 8,
+ .min_vsize_align = 16,
.out_buf_count = 4,
.pix_limit = &s5p_pix_limit[1],
};
@@ -1874,6 +1870,7 @@ static struct samsung_fimc_variant fimc1_variant_s5pv210 = {
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 1,
+ .min_vsize_align = 1,
.out_buf_count = 4,
.pix_limit = &s5p_pix_limit[2],
};
@@ -1884,6 +1881,7 @@ static struct samsung_fimc_variant fimc2_variant_s5pv210 = {
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 8,
+ .min_vsize_align = 16,
.out_buf_count = 4,
.pix_limit = &s5p_pix_limit[2],
};
@@ -1898,6 +1896,7 @@ static struct samsung_fimc_variant fimc0_variant_exynos4 = {
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 2,
+ .min_vsize_align = 1,
.out_buf_count = 32,
.pix_limit = &s5p_pix_limit[1],
};
@@ -1910,6 +1909,7 @@ static struct samsung_fimc_variant fimc3_variant_exynos4 = {
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 2,
+ .min_vsize_align = 1,
.out_buf_count = 32,
.pix_limit = &s5p_pix_limit[3],
};
diff --git a/drivers/media/video/s5p-fimc/fimc-core.h b/drivers/media/video/s5p-fimc/fimc-core.h
index a6936dad5b1..c7f01c47b20 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.h
+++ b/drivers/media/video/s5p-fimc/fimc-core.h
@@ -377,6 +377,7 @@ struct fimc_pix_limit {
* @min_inp_pixsize: minimum input pixel size
* @min_out_pixsize: minimum output pixel size
 * @hor_offs_align: horizontal pixel offset alignment
+ * @min_vsize_align: minimum vertical pixel size alignment
* @out_buf_count: the number of buffers in output DMA sequence
*/
struct samsung_fimc_variant {
@@ -390,6 +391,7 @@ struct samsung_fimc_variant {
u16 min_inp_pixsize;
u16 min_out_pixsize;
u16 hor_offs_align;
+ u16 min_vsize_align;
u16 out_buf_count;
};
diff --git a/drivers/media/video/s5p-fimc/fimc-mdevice.c b/drivers/media/video/s5p-fimc/fimc-mdevice.c
index cc337b1de91..615c862f036 100644
--- a/drivers/media/video/s5p-fimc/fimc-mdevice.c
+++ b/drivers/media/video/s5p-fimc/fimc-mdevice.c
@@ -220,6 +220,7 @@ static struct v4l2_subdev *fimc_md_register_sensor(struct fimc_md *fmd,
sd = v4l2_i2c_new_subdev_board(&fmd->v4l2_dev, adapter,
s_info->pdata->board_info, NULL);
if (IS_ERR_OR_NULL(sd)) {
+ i2c_put_adapter(adapter);
v4l2_err(&fmd->v4l2_dev, "Failed to acquire subdev\n");
return NULL;
}
@@ -234,12 +235,15 @@ static struct v4l2_subdev *fimc_md_register_sensor(struct fimc_md *fmd,
static void fimc_md_unregister_sensor(struct v4l2_subdev *sd)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct i2c_adapter *adapter;
if (!client)
return;
v4l2_device_unregister_subdev(sd);
+ adapter = client->adapter;
i2c_unregister_device(client);
- i2c_put_adapter(client->adapter);
+ if (adapter)
+ i2c_put_adapter(adapter);
}
static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
@@ -381,20 +385,28 @@ static void fimc_md_unregister_entities(struct fimc_md *fmd)
static int fimc_md_register_video_nodes(struct fimc_md *fmd)
{
+ struct video_device *vdev;
int i, ret = 0;
for (i = 0; i < FIMC_MAX_DEVS && !ret; i++) {
if (!fmd->fimc[i])
continue;
- if (fmd->fimc[i]->m2m.vfd)
- ret = video_register_device(fmd->fimc[i]->m2m.vfd,
- VFL_TYPE_GRABBER, -1);
- if (ret)
- break;
- if (fmd->fimc[i]->vid_cap.vfd)
- ret = video_register_device(fmd->fimc[i]->vid_cap.vfd,
- VFL_TYPE_GRABBER, -1);
+ vdev = fmd->fimc[i]->m2m.vfd;
+ if (vdev) {
+ ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ if (ret)
+ break;
+ v4l2_info(&fmd->v4l2_dev, "Registered %s as /dev/%s\n",
+ vdev->name, video_device_node_name(vdev));
+ }
+
+ vdev = fmd->fimc[i]->vid_cap.vfd;
+ if (vdev == NULL)
+ continue;
+ ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ v4l2_info(&fmd->v4l2_dev, "Registered %s as /dev/%s\n",
+ vdev->name, video_device_node_name(vdev));
}
return ret;
@@ -502,7 +514,7 @@ static int fimc_md_create_links(struct fimc_md *fmd)
if (WARN(csis == NULL,
"MIPI-CSI interface specified "
"but s5p-csis module is not loaded!\n"))
- continue;
+ return -EINVAL;
ret = media_entity_create_link(&sensor->entity, 0,
&csis->entity, CSIS_PAD_SINK,
@@ -742,9 +754,6 @@ static int __devinit fimc_md_probe(struct platform_device *pdev)
struct fimc_md *fmd;
int ret;
- if (WARN(!pdev->dev.platform_data, "Platform data not specified!\n"))
- return -EINVAL;
-
fmd = kzalloc(sizeof(struct fimc_md), GFP_KERNEL);
if (!fmd)
return -ENOMEM;
@@ -782,9 +791,11 @@ static int __devinit fimc_md_probe(struct platform_device *pdev)
if (ret)
goto err3;
- ret = fimc_md_register_sensor_entities(fmd);
- if (ret)
- goto err3;
+ if (pdev->dev.platform_data) {
+ ret = fimc_md_register_sensor_entities(fmd);
+ if (ret)
+ goto err3;
+ }
ret = fimc_md_create_links(fmd);
if (ret)
goto err3;
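fimc_md_unregister_sensor() now copies the adapter pointer out of the i2c client before i2c_unregister_device() frees the client, and only then drops the adapter reference taken at registration time. A minimal sketch of that ordering:

    #include <linux/i2c.h>

    static void example_unregister_sensor(struct i2c_client *client)
    {
            struct i2c_adapter *adapter;

            if (!client)
                    return;
            adapter = client->adapter;      /* saved before the client is freed */
            i2c_unregister_device(client);
            if (adapter)
                    i2c_put_adapter(adapter);       /* pairs with the get at probe time */
    }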
diff --git a/drivers/media/video/s5p-fimc/fimc-reg.c b/drivers/media/video/s5p-fimc/fimc-reg.c
index 20e664e3416..44f5c2d1920 100644
--- a/drivers/media/video/s5p-fimc/fimc-reg.c
+++ b/drivers/media/video/s5p-fimc/fimc-reg.c
@@ -35,6 +35,9 @@ void fimc_hw_reset(struct fimc_dev *dev)
cfg = readl(dev->regs + S5P_CIGCTRL);
cfg &= ~S5P_CIGCTRL_SWRST;
writel(cfg, dev->regs + S5P_CIGCTRL);
+
+ if (dev->variant->out_buf_count > 4)
+ fimc_hw_set_dma_seq(dev, 0xF);
}
static u32 fimc_hw_get_in_flip(struct fimc_ctx *ctx)
@@ -251,7 +254,14 @@ static void fimc_hw_set_scaler(struct fimc_ctx *ctx)
struct fimc_scaler *sc = &ctx->scaler;
struct fimc_frame *src_frame = &ctx->s_frame;
struct fimc_frame *dst_frame = &ctx->d_frame;
- u32 cfg = 0;
+
+ u32 cfg = readl(dev->regs + S5P_CISCCTRL);
+
+ cfg &= ~(S5P_CISCCTRL_CSCR2Y_WIDE | S5P_CISCCTRL_CSCY2R_WIDE |
+ S5P_CISCCTRL_SCALEUP_H | S5P_CISCCTRL_SCALEUP_V |
+ S5P_CISCCTRL_SCALERBYPASS | S5P_CISCCTRL_ONE2ONE |
+ S5P_CISCCTRL_INRGB_FMT_MASK | S5P_CISCCTRL_OUTRGB_FMT_MASK |
+ S5P_CISCCTRL_INTERLACE | S5P_CISCCTRL_RGB_EXT);
if (!(ctx->flags & FIMC_COLOR_RANGE_NARROW))
cfg |= (S5P_CISCCTRL_CSCR2Y_WIDE | S5P_CISCCTRL_CSCY2R_WIDE);
@@ -308,9 +318,9 @@ void fimc_hw_set_mainscaler(struct fimc_ctx *ctx)
fimc_hw_set_scaler(ctx);
cfg = readl(dev->regs + S5P_CISCCTRL);
+ cfg &= ~(S5P_CISCCTRL_MHRATIO_MASK | S5P_CISCCTRL_MVRATIO_MASK);
if (variant->has_mainscaler_ext) {
- cfg &= ~(S5P_CISCCTRL_MHRATIO_MASK | S5P_CISCCTRL_MVRATIO_MASK);
cfg |= S5P_CISCCTRL_MHRATIO_EXT(sc->main_hratio);
cfg |= S5P_CISCCTRL_MVRATIO_EXT(sc->main_vratio);
writel(cfg, dev->regs + S5P_CISCCTRL);
@@ -323,7 +333,6 @@ void fimc_hw_set_mainscaler(struct fimc_ctx *ctx)
cfg |= S5P_CIEXTEN_MVRATIO_EXT(sc->main_vratio);
writel(cfg, dev->regs + S5P_CIEXTEN);
} else {
- cfg &= ~(S5P_CISCCTRL_MHRATIO_MASK | S5P_CISCCTRL_MVRATIO_MASK);
cfg |= S5P_CISCCTRL_MHRATIO(sc->main_hratio);
cfg |= S5P_CISCCTRL_MVRATIO(sc->main_vratio);
writel(cfg, dev->regs + S5P_CISCCTRL);
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_enc.c b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
index 1e8cdb77d4b..dff9dc79879 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
@@ -61,7 +61,7 @@ static struct s5p_mfc_fmt formats[] = {
.num_planes = 1,
},
{
- .name = "H264 Encoded Stream",
+ .name = "H263 Encoded Stream",
.fourcc = V4L2_PIX_FMT_H263,
.codec_mode = S5P_FIMV_CODEC_H263_ENC,
.type = MFC_FMT_ENC,
diff --git a/drivers/media/video/s5p-tv/mixer_video.c b/drivers/media/video/s5p-tv/mixer_video.c
index e16d3a4bc1d..b47d0c06ecf 100644
--- a/drivers/media/video/s5p-tv/mixer_video.c
+++ b/drivers/media/video/s5p-tv/mixer_video.c
@@ -16,6 +16,7 @@
#include <media/v4l2-ioctl.h>
#include <linux/videodev2.h>
#include <linux/mm.h>
+#include <linux/module.h>
#include <linux/version.h>
#include <linux/timer.h>
#include <media/videobuf2-dma-contig.h>
diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c
index f390682629c..c51decfcae1 100644
--- a/drivers/media/video/sh_mobile_ceu_camera.c
+++ b/drivers/media/video/sh_mobile_ceu_camera.c
@@ -566,8 +566,10 @@ static int sh_mobile_ceu_add_device(struct soc_camera_device *icd)
ret = sh_mobile_ceu_soft_reset(pcdev);
csi2_sd = find_csi2(pcdev);
- if (csi2_sd)
- csi2_sd->grp_id = (long)icd;
+ if (csi2_sd) {
+ csi2_sd->grp_id = soc_camera_grp_id(icd);
+ v4l2_set_subdev_hostdata(csi2_sd, icd);
+ }
ret = v4l2_subdev_call(csi2_sd, core, s_power, 1);
if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV) {
@@ -768,7 +770,7 @@ static struct v4l2_subdev *find_bus_subdev(struct sh_mobile_ceu_dev *pcdev,
{
if (pcdev->csi2_pdev) {
struct v4l2_subdev *csi2_sd = find_csi2(pcdev);
- if (csi2_sd && csi2_sd->grp_id == (u32)icd)
+ if (csi2_sd && csi2_sd->grp_id == soc_camera_grp_id(icd))
return csi2_sd;
}
@@ -1089,8 +1091,9 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int
/* Try 2560x1920, 1280x960, 640x480, 320x240 */
mf.width = 2560 >> shift;
mf.height = 1920 >> shift;
- ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
- s_mbus_fmt, &mf);
+ ret = v4l2_device_call_until_err(sd->v4l2_dev,
+ soc_camera_grp_id(icd), video,
+ s_mbus_fmt, &mf);
if (ret < 0)
return ret;
shift++;
@@ -1389,7 +1392,8 @@ static int client_s_fmt(struct soc_camera_device *icd,
bool ceu_1to1;
int ret;
- ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
+ ret = v4l2_device_call_until_err(sd->v4l2_dev,
+ soc_camera_grp_id(icd), video,
s_mbus_fmt, mf);
if (ret < 0)
return ret;
@@ -1426,8 +1430,9 @@ static int client_s_fmt(struct soc_camera_device *icd,
tmp_h = min(2 * tmp_h, max_height);
mf->width = tmp_w;
mf->height = tmp_h;
- ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
- s_mbus_fmt, mf);
+ ret = v4l2_device_call_until_err(sd->v4l2_dev,
+ soc_camera_grp_id(icd), video,
+ s_mbus_fmt, mf);
dev_geo(dev, "Camera scaled to %ux%u\n",
mf->width, mf->height);
if (ret < 0) {
@@ -1580,8 +1585,9 @@ static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd,
}
if (interm_width < icd->user_width || interm_height < icd->user_height) {
- ret = v4l2_device_call_until_err(sd->v4l2_dev, (int)icd, video,
- s_mbus_fmt, &mf);
+ ret = v4l2_device_call_until_err(sd->v4l2_dev,
+ soc_camera_grp_id(icd), video,
+ s_mbus_fmt, &mf);
if (ret < 0)
return ret;
@@ -1867,7 +1873,8 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
mf.code = xlate->code;
mf.colorspace = pix->colorspace;
- ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video, try_mbus_fmt, &mf);
+ ret = v4l2_device_call_until_err(sd->v4l2_dev, soc_camera_grp_id(icd),
+ video, try_mbus_fmt, &mf);
if (ret < 0)
return ret;
@@ -1891,8 +1898,9 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
*/
mf.width = 2560;
mf.height = 1920;
- ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
- try_mbus_fmt, &mf);
+ ret = v4l2_device_call_until_err(sd->v4l2_dev,
+ soc_camera_grp_id(icd), video,
+ try_mbus_fmt, &mf);
if (ret < 0) {
/* Shouldn't actually happen... */
dev_err(icd->parent,
diff --git a/drivers/media/video/sh_mobile_csi2.c b/drivers/media/video/sh_mobile_csi2.c
index ea4f0473ed3..8a652b53ff7 100644
--- a/drivers/media/video/sh_mobile_csi2.c
+++ b/drivers/media/video/sh_mobile_csi2.c
@@ -143,7 +143,7 @@ static int sh_csi2_s_mbus_config(struct v4l2_subdev *sd,
const struct v4l2_mbus_config *cfg)
{
struct sh_csi2 *priv = container_of(sd, struct sh_csi2, subdev);
- struct soc_camera_device *icd = (struct soc_camera_device *)sd->grp_id;
+ struct soc_camera_device *icd = v4l2_get_subdev_hostdata(sd);
struct v4l2_subdev *client_sd = soc_camera_to_subdev(icd);
struct v4l2_mbus_config client_cfg = {.type = V4L2_MBUS_CSI2,
.flags = priv->mipi_flags};
@@ -202,7 +202,7 @@ static void sh_csi2_hwinit(struct sh_csi2 *priv)
static int sh_csi2_client_connect(struct sh_csi2 *priv)
{
struct sh_csi2_pdata *pdata = priv->pdev->dev.platform_data;
- struct soc_camera_device *icd = (struct soc_camera_device *)priv->subdev.grp_id;
+ struct soc_camera_device *icd = v4l2_get_subdev_hostdata(&priv->subdev);
struct v4l2_subdev *client_sd = soc_camera_to_subdev(icd);
struct device *dev = v4l2_get_subdevdata(&priv->subdev);
struct v4l2_mbus_config cfg;
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c
index b72580c3895..62e4312515c 100644
--- a/drivers/media/video/soc_camera.c
+++ b/drivers/media/video/soc_camera.c
@@ -1103,7 +1103,8 @@ static int soc_camera_probe(struct soc_camera_device *icd)
}
sd = soc_camera_to_subdev(icd);
- sd->grp_id = (long)icd;
+ sd->grp_id = soc_camera_grp_id(icd);
+ v4l2_set_subdev_hostdata(sd, icd);
if (v4l2_ctrl_add_handler(&icd->ctrl_handler, sd->ctrl_handler))
goto ectrl;
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index a1cb21f9530..1e0e27cbe98 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1606,6 +1606,14 @@ static const struct mmc_fixup blk_fixups[] =
MMC_QUIRK_BLK_NO_CMD23),
MMC_FIXUP("MMC32G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
MMC_QUIRK_BLK_NO_CMD23),
+
+ /*
+ * Some Micron MMC cards need a longer data read timeout than
+ * indicated in the CSD.
+ */
+ MMC_FIXUP(CID_NAME_ANY, 0x13, 0x200, add_quirk_mmc,
+ MMC_QUIRK_LONG_READ_TIME),
+
END_FIXUP
};
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 5278ffb20e7..950b97d7412 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -529,6 +529,18 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
data->timeout_clks = 0;
}
}
+
+ /*
+ * Some cards require a longer data read timeout than indicated in the CSD.
+ * Address this by setting the read timeout to a "reasonably high"
+ * value. For the cards tested, 300ms has proven enough; the value can
+ * be increased if other problematic cards turn up.
+ */
+ if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
+ data->timeout_ns = 300000000;
+ data->timeout_clks = 0;
+ }
+
/*
* Some cards need very high timeouts if driven in SPI mode.
* The worst observed timeout was 900ms after writing a
@@ -1213,6 +1225,46 @@ void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
mmc_host_clk_release(host);
}
+static void mmc_poweroff_notify(struct mmc_host *host)
+{
+ struct mmc_card *card;
+ unsigned int timeout;
+ unsigned int notify_type = EXT_CSD_NO_POWER_NOTIFICATION;
+ int err = 0;
+
+ card = host->card;
+
+ /*
+ * Send power notify command only if card
+ * is mmc and notify state is powered ON
+ */
+ if (card && mmc_card_mmc(card) &&
+ (card->poweroff_notify_state == MMC_POWERED_ON)) {
+
+ if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
+ notify_type = EXT_CSD_POWER_OFF_SHORT;
+ timeout = card->ext_csd.generic_cmd6_time;
+ card->poweroff_notify_state = MMC_POWEROFF_SHORT;
+ } else {
+ notify_type = EXT_CSD_POWER_OFF_LONG;
+ timeout = card->ext_csd.power_off_longtime;
+ card->poweroff_notify_state = MMC_POWEROFF_LONG;
+ }
+
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_POWER_OFF_NOTIFICATION,
+ notify_type, timeout);
+
+ if (err && err != -EBADMSG)
+ pr_err("Device failed to respond within %d poweroff "
+ "time. Forcefully powering down the device\n",
+ timeout);
+
+ /* Set the card state to no notification after the poweroff */
+ card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
+ }
+}
+
/*
* Apply power to the MMC stack. This is a two-stage process.
* First, we enable power to the card without the clock running.
@@ -1269,42 +1321,12 @@ static void mmc_power_up(struct mmc_host *host)
void mmc_power_off(struct mmc_host *host)
{
- struct mmc_card *card;
- unsigned int notify_type;
- unsigned int timeout;
- int err;
-
mmc_host_clk_hold(host);
- card = host->card;
host->ios.clock = 0;
host->ios.vdd = 0;
- if (card && mmc_card_mmc(card) &&
- (card->poweroff_notify_state == MMC_POWERED_ON)) {
-
- if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
- notify_type = EXT_CSD_POWER_OFF_SHORT;
- timeout = card->ext_csd.generic_cmd6_time;
- card->poweroff_notify_state = MMC_POWEROFF_SHORT;
- } else {
- notify_type = EXT_CSD_POWER_OFF_LONG;
- timeout = card->ext_csd.power_off_longtime;
- card->poweroff_notify_state = MMC_POWEROFF_LONG;
- }
-
- err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_POWER_OFF_NOTIFICATION,
- notify_type, timeout);
-
- if (err && err != -EBADMSG)
- pr_err("Device failed to respond within %d poweroff "
- "time. Forcefully powering down the device\n",
- timeout);
-
- /* Set the card state to no notification after the poweroff */
- card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
- }
+ mmc_poweroff_notify(host);
/*
* Reset ocr mask to be the highest possible voltage supported for
@@ -2196,7 +2218,7 @@ int mmc_card_sleep(struct mmc_host *host)
mmc_bus_get(host);
- if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
+ if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
err = host->bus_ops->sleep(host);
mmc_bus_put(host);
@@ -2302,8 +2324,17 @@ int mmc_suspend_host(struct mmc_host *host)
* pre-claim the host.
*/
if (mmc_try_claim_host(host)) {
- if (host->bus_ops->suspend)
+ if (host->bus_ops->suspend) {
+ /*
+ * For an eMMC 4.5 device, send the notify command
+ * before sleep, because in sleep state eMMC 4.5
+ * devices respond only to RESET and AWAKE commands
+ */
+ mmc_poweroff_notify(host);
err = host->bus_ops->suspend(host);
+ }
+ mmc_do_release_host(host);
+
if (err == -ENOSYS || !host->bus_ops->resume) {
/*
* We simply "remove" the card in this case.
@@ -2318,7 +2349,6 @@ int mmc_suspend_host(struct mmc_host *host)
host->pm_flags = 0;
err = 0;
}
- mmc_do_release_host(host);
} else {
err = -EBUSY;
}
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index e8a5eb38748..d31c78b72b0 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -302,17 +302,6 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
host->max_blk_size = 512;
host->max_blk_count = PAGE_CACHE_SIZE / 512;
- /*
- * Enable runtime power management by default. This flag was added due
- * to runtime power management causing disruption for some users, but
- * the power on/off code has been improved since then.
- *
- * We'll enable this flag by default as an experiment, and if no
- * problems are reported, we will follow up later and remove the flag
- * altogether.
- */
- host->caps = MMC_CAP_POWER_OFF_CARD;
-
return host;
free:
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index dbf421a6279..d240427c124 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -876,17 +876,21 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
* set the notification byte in the ext_csd register of device
*/
if ((host->caps2 & MMC_CAP2_POWEROFF_NOTIFY) &&
- (card->poweroff_notify_state == MMC_NO_POWER_NOTIFICATION)) {
+ (card->ext_csd.rev >= 6)) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_POWER_OFF_NOTIFICATION,
EXT_CSD_POWER_ON,
card->ext_csd.generic_cmd6_time);
if (err && err != -EBADMSG)
goto free_card;
- }
- if (!err)
- card->poweroff_notify_state = MMC_POWERED_ON;
+ /*
+ * The err can be -EBADMSG or 0,
+ * so check for success and update the flag
+ */
+ if (!err)
+ card->poweroff_notify_state = MMC_POWERED_ON;
+ }
/*
* Activate high speed (if supported)
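As background for the ext_csd.rev check in the hunk above (an illustrative sketch, not part of this patch): EXT_CSD revision 6 is how the core recognizes an eMMC 4.5 part, the first revision that defines the POWER_OFF_NOTIFICATION byte. A tiny helper expressing the same test, with an invented name, could look like this:

#include <linux/mmc/card.h>

/* EXT_CSD_REV 6 corresponds to the eMMC 4.5 specification. */
static bool example_card_is_emmc45(const struct mmc_card *card)
{
	return card->ext_csd.rev >= 6;
}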
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 325ea61e12d..8e0fbe99404 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -732,6 +732,7 @@ static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
"failed to config DMA channel. Falling back to PIO\n");
dma_release_channel(host->dma);
host->do_dma = 0;
+ host->dma = NULL;
}
}
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 101cd31c822..d5fe43d53c5 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1010,6 +1010,7 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
host->data->sg_len,
omap_hsmmc_get_dma_dir(host, host->data));
omap_free_dma(dma_ch);
+ host->data->host_cookie = 0;
}
host->data = NULL;
}
@@ -1575,8 +1576,10 @@ static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
struct mmc_data *data = mrq->data;
if (host->use_dma) {
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
- omap_hsmmc_get_dma_dir(host, data));
+ if (data->host_cookie)
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+ data->sg_len,
+ omap_hsmmc_get_dma_dir(host, data));
data->host_cookie = 0;
}
}
diff --git a/drivers/mmc/host/sdhci-cns3xxx.c b/drivers/mmc/host/sdhci-cns3xxx.c
index 4b920b7621c..b4257e70061 100644
--- a/drivers/mmc/host/sdhci-cns3xxx.c
+++ b/drivers/mmc/host/sdhci-cns3xxx.c
@@ -15,6 +15,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/mmc/host.h>
+#include <linux/module.h>
#include <mach/cns3xxx.h>
#include "sdhci-pltfm.h"
@@ -108,13 +109,10 @@ static struct platform_driver sdhci_cns3xxx_driver = {
.driver = {
.name = "sdhci-cns3xxx",
.owner = THIS_MODULE,
+ .pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_cns3xxx_probe,
.remove = __devexit_p(sdhci_cns3xxx_remove),
-#ifdef CONFIG_PM
- .suspend = sdhci_pltfm_suspend,
- .resume = sdhci_pltfm_resume,
-#endif
};
static int __init sdhci_cns3xxx_init(void)
diff --git a/drivers/mmc/host/sdhci-dove.c b/drivers/mmc/host/sdhci-dove.c
index f2d29dca442..a81312c91f7 100644
--- a/drivers/mmc/host/sdhci-dove.c
+++ b/drivers/mmc/host/sdhci-dove.c
@@ -82,13 +82,10 @@ static struct platform_driver sdhci_dove_driver = {
.driver = {
.name = "sdhci-dove",
.owner = THIS_MODULE,
+ .pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_dove_probe,
.remove = __devexit_p(sdhci_dove_remove),
-#ifdef CONFIG_PM
- .suspend = sdhci_pltfm_suspend,
- .resume = sdhci_pltfm_resume,
-#endif
};
static int __init sdhci_dove_init(void)
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 4b976f00ea8..38ebc4ea259 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -599,14 +599,11 @@ static struct platform_driver sdhci_esdhc_imx_driver = {
.name = "sdhci-esdhc-imx",
.owner = THIS_MODULE,
.of_match_table = imx_esdhc_dt_ids,
+ .pm = SDHCI_PLTFM_PMOPS,
},
.id_table = imx_esdhc_devtype,
.probe = sdhci_esdhc_imx_probe,
.remove = __devexit_p(sdhci_esdhc_imx_remove),
-#ifdef CONFIG_PM
- .suspend = sdhci_pltfm_suspend,
- .resume = sdhci_pltfm_resume,
-#endif
};
static int __init sdhci_esdhc_imx_init(void)
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 59e9d003e58..01e5f627e0f 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -125,13 +125,10 @@ static struct platform_driver sdhci_esdhc_driver = {
.name = "sdhci-esdhc",
.owner = THIS_MODULE,
.of_match_table = sdhci_esdhc_of_match,
+ .pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_esdhc_probe,
.remove = __devexit_p(sdhci_esdhc_remove),
-#ifdef CONFIG_PM
- .suspend = sdhci_pltfm_suspend,
- .resume = sdhci_pltfm_resume,
-#endif
};
static int __init sdhci_esdhc_init(void)
diff --git a/drivers/mmc/host/sdhci-of-hlwd.c b/drivers/mmc/host/sdhci-of-hlwd.c
index 9b0d794a4f6..3619adc7d9f 100644
--- a/drivers/mmc/host/sdhci-of-hlwd.c
+++ b/drivers/mmc/host/sdhci-of-hlwd.c
@@ -87,13 +87,10 @@ static struct platform_driver sdhci_hlwd_driver = {
.name = "sdhci-hlwd",
.owner = THIS_MODULE,
.of_match_table = sdhci_hlwd_of_match,
+ .pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_hlwd_probe,
.remove = __devexit_p(sdhci_hlwd_remove),
-#ifdef CONFIG_PM
- .suspend = sdhci_pltfm_suspend,
- .resume = sdhci_pltfm_resume,
-#endif
};
static int __init sdhci_hlwd_init(void)
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index d833d9c2f7e..6878a94626b 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -54,8 +54,7 @@ struct sdhci_pci_fixes {
int (*probe_slot) (struct sdhci_pci_slot *);
void (*remove_slot) (struct sdhci_pci_slot *, int);
- int (*suspend) (struct sdhci_pci_chip *,
- pm_message_t);
+ int (*suspend) (struct sdhci_pci_chip *);
int (*resume) (struct sdhci_pci_chip *);
};
@@ -549,7 +548,7 @@ static void jmicron_remove_slot(struct sdhci_pci_slot *slot, int dead)
jmicron_enable_mmc(slot->host, 0);
}
-static int jmicron_suspend(struct sdhci_pci_chip *chip, pm_message_t state)
+static int jmicron_suspend(struct sdhci_pci_chip *chip)
{
int i;
@@ -993,8 +992,9 @@ static struct sdhci_ops sdhci_pci_ops = {
#ifdef CONFIG_PM
-static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int sdhci_pci_suspend(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct sdhci_pci_chip *chip;
struct sdhci_pci_slot *slot;
mmc_pm_flag_t slot_pm_flags;
@@ -1010,7 +1010,7 @@ static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state)
if (!slot)
continue;
- ret = sdhci_suspend_host(slot->host, state);
+ ret = sdhci_suspend_host(slot->host);
if (ret) {
for (i--; i >= 0; i--)
@@ -1026,7 +1026,7 @@ static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state)
}
if (chip->fixes && chip->fixes->suspend) {
- ret = chip->fixes->suspend(chip, state);
+ ret = chip->fixes->suspend(chip);
if (ret) {
for (i = chip->num_slots - 1; i >= 0; i--)
sdhci_resume_host(chip->slots[i]->host);
@@ -1042,16 +1042,17 @@ static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state)
}
pci_set_power_state(pdev, PCI_D3hot);
} else {
- pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
+ pci_enable_wake(pdev, PCI_D3hot, 0);
pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ pci_set_power_state(pdev, PCI_D3hot);
}
return 0;
}
-static int sdhci_pci_resume(struct pci_dev *pdev)
+static int sdhci_pci_resume(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct sdhci_pci_chip *chip;
struct sdhci_pci_slot *slot;
int i, ret;
@@ -1099,7 +1100,6 @@ static int sdhci_pci_runtime_suspend(struct device *dev)
struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
struct sdhci_pci_chip *chip;
struct sdhci_pci_slot *slot;
- pm_message_t state = { .event = PM_EVENT_SUSPEND };
int i, ret;
chip = pci_get_drvdata(pdev);
@@ -1121,7 +1121,7 @@ static int sdhci_pci_runtime_suspend(struct device *dev)
}
if (chip->fixes && chip->fixes->suspend) {
- ret = chip->fixes->suspend(chip, state);
+ ret = chip->fixes->suspend(chip);
if (ret) {
for (i = chip->num_slots - 1; i >= 0; i--)
sdhci_runtime_resume_host(chip->slots[i]->host);
@@ -1176,6 +1176,8 @@ static int sdhci_pci_runtime_idle(struct device *dev)
#endif
static const struct dev_pm_ops sdhci_pci_pm_ops = {
+ .suspend = sdhci_pci_suspend,
+ .resume = sdhci_pci_resume,
.runtime_suspend = sdhci_pci_runtime_suspend,
.runtime_resume = sdhci_pci_runtime_resume,
.runtime_idle = sdhci_pci_runtime_idle,
@@ -1428,8 +1430,6 @@ static struct pci_driver sdhci_driver = {
.id_table = pci_ids,
.probe = sdhci_pci_probe,
.remove = __devexit_p(sdhci_pci_remove),
- .suspend = sdhci_pci_suspend,
- .resume = sdhci_pci_resume,
.driver = {
.pm = &sdhci_pci_pm_ops
},
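As an illustration of the conversion done in sdhci-pci.c above (a sketch only, not part of this patch): the legacy pci_driver .suspend/.resume callbacks took a struct pci_dev and a pm_message_t, while dev_pm_ops callbacks receive a struct device and recover the PCI device with to_pci_dev(). A minimal hypothetical driver with invented bar_* names, wired up through .driver.pm just like sdhci_driver:

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm.h>

static int bar_pci_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);	/* dev_pm_ops passes a struct device */

	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int bar_pci_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return 0;
}

static const struct dev_pm_ops bar_pci_pm_ops = {
	.suspend = bar_pci_suspend,
	.resume  = bar_pci_resume,
};

static struct pci_driver bar_pci_driver = {
	.name   = "bar-pci",
	/* .id_table, .probe and .remove omitted; only the PM wiring is shown */
	.driver = {
		.pm = &bar_pci_pm_ops,
	},
};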
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index a9e12ea0558..03970bcb349 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -194,21 +194,25 @@ int sdhci_pltfm_unregister(struct platform_device *pdev)
EXPORT_SYMBOL_GPL(sdhci_pltfm_unregister);
#ifdef CONFIG_PM
-int sdhci_pltfm_suspend(struct platform_device *dev, pm_message_t state)
+static int sdhci_pltfm_suspend(struct device *dev)
{
- struct sdhci_host *host = platform_get_drvdata(dev);
+ struct sdhci_host *host = dev_get_drvdata(dev);
- return sdhci_suspend_host(host, state);
+ return sdhci_suspend_host(host);
}
-EXPORT_SYMBOL_GPL(sdhci_pltfm_suspend);
-int sdhci_pltfm_resume(struct platform_device *dev)
+static int sdhci_pltfm_resume(struct device *dev)
{
- struct sdhci_host *host = platform_get_drvdata(dev);
+ struct sdhci_host *host = dev_get_drvdata(dev);
return sdhci_resume_host(host);
}
-EXPORT_SYMBOL_GPL(sdhci_pltfm_resume);
+
+const struct dev_pm_ops sdhci_pltfm_pmops = {
+ .suspend = sdhci_pltfm_suspend,
+ .resume = sdhci_pltfm_resume,
+};
+EXPORT_SYMBOL_GPL(sdhci_pltfm_pmops);
#endif /* CONFIG_PM */
static int __init sdhci_pltfm_drv_init(void)
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
index 3a9fc3f4084..37e0e184a0b 100644
--- a/drivers/mmc/host/sdhci-pltfm.h
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -99,8 +99,10 @@ extern int sdhci_pltfm_register(struct platform_device *pdev,
extern int sdhci_pltfm_unregister(struct platform_device *pdev);
#ifdef CONFIG_PM
-extern int sdhci_pltfm_suspend(struct platform_device *dev, pm_message_t state);
-extern int sdhci_pltfm_resume(struct platform_device *dev);
+extern const struct dev_pm_ops sdhci_pltfm_pmops;
+#define SDHCI_PLTFM_PMOPS (&sdhci_pltfm_pmops)
+#else
+#define SDHCI_PLTFM_PMOPS NULL
#endif
#endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */
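The sdhci-pltfm change above defines one shared dev_pm_ops under CONFIG_PM and exposes it through the SDHCI_PLTFM_PMOPS macro, which falls back to NULL when power management is compiled out, so each glue driver only sets .driver.pm. A hypothetical standalone platform driver following the same pattern (all foo_* names are invented, a sketch rather than real code):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/slab.h>

struct foo_host {
	bool running;			/* placeholder device state */
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_host *host = kzalloc(sizeof(*host), GFP_KERNEL);

	if (!host)
		return -ENOMEM;
	host->running = true;
	platform_set_drvdata(pdev, host);
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	kfree(platform_get_drvdata(pdev));
	return 0;
}

#ifdef CONFIG_PM
static int foo_suspend(struct device *dev)
{
	struct foo_host *host = dev_get_drvdata(dev);

	host->running = false;		/* quiesce the imaginary hardware */
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct foo_host *host = dev_get_drvdata(dev);

	host->running = true;		/* bring it back up */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.suspend = foo_suspend,
	.resume  = foo_resume,
};
#define FOO_PMOPS (&foo_pm_ops)
#else
#define FOO_PMOPS NULL
#endif

static struct platform_driver foo_driver = {
	.probe  = foo_probe,
	.remove = foo_remove,
	.driver = {
		.name  = "foo",
		.owner = THIS_MODULE,
		.pm    = FOO_PMOPS,
	},
};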
diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c
index d4bf6d30c7b..7a039c3cb1f 100644
--- a/drivers/mmc/host/sdhci-pxav2.c
+++ b/drivers/mmc/host/sdhci-pxav2.c
@@ -218,13 +218,10 @@ static struct platform_driver sdhci_pxav2_driver = {
.driver = {
.name = "sdhci-pxav2",
.owner = THIS_MODULE,
+ .pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_pxav2_probe,
.remove = __devexit_p(sdhci_pxav2_remove),
-#ifdef CONFIG_PM
- .suspend = sdhci_pltfm_suspend,
- .resume = sdhci_pltfm_resume,
-#endif
};
static int __init sdhci_pxav2_init(void)
{
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index cff4ad3e7a5..15673a7ee6a 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -264,13 +264,10 @@ static struct platform_driver sdhci_pxav3_driver = {
.driver = {
.name = "sdhci-pxav3",
.owner = THIS_MODULE,
+ .pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_pxav3_probe,
.remove = __devexit_p(sdhci_pxav3_remove),
-#ifdef CONFIG_PM
- .suspend = sdhci_pltfm_suspend,
- .resume = sdhci_pltfm_resume,
-#endif
};
static int __init sdhci_pxav3_init(void)
{
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 3d00e722efc..0d33ff0d67f 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -622,33 +622,38 @@ static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
-static int sdhci_s3c_suspend(struct platform_device *dev, pm_message_t pm)
+static int sdhci_s3c_suspend(struct device *dev)
{
- struct sdhci_host *host = platform_get_drvdata(dev);
+ struct sdhci_host *host = dev_get_drvdata(dev);
- return sdhci_suspend_host(host, pm);
+ return sdhci_suspend_host(host);
}
-static int sdhci_s3c_resume(struct platform_device *dev)
+static int sdhci_s3c_resume(struct device *dev)
{
- struct sdhci_host *host = platform_get_drvdata(dev);
+ struct sdhci_host *host = dev_get_drvdata(dev);
return sdhci_resume_host(host);
}
+static const struct dev_pm_ops sdhci_s3c_pmops = {
+ .suspend = sdhci_s3c_suspend,
+ .resume = sdhci_s3c_resume,
+};
+
+#define SDHCI_S3C_PMOPS (&sdhci_s3c_pmops)
+
#else
-#define sdhci_s3c_suspend NULL
-#define sdhci_s3c_resume NULL
+#define SDHCI_S3C_PMOPS NULL
#endif
static struct platform_driver sdhci_s3c_driver = {
.probe = sdhci_s3c_probe,
.remove = __devexit_p(sdhci_s3c_remove),
- .suspend = sdhci_s3c_suspend,
- .resume = sdhci_s3c_resume,
.driver = {
.owner = THIS_MODULE,
.name = "s3c-sdhci",
+ .pm = SDHCI_S3C_PMOPS,
},
};
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 89699e861fc..e2e18d3f949 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -318,13 +318,10 @@ static struct platform_driver sdhci_tegra_driver = {
.name = "sdhci-tegra",
.owner = THIS_MODULE,
.of_match_table = sdhci_tegra_dt_match,
+ .pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_tegra_probe,
.remove = __devexit_p(sdhci_tegra_remove),
-#ifdef CONFIG_PM
- .suspend = sdhci_pltfm_suspend,
- .resume = sdhci_pltfm_resume,
-#endif
};
static int __init sdhci_tegra_init(void)
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 6d8eea32354..19ed580f2ca 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2327,7 +2327,7 @@ out:
#ifdef CONFIG_PM
-int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
+int sdhci_suspend_host(struct sdhci_host *host)
{
int ret;
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 0a5b65460d8..a04d4d0c6fd 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -374,7 +374,7 @@ extern int sdhci_add_host(struct sdhci_host *host);
extern void sdhci_remove_host(struct sdhci_host *host, int dead);
#ifdef CONFIG_PM
-extern int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state);
+extern int sdhci_suspend_host(struct sdhci_host *host);
extern int sdhci_resume_host(struct sdhci_host *host);
extern void sdhci_enable_irq_wakeups(struct sdhci_host *host);
#endif
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 369366c8e20..d5505f3fe2a 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -908,7 +908,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (host->power) {
pm_runtime_put(&host->pd->dev);
host->power = false;
- if (p->down_pwr)
+ if (p->down_pwr && ios->power_mode == MMC_POWER_OFF)
p->down_pwr(host->pd);
}
host->state = STATE_IDLE;
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index d85a60cda16..4208b395806 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -798,7 +798,7 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
/* start bus clock */
tmio_mmc_clk_start(host);
} else if (ios->power_mode != MMC_POWER_UP) {
- if (host->set_pwr)
+ if (host->set_pwr && ios->power_mode == MMC_POWER_OFF)
host->set_pwr(host->pdev, 0);
if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) &&
pdata->power) {
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index e8f6e65183d..2ec978bc32b 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -259,7 +259,7 @@ static int firmware_rom_wait_states = 0x04;
static int firmware_rom_wait_states = 0x1C;
#endif
-module_param(firmware_rom_wait_states, bool, 0644);
+module_param(firmware_rom_wait_states, int, 0644);
MODULE_PARM_DESC(firmware_rom_wait_states,
"ROM wait states byte=RRRIIEEE (Reserved Internal External)");
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index 94f55348972..45876d0e5b8 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -227,10 +227,14 @@ static int platram_probe(struct platform_device *pdev)
if (!err)
dev_info(&pdev->dev, "registered mtd device\n");
- /* add the whole device. */
- err = mtd_device_register(info->mtd, NULL, 0);
- if (err)
- dev_err(&pdev->dev, "failed to register the entire device\n");
+ if (pdata->nr_partitions) {
+ /* add the whole device. */
+ err = mtd_device_register(info->mtd, NULL, 0);
+ if (err) {
+ dev_err(&pdev->dev,
+ "failed to register the entire device\n");
+ }
+ }
return err;
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 411a17df9fc..2a25b6789af 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -98,7 +98,7 @@ static int __devinit pxa2xx_flash_probe(struct platform_device *pdev)
}
info->mtd->owner = THIS_MODULE;
- mtd_device_parse_register(info->mtd, probes, 0, NULL, 0);
+ mtd_device_parse_register(info->mtd, probes, 0, flash->parts, flash->nr_parts);
platform_set_drvdata(pdev, info);
return 0;
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 071b63420f0..493ec2fcf97 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -21,9 +21,9 @@
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
+#include <linux/module.h>
#include <linux/mtd/gpmi-nand.h>
#include <linux/mtd/partitions.h>
-
#include "gpmi-nand.h"
/* add our owner bbt descriptor */
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index ee1713907b9..f8aacf48ecd 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -188,7 +188,7 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
if (!flash_np)
return -ENODEV;
- ppdata->of_node = flash_np;
+ ppdata.of_node = flash_np;
ndfc->mtd.name = kasprintf(GFP_KERNEL, "%s.%s",
dev_name(&ndfc->ofdev->dev), flash_np->name);
if (!ndfc->mtd.name) {
diff --git a/drivers/net/arcnet/Kconfig b/drivers/net/arcnet/Kconfig
index a73d9dc80ff..84fb6349a59 100644
--- a/drivers/net/arcnet/Kconfig
+++ b/drivers/net/arcnet/Kconfig
@@ -4,7 +4,7 @@
menuconfig ARCNET
depends on NETDEVICES && (ISA || PCI || PCMCIA)
- bool "ARCnet support"
+ tristate "ARCnet support"
---help---
If you have a network card of this type, say Y and check out the
(arguably) beautiful poetry in
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b0c57725648..7f8756825b8 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2553,30 +2553,6 @@ re_arm:
}
}
-static __be32 bond_glean_dev_ip(struct net_device *dev)
-{
- struct in_device *idev;
- struct in_ifaddr *ifa;
- __be32 addr = 0;
-
- if (!dev)
- return 0;
-
- rcu_read_lock();
- idev = __in_dev_get_rcu(dev);
- if (!idev)
- goto out;
-
- ifa = idev->ifa_list;
- if (!ifa)
- goto out;
-
- addr = ifa->ifa_local;
-out:
- rcu_read_unlock();
- return addr;
-}
-
static int bond_has_this_ip(struct bonding *bond, __be32 ip)
{
struct vlan_entry *vlan;
@@ -3322,6 +3298,10 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
struct bonding *bond;
struct vlan_entry *vlan;
+ /* we only care about primary addresses */
+ if (ifa->ifa_flags & IFA_F_SECONDARY)
+ return NOTIFY_DONE;
+
list_for_each_entry(bond, &bn->dev_list, bond_list) {
if (bond->dev == event_dev) {
switch (event) {
@@ -3329,7 +3309,7 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
bond->master_ip = ifa->ifa_local;
return NOTIFY_OK;
case NETDEV_DOWN:
- bond->master_ip = bond_glean_dev_ip(bond->dev);
+ bond->master_ip = 0;
return NOTIFY_OK;
default:
return NOTIFY_DONE;
@@ -3345,8 +3325,7 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
vlan->vlan_ip = ifa->ifa_local;
return NOTIFY_OK;
case NETDEV_DOWN:
- vlan->vlan_ip =
- bond_glean_dev_ip(vlan_dev);
+ vlan->vlan_ip = 0;
return NOTIFY_OK;
default:
return NOTIFY_DONE;
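The bonding change above teaches the inetaddr notifier to ignore secondary addresses and to simply clear the cached address on NETDEV_DOWN instead of rescanning the device. A skeletal notifier with the same filtering, using invented example_* names (a sketch, not part of this patch):

#include <linux/inetdevice.h>
#include <linux/if_addr.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>

static int example_inetaddr_event(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = ptr;

	/* Mirror the bonding fix: react to primary addresses only. */
	if (ifa->ifa_flags & IFA_F_SECONDARY)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		/* record ifa->ifa_local as the interface's primary address */
		break;
	case NETDEV_DOWN:
		/* clear the cached address instead of rescanning the device */
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_inetaddr_notifier = {
	.notifier_call = example_inetaddr_event,
};
/* registered at init time with register_inetaddr_notifier(&example_inetaddr_notifier) */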
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 905bce0b3a4..2c7f5036f57 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -20,7 +20,6 @@
*/
#include <linux/kernel.h>
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 4cf835dbc12..3fb66d09ece 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -608,7 +608,7 @@ static void b44_tx(struct b44 *bp)
skb->len,
DMA_TO_DEVICE);
rp->skb = NULL;
- dev_kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
}
bp->tx_cons = cons;
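b44_tx() runs from the device interrupt, and freeing an skb there can invoke socket destructors that are not hard-irq safe, so the hunk above switches to dev_kfree_skb_irq(), which queues the skb on the per-CPU completion list and lets the TX softirq do the actual free. A generic helper showing the usual choices (names invented, a sketch rather than anything in this patch):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Free a transmitted skb appropriately for the calling context. */
static void example_free_tx_skb(struct sk_buff *skb, bool called_from_hardirq)
{
	if (called_from_hardirq)
		dev_kfree_skb_irq(skb);		/* deferred free via the completion queue */
	else
		dev_kfree_skb(skb);		/* immediate free; process/softirq context only */
}

/* When the context is unknown at the call site, dev_kfree_skb_any()
 * picks the right variant at run time. */
static void example_free_tx_skb_any(struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}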
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index bce203fa4b9..882f48f0a03 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -10327,6 +10327,43 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
return 0;
}
+
+static void bnx2x_5461x_set_link_led(struct bnx2x_phy *phy,
+ struct link_params *params, u8 mode)
+{
+ struct bnx2x *bp = params->bp;
+ u16 temp;
+
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ MDIO_REG_GPHY_SHADOW_LED_SEL1);
+ bnx2x_cl22_read(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ &temp);
+ temp &= 0xff00;
+
+ DP(NETIF_MSG_LINK, "54618x set link led (mode=%x)\n", mode);
+ switch (mode) {
+ case LED_MODE_FRONT_PANEL_OFF:
+ case LED_MODE_OFF:
+ temp |= 0x00ee;
+ break;
+ case LED_MODE_OPER:
+ temp |= 0x0001;
+ break;
+ case LED_MODE_ON:
+ temp |= 0x00ff;
+ break;
+ default:
+ break;
+ }
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
+ return;
+}
+
+
static void bnx2x_54618se_link_reset(struct bnx2x_phy *phy,
struct link_params *params)
{
@@ -11103,7 +11140,7 @@ static struct bnx2x_phy phy_54618se = {
.config_loopback = (config_loopback_t)bnx2x_54618se_config_loopback,
.format_fw_ver = (format_fw_ver_t)NULL,
.hw_reset = (hw_reset_t)NULL,
- .set_link_led = (set_link_led_t)NULL,
+ .set_link_led = (set_link_led_t)bnx2x_5461x_set_link_led,
.phy_specific_func = (phy_specific_func_t)NULL
};
/*****************************************************************/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index fc7bd0f23c0..e58073ef33b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -6990,6 +6990,7 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_REG_INTR_MASK 0x1b
#define MDIO_REG_INTR_MASK_LINK_STATUS (0x1 << 1)
#define MDIO_REG_GPHY_SHADOW 0x1c
+#define MDIO_REG_GPHY_SHADOW_LED_SEL1 (0x0d << 10)
#define MDIO_REG_GPHY_SHADOW_LED_SEL2 (0x0e << 10)
#define MDIO_REG_GPHY_SHADOW_WR_ENA (0x1 << 15)
#define MDIO_REG_GPHY_SHADOW_AUTO_DET_MED (0x1e << 10)
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 438f4580bf6..2a22f525635 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -613,7 +613,7 @@ static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
if (!dm->wake_state)
irq_set_irq_wake(dm->irq_wake, 1);
- else if (dm->wake_state & !opts)
+ else if (dm->wake_state && !opts)
irq_set_irq_wake(dm->irq_wake, 0);
}
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index c520cfd3b29..5272f9d4dda 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -24,6 +24,7 @@ config FEC
bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
ARCH_MXC || ARCH_MXS)
+ default ARCH_MXC || ARCH_MXS if ARM
select PHYLIB
---help---
Say Y here if you want to use the built-in 10/100 Fast ethernet
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index 1124ce0a159..c136230d50b 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -232,6 +232,7 @@ struct fec_enet_private {
struct platform_device *pdev;
int opened;
+ int dev_id;
/* Phylib and MDIO interface */
struct mii_bus *mii_bus;
@@ -837,7 +838,7 @@ static void __inline__ fec_get_mac(struct net_device *ndev)
/* Adjust MAC if using macaddr */
if (iap == macaddr)
- ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id;
+ ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
}
/* ------------------------------------------------------------------------- */
@@ -953,7 +954,7 @@ static int fec_enet_mii_probe(struct net_device *ndev)
char mdio_bus_id[MII_BUS_ID_SIZE];
char phy_name[MII_BUS_ID_SIZE + 3];
int phy_id;
- int dev_id = fep->pdev->id;
+ int dev_id = fep->dev_id;
fep->phy_dev = NULL;
@@ -1031,7 +1032,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
* mdio interface in board design, and need to be configured by
* fec0 mii_bus.
*/
- if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && pdev->id > 0) {
+ if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
/* fec1 uses fec0 mii_bus */
fep->mii_bus = fec0_mii_bus;
return 0;
@@ -1063,7 +1064,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
fep->mii_bus->read = fec_enet_mdio_read;
fep->mii_bus->write = fec_enet_mdio_write;
fep->mii_bus->reset = fec_enet_mdio_reset;
- snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id + 1);
+ snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", fep->dev_id + 1);
fep->mii_bus->priv = fep;
fep->mii_bus->parent = &pdev->dev;
@@ -1521,6 +1522,7 @@ fec_probe(struct platform_device *pdev)
int i, irq, ret = 0;
struct resource *r;
const struct of_device_id *of_id;
+ static int dev_id;
of_id = of_match_device(fec_dt_ids, &pdev->dev);
if (of_id)
@@ -1548,6 +1550,7 @@ fec_probe(struct platform_device *pdev)
fep->hwp = ioremap(r->start, resource_size(r));
fep->pdev = pdev;
+ fep->dev_id = dev_id++;
if (!fep->hwp) {
ret = -ENOMEM;
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 52f4e8ad48e..4d9f84b8ab9 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -183,28 +183,10 @@ void fsl_pq_mdio_bus_name(char *name, struct device_node *np)
}
EXPORT_SYMBOL_GPL(fsl_pq_mdio_bus_name);
-/* Scan the bus in reverse, looking for an empty spot */
-static int fsl_pq_mdio_find_free(struct mii_bus *new_bus)
-{
- int i;
-
- for (i = PHY_MAX_ADDR; i > 0; i--) {
- u32 phy_id;
-
- if (get_phy_id(new_bus, i, &phy_id))
- return -1;
-
- if (phy_id == 0xffffffff)
- break;
- }
-
- return i;
-}
-
-#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np)
{
+#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
struct gfar __iomem *enet_regs;
/*
@@ -220,15 +202,15 @@ static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct devi
} else if (of_device_is_compatible(np, "fsl,etsec2-mdio") ||
of_device_is_compatible(np, "fsl,etsec2-tbi")) {
return of_iomap(np, 1);
- } else
- return NULL;
-}
+ }
#endif
+ return NULL;
+}
-#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
{
+#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
struct device_node *np = NULL;
int err = 0;
@@ -261,9 +243,10 @@ static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
return err;
else
return -EINVAL;
-}
+#else
+ return -ENODEV;
#endif
-
+}
static int fsl_pq_mdio_probe(struct platform_device *ofdev)
{
@@ -339,19 +322,13 @@ static int fsl_pq_mdio_probe(struct platform_device *ofdev)
of_device_is_compatible(np, "fsl,etsec2-mdio") ||
of_device_is_compatible(np, "fsl,etsec2-tbi") ||
of_device_is_compatible(np, "gianfar")) {
-#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
tbipa = get_gfar_tbipa(regs, np);
if (!tbipa) {
err = -EINVAL;
goto err_free_irqs;
}
-#else
- err = -ENODEV;
- goto err_free_irqs;
-#endif
} else if (of_device_is_compatible(np, "fsl,ucc-mdio") ||
of_device_is_compatible(np, "ucc_geth_phy")) {
-#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
u32 id;
static u32 mii_mng_master;
@@ -364,10 +341,6 @@ static int fsl_pq_mdio_probe(struct platform_device *ofdev)
mii_mng_master = id;
ucc_set_qe_mux_mii_mng(id - 1);
}
-#else
- err = -ENODEV;
- goto err_free_irqs;
-#endif
} else {
err = -ENODEV;
goto err_free_irqs;
@@ -386,16 +359,6 @@ static int fsl_pq_mdio_probe(struct platform_device *ofdev)
}
if (tbiaddr == -1) {
- out_be32(tbipa, 0);
-
- tbiaddr = fsl_pq_mdio_find_free(new_bus);
- }
-
- /*
- * We define TBIPA at 0 to be illegal, opting to fail for boards that
- * have PHYs at 1-31, rather than change tbipa and rescan.
- */
- if (tbiaddr == 0) {
err = -EBUSY;
goto err_free_irqs;
diff --git a/drivers/net/ethernet/ibm/ehea/ehea.h b/drivers/net/ethernet/ibm/ehea/ehea.h
index 410d6a1984e..6650068c996 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea.h
+++ b/drivers/net/ethernet/ibm/ehea/ehea.h
@@ -61,9 +61,9 @@
#ifdef EHEA_SMALL_QUEUES
#define EHEA_MAX_CQE_COUNT 1023
#define EHEA_DEF_ENTRIES_SQ 1023
-#define EHEA_DEF_ENTRIES_RQ1 4095
+#define EHEA_DEF_ENTRIES_RQ1 1023
#define EHEA_DEF_ENTRIES_RQ2 1023
-#define EHEA_DEF_ENTRIES_RQ3 1023
+#define EHEA_DEF_ENTRIES_RQ3 511
#else
#define EHEA_MAX_CQE_COUNT 4080
#define EHEA_DEF_ENTRIES_SQ 4080
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 37b70f7052b..bfeccbfde23 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -371,7 +371,8 @@ static void ehea_update_stats(struct work_struct *work)
out_herr:
free_page((unsigned long)cb2);
resched:
- schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000));
+ schedule_delayed_work(&port->stats_work,
+ round_jiffies_relative(msecs_to_jiffies(1000)));
}
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
@@ -2434,7 +2435,8 @@ static int ehea_open(struct net_device *dev)
}
mutex_unlock(&port->port_lock);
- schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000));
+ schedule_delayed_work(&port->stats_work,
+ round_jiffies_relative(msecs_to_jiffies(1000)));
return ret;
}
diff --git a/drivers/net/ethernet/ibm/iseries_veth.c b/drivers/net/ethernet/ibm/iseries_veth.c
index 4326681df38..acc31af6594 100644
--- a/drivers/net/ethernet/ibm/iseries_veth.c
+++ b/drivers/net/ethernet/ibm/iseries_veth.c
@@ -1421,7 +1421,7 @@ static void veth_receive(struct veth_lpar_connection *cnx,
/* FIXME: do we need this? */
memset(local_list, 0, sizeof(local_list));
- memset(remote_list, 0, sizeof(VETH_MAX_FRAMES_PER_MSG));
+ memset(remote_list, 0, sizeof(remote_list));
/* a 0 address marks the end of the valid entries */
if (senddata->addr[startchunk] == 0)
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 7becff1f387..76b84573566 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1745,6 +1745,112 @@ jme_phy_off(struct jme_adapter *jme)
}
static int
+jme_phy_specreg_read(struct jme_adapter *jme, u32 specreg)
+{
+ u32 phy_addr;
+
+ phy_addr = JM_PHY_SPEC_REG_READ | specreg;
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
+ phy_addr);
+ return jme_mdio_read(jme->dev, jme->mii_if.phy_id,
+ JM_PHY_SPEC_DATA_REG);
+}
+
+static void
+jme_phy_specreg_write(struct jme_adapter *jme, u32 ext_reg, u32 phy_data)
+{
+ u32 phy_addr;
+
+ phy_addr = JM_PHY_SPEC_REG_WRITE | ext_reg;
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_DATA_REG,
+ phy_data);
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
+ phy_addr);
+}
+
+static int
+jme_phy_calibration(struct jme_adapter *jme)
+{
+ u32 ctrl1000, phy_data;
+
+ jme_phy_off(jme);
+ jme_phy_on(jme);
+ /* Enable PHY test mode 1 */
+ ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
+ ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK;
+ ctrl1000 |= PHY_GAD_TEST_MODE_1;
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000);
+
+ phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG);
+ phy_data &= ~JM_PHY_EXT_COMM_2_CALI_MODE_0;
+ phy_data |= JM_PHY_EXT_COMM_2_CALI_LATCH |
+ JM_PHY_EXT_COMM_2_CALI_ENABLE;
+ jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data);
+ msleep(20);
+ phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG);
+ phy_data &= ~(JM_PHY_EXT_COMM_2_CALI_ENABLE |
+ JM_PHY_EXT_COMM_2_CALI_MODE_0 |
+ JM_PHY_EXT_COMM_2_CALI_LATCH);
+ jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data);
+
+ /* Disable PHY test mode */
+ ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
+ ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK;
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000);
+ return 0;
+}
+
+static int
+jme_phy_setEA(struct jme_adapter *jme)
+{
+ u32 phy_comm0 = 0, phy_comm1 = 0;
+ u8 nic_ctrl;
+
+ pci_read_config_byte(jme->pdev, PCI_PRIV_SHARE_NICCTRL, &nic_ctrl);
+ if ((nic_ctrl & 0x3) == JME_FLAG_PHYEA_ENABLE)
+ return 0;
+
+ switch (jme->pdev->device) {
+ case PCI_DEVICE_ID_JMICRON_JMC250:
+ if (((jme->chip_main_rev == 5) &&
+ ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) ||
+ (jme->chip_sub_rev == 3))) ||
+ (jme->chip_main_rev >= 6)) {
+ phy_comm0 = 0x008A;
+ phy_comm1 = 0x4109;
+ }
+ if ((jme->chip_main_rev == 3) &&
+ ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2)))
+ phy_comm0 = 0xE088;
+ break;
+ case PCI_DEVICE_ID_JMICRON_JMC260:
+ if (((jme->chip_main_rev == 5) &&
+ ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) ||
+ (jme->chip_sub_rev == 3))) ||
+ (jme->chip_main_rev >= 6)) {
+ phy_comm0 = 0x008A;
+ phy_comm1 = 0x4109;
+ }
+ if ((jme->chip_main_rev == 3) &&
+ ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2)))
+ phy_comm0 = 0xE088;
+ if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 0))
+ phy_comm0 = 0x608A;
+ if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 2))
+ phy_comm0 = 0x408A;
+ break;
+ default:
+ return -ENODEV;
+ }
+ if (phy_comm0)
+ jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_0_REG, phy_comm0);
+ if (phy_comm1)
+ jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_1_REG, phy_comm1);
+
+ return 0;
+}
+
+static int
jme_open(struct net_device *netdev)
{
struct jme_adapter *jme = netdev_priv(netdev);
@@ -1769,7 +1875,8 @@ jme_open(struct net_device *netdev)
jme_set_settings(netdev, &jme->old_ecmd);
else
jme_reset_phy_processor(jme);
-
+ jme_phy_calibration(jme);
+ jme_phy_setEA(jme);
jme_reset_link(jme);
return 0;
@@ -3184,7 +3291,8 @@ jme_resume(struct device *dev)
jme_set_settings(netdev, &jme->old_ecmd);
else
jme_reset_phy_processor(jme);
-
+ jme_phy_calibration(jme);
+ jme_phy_setEA(jme);
jme_start_irq(jme);
netif_device_attach(netdev);
@@ -3239,4 +3347,3 @@ MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, jme_pci_tbl);
-
diff --git a/drivers/net/ethernet/jme.h b/drivers/net/ethernet/jme.h
index 02ea27c1dcb..4304072bd3c 100644
--- a/drivers/net/ethernet/jme.h
+++ b/drivers/net/ethernet/jme.h
@@ -760,6 +760,25 @@ enum jme_rxmcs_bits {
RXMCS_CHECKSUM,
};
+/* External PHY common register 2 */
+
+#define PHY_GAD_TEST_MODE_1 0x00002000
+#define PHY_GAD_TEST_MODE_MSK 0x0000E000
+#define JM_PHY_SPEC_REG_READ 0x00004000
+#define JM_PHY_SPEC_REG_WRITE 0x00008000
+#define PHY_CALIBRATION_DELAY 20
+#define JM_PHY_SPEC_ADDR_REG 0x1E
+#define JM_PHY_SPEC_DATA_REG 0x1F
+
+#define JM_PHY_EXT_COMM_0_REG 0x30
+#define JM_PHY_EXT_COMM_1_REG 0x31
+#define JM_PHY_EXT_COMM_2_REG 0x32
+#define JM_PHY_EXT_COMM_2_CALI_ENABLE 0x01
+#define JM_PHY_EXT_COMM_2_CALI_MODE_0 0x02
+#define JM_PHY_EXT_COMM_2_CALI_LATCH 0x10
+#define PCI_PRIV_SHARE_NICCTRL 0xF5
+#define JME_FLAG_PHYEA_ENABLE 0x2
+
/*
* Wakeup Frame setup interface registers
*/
diff --git a/drivers/net/ethernet/pasemi/Makefile b/drivers/net/ethernet/pasemi/Makefile
index 05db5434baf..90497ffb1ac 100644
--- a/drivers/net/ethernet/pasemi/Makefile
+++ b/drivers/net/ethernet/pasemi/Makefile
@@ -2,4 +2,5 @@
# Makefile for the A Semi network device drivers.
#
-obj-$(CONFIG_PASEMI_MAC) += pasemi_mac.o pasemi_mac_ethtool.o
+obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o
+pasemi_mac_driver-objs := pasemi_mac.o pasemi_mac_ethtool.o
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index 8731f79c9ef..b8478aab050 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -58,10 +58,8 @@
#define TX_DESC_PER_IOCB 8
-/* The maximum number of frags we handle is based
- * on PAGE_SIZE...
- */
-#if (PAGE_SHIFT == 12) || (PAGE_SHIFT == 13) /* 4k & 8k pages */
+
+#if ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2) > 0
#define TX_DESC_PER_OAL ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2)
#else /* all other page sizes */
#define TX_DESC_PER_OAL 0
@@ -1353,7 +1351,7 @@ struct tx_ring_desc {
struct ob_mac_iocb_req *queue_entry;
u32 index;
struct oal oal;
- struct map_list map[MAX_SKB_FRAGS + 1];
+ struct map_list map[MAX_SKB_FRAGS + 2];
int map_cnt;
struct tx_ring_desc *next;
};
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 6f06aa10f0d..67bf0781999 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -1183,11 +1183,13 @@ static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr)
return value;
}
-static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr)
+static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
{
- RTL_W16(IntrMask, 0x0000);
+ void __iomem *ioaddr = tp->mmio_addr;
- RTL_W16(IntrStatus, 0xffff);
+ RTL_W16(IntrMask, 0x0000);
+ RTL_W16(IntrStatus, tp->intr_event);
+ RTL_R8(ChipCmd);
}
static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
@@ -3933,8 +3935,6 @@ static void rtl_hw_reset(struct rtl8169_private *tp)
break;
udelay(100);
}
-
- rtl8169_init_ring_indexes(tp);
}
static int __devinit
@@ -4339,7 +4339,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
void __iomem *ioaddr = tp->mmio_addr;
/* Disable interrupts */
- rtl8169_irq_mask_and_ack(ioaddr);
+ rtl8169_irq_mask_and_ack(tp);
rtl_rx_close(tp);
@@ -4885,8 +4885,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
RTL_W16(IntrMitigate, 0x5151);
/* Work around for RxFIFO overflow. */
- if (tp->mac_version == RTL_GIGA_MAC_VER_11 ||
- tp->mac_version == RTL_GIGA_MAC_VER_22) {
+ if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
tp->intr_event |= RxFIFOOver | PCSTimeout;
tp->intr_event &= ~RxOverflow;
}
@@ -5076,6 +5075,11 @@ static void rtl_hw_start_8101(struct net_device *dev)
void __iomem *ioaddr = tp->mmio_addr;
struct pci_dev *pdev = tp->pci_dev;
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_30) {
+ tp->intr_event &= ~RxFIFOOver;
+ tp->napi_event &= ~RxFIFOOver;
+ }
+
if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
tp->mac_version == RTL_GIGA_MAC_VER_16) {
int cap = pci_pcie_cap(pdev);
@@ -5342,7 +5346,7 @@ static void rtl8169_wait_for_quiescence(struct net_device *dev)
/* Wait for any pending NAPI task to complete */
napi_disable(&tp->napi);
- rtl8169_irq_mask_and_ack(ioaddr);
+ rtl8169_irq_mask_and_ack(tp);
tp->intr_mask = 0xffff;
RTL_W16(IntrMask, tp->intr_event);
@@ -5389,14 +5393,16 @@ static void rtl8169_reset_task(struct work_struct *work)
if (!netif_running(dev))
goto out_unlock;
+ rtl8169_hw_reset(tp);
+
rtl8169_wait_for_quiescence(dev);
for (i = 0; i < NUM_RX_DESC; i++)
rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);
rtl8169_tx_clear(tp);
+ rtl8169_init_ring_indexes(tp);
- rtl8169_hw_reset(tp);
rtl_hw_start(dev);
netif_wake_queue(dev);
rtl8169_check_link_status(dev, tp, tp->mmio_addr);
@@ -5407,11 +5413,6 @@ out_unlock:
static void rtl8169_tx_timeout(struct net_device *dev)
{
- struct rtl8169_private *tp = netdev_priv(dev);
-
- rtl8169_hw_reset(tp);
-
- /* Let's wait a bit while any (async) irq lands on */
rtl8169_schedule_work(dev, rtl8169_reset_task);
}
@@ -5804,6 +5805,10 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
*/
status = RTL_R16(IntrStatus);
while (status && status != 0xffff) {
+ status &= tp->intr_event;
+ if (!status)
+ break;
+
handled = 1;
/* Handle all of the error cases first. These will reset
@@ -5818,27 +5823,9 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
switch (tp->mac_version) {
/* Work around for rx fifo overflow */
case RTL_GIGA_MAC_VER_11:
- case RTL_GIGA_MAC_VER_22:
- case RTL_GIGA_MAC_VER_26:
netif_stop_queue(dev);
rtl8169_tx_timeout(dev);
goto done;
- /* Testers needed. */
- case RTL_GIGA_MAC_VER_17:
- case RTL_GIGA_MAC_VER_19:
- case RTL_GIGA_MAC_VER_20:
- case RTL_GIGA_MAC_VER_21:
- case RTL_GIGA_MAC_VER_23:
- case RTL_GIGA_MAC_VER_24:
- case RTL_GIGA_MAC_VER_27:
- case RTL_GIGA_MAC_VER_28:
- case RTL_GIGA_MAC_VER_31:
- /* Experimental science. Pktgen proof. */
- case RTL_GIGA_MAC_VER_12:
- case RTL_GIGA_MAC_VER_25:
- if (status == RxFIFOOver)
- goto done;
- break;
default:
break;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 8ea770a89f2..72cd190b9c1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -781,10 +781,15 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv)
unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
- /* Do not manage MMC IRQ (FIXME) */
+ /* Mask the MMC irq: counters are managed in SW and the
+ * registers are cleared on each read. */
dwmac_mmc_intr_all_mask(priv->ioaddr);
- dwmac_mmc_ctrl(priv->ioaddr, mode);
- memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
+
+ if (priv->dma_cap.rmon) {
+ dwmac_mmc_ctrl(priv->ioaddr, mode);
+ memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
+ } else
+ pr_info(" No MAC Management Counters available");
}
static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
@@ -1012,8 +1017,7 @@ static int stmmac_open(struct net_device *dev)
memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
priv->xstats.threshold = tc;
- if (priv->dma_cap.rmon)
- stmmac_mmc_setup(priv);
+ stmmac_mmc_setup(priv);
/* Start the ball rolling... */
DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 10826d8a2a2..1187a1169eb 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -926,7 +926,7 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
goto done;
/* Re-enable the ingress interrupt. */
- enable_percpu_irq(priv->intr_id);
+ enable_percpu_irq(priv->intr_id, 0);
/* HACK: Avoid the "rotting packet" problem (see above). */
if (qup->__packet_receive_read !=
@@ -1296,7 +1296,7 @@ static void tile_net_open_enable(void *dev_ptr)
info->napi_enabled = true;
/* Enable the ingress interrupt. */
- enable_percpu_irq(priv->intr_id);
+ enable_percpu_irq(priv->intr_id, 0);
}
@@ -1697,7 +1697,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
for (i = 0; i < sh->nr_frags; i++) {
skb_frag_t *f = &sh->frags[i];
- unsigned long pfn = page_to_pfn(f->page);
+ unsigned long pfn = page_to_pfn(skb_frag_page(f));
/* FIXME: Compute "hash_for_home" properly. */
/* ISSUE: The hypervisor checks CHIP_HAS_REV1_DMA_PACKETS(). */
@@ -1706,7 +1706,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
/* FIXME: Hmmm. */
if (!hash_default) {
void *va = pfn_to_kaddr(pfn) + f->page_offset;
- BUG_ON(PageHighMem(f->page));
+ BUG_ON(PageHighMem(skb_frag_page(f)));
finv_buffer_remote(va, f->size, 0);
}
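The tilepro change above stops dereferencing f->page directly and uses the skb_frag_page() accessor, which is the interface drivers are expected to use now that the fragment layout is opaque. A generic fragment walk with invented names (illustrative only, not part of this patch):

#include <linux/skbuff.h>

/* Visit every paged fragment of an skb through the accessor helper. */
static void example_walk_frags(struct sk_buff *skb)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	int i;

	for (i = 0; i < sh->nr_frags; i++) {
		skb_frag_t *f = &sh->frags[i];
		struct page *page = skb_frag_page(f);	/* accessor instead of f->page */

		/* page plus f->page_offset and the fragment length describe the data */
		(void)page;
	}
}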
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index bb88e12101c..a70244306c9 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -3,7 +3,7 @@
#
menuconfig PHYLIB
- bool "PHY Device support and infrastructure"
+ tristate "PHY Device support and infrastructure"
depends on !S390
depends on NETDEVICES
help
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 89f829f5f72..f8a6853b692 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -423,10 +423,8 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
lock_sock(sk);
opt->src_addr = sp->sa_addr.pptp;
- if (add_chan(po)) {
- release_sock(sk);
+ if (add_chan(po))
error = -EBUSY;
- }
release_sock(sk);
return error;
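The pptp hunk above removes a double release_sock(): the error path unlocked and then fell through to the common unlock. A small sketch of the single-unlock shape it converges on, with invented names and an invented callback parameter (not part of this patch):

#include <net/sock.h>

/* Take the socket lock once and release it exactly once on every path. */
static int example_locked_op(struct sock *sk, int (*op)(struct sock *sk))
{
	int err;

	lock_sock(sk);
	err = op(sk);
	release_sock(sk);	/* single unlock, regardless of op()'s result */
	return err;
}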
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 2f91acccb7d..8873c6e6fb9 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1827,7 +1827,8 @@ static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip)
}
/* Clear Bit 14 of AR_WA after putting chip into Full Sleep mode. */
- REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
}
/*
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 93fbe6f4089..d2348a5a780 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -286,7 +286,7 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
ath_start_ani(common);
}
- if (ath9k_hw_ops(ah)->antdiv_comb_conf_get && sc->ant_rx != 3) {
+ if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx != 3) {
struct ath_hw_antcomb_conf div_ant_conf;
u8 lna_conf;
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index e12b48c2cff..dd008b0e641 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -191,6 +191,7 @@ static struct iwl_base_params iwl1000_base_params = {
.chain_noise_scale = 1000,
.wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 128,
+ .wd_disable = true,
};
static struct iwl_ht_params iwl1000_ht_params = {
.ht_greenfield_support = true,
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index c511c98a89a..f55fb2d1af5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -364,6 +364,7 @@ static struct iwl_base_params iwl5000_base_params = {
.wd_timeout = IWL_LONG_WD_TIMEOUT,
.max_event_log_size = 512,
.no_idle_support = true,
+ .wd_disable = true,
};
static struct iwl_ht_params iwl5000_ht_params = {
.ht_greenfield_support = true,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
index 58a381c01c8..a7a6def40d0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
@@ -528,6 +528,24 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
return 0;
}
+void iwlagn_config_ht40(struct ieee80211_conf *conf,
+ struct iwl_rxon_context *ctx)
+{
+ if (conf_is_ht40_minus(conf)) {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+ ctx->ht.is_40mhz = true;
+ } else if (conf_is_ht40_plus(conf)) {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+ ctx->ht.is_40mhz = true;
+ } else {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_NONE;
+ ctx->ht.is_40mhz = false;
+ }
+}
+
int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
{
struct iwl_priv *priv = hw->priv;
@@ -586,19 +604,11 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
ctx->ht.enabled = conf_is_ht(conf);
if (ctx->ht.enabled) {
- if (conf_is_ht40_minus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_BELOW;
- ctx->ht.is_40mhz = true;
- } else if (conf_is_ht40_plus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
- ctx->ht.is_40mhz = true;
- } else {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_NONE;
- ctx->ht.is_40mhz = false;
- }
+ /* if HT40 is used, it should not change
+ * after association except on a channel switch */
+ if (iwl_is_associated_ctx(ctx) &&
+ !ctx->ht.is_40mhz)
+ iwlagn_config_ht40(conf, ctx);
} else
ctx->ht.is_40mhz = false;
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
index ed628362393..4b2aa1da095 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
@@ -1268,9 +1268,6 @@ int iwl_set_dynamic_key(struct iwl_priv *priv,
switch (keyconf->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
- keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
- keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
-
if (sta)
addr = sta->addr;
else /* station mode case only */
@@ -1283,8 +1280,6 @@ int iwl_set_dynamic_key(struct iwl_priv *priv,
seq.tkip.iv32, p1k, CMD_SYNC);
break;
case WLAN_CIPHER_SUITE_CCMP:
- keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
- /* fall through */
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
ret = iwlagn_send_sta_key(priv, keyconf, sta_id,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index ccba69b7f8a..bacc06c95e7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -2316,6 +2316,17 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return -EOPNOTSUPP;
}
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+ /* fall through */
+ case WLAN_CIPHER_SUITE_CCMP:
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ break;
+ default:
+ break;
+ }
+
/*
* We could program these keys into the hardware as well, but we
* don't expect much multicast traffic in IBSS and having keys
@@ -2599,21 +2610,9 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
/* Configure HT40 channels */
ctx->ht.enabled = conf_is_ht(conf);
- if (ctx->ht.enabled) {
- if (conf_is_ht40_minus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_BELOW;
- ctx->ht.is_40mhz = true;
- } else if (conf_is_ht40_plus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
- ctx->ht.is_40mhz = true;
- } else {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_NONE;
- ctx->ht.is_40mhz = false;
- }
- } else
+ if (ctx->ht.enabled)
+ iwlagn_config_ht40(conf, ctx);
+ else
ctx->ht.is_40mhz = false;
if ((le16_to_cpu(ctx->staging.channel) != ch))
@@ -3499,9 +3498,10 @@ MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])");
module_param_named(ack_check, iwlagn_mod_params.ack_check, bool, S_IRUGO);
MODULE_PARM_DESC(ack_check, "Check ack health (default: 0 [disabled])");
-module_param_named(wd_disable, iwlagn_mod_params.wd_disable, bool, S_IRUGO);
+module_param_named(wd_disable, iwlagn_mod_params.wd_disable, int, S_IRUGO);
MODULE_PARM_DESC(wd_disable,
- "Disable stuck queue watchdog timer (default: 0 [enabled])");
+ "Disable stuck queue watchdog timer 0=system default, "
+ "1=disable, 2=enable (default: 0)");
/*
* set bt_coex_active to true, uCode will do kill/defer
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
index 5b936ec1a54..3856abaea50 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -86,6 +86,8 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
u32 changes);
+void iwlagn_config_ht40(struct ieee80211_conf *conf,
+ struct iwl_rxon_context *ctx);
/* uCode */
int iwlagn_rx_calib_result(struct iwl_priv *priv,
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 001fdf140ab..fcf54160e4e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -1810,11 +1810,23 @@ void iwl_setup_watchdog(struct iwl_priv *priv)
{
unsigned int timeout = priv->cfg->base_params->wd_timeout;
- if (timeout && !iwlagn_mod_params.wd_disable)
- mod_timer(&priv->watchdog,
- jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout)));
- else
- del_timer(&priv->watchdog);
+ if (!iwlagn_mod_params.wd_disable) {
+ /* use system default */
+ if (timeout && !priv->cfg->base_params->wd_disable)
+ mod_timer(&priv->watchdog,
+ jiffies +
+ msecs_to_jiffies(IWL_WD_TICK(timeout)));
+ else
+ del_timer(&priv->watchdog);
+ } else {
+ /* the module parameter overrides the default configuration */
+ if (timeout && iwlagn_mod_params.wd_disable == 2)
+ mod_timer(&priv->watchdog,
+ jiffies +
+ msecs_to_jiffies(IWL_WD_TICK(timeout)));
+ else
+ del_timer(&priv->watchdog);
+ }
}
/**
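The nested branches in iwl_setup_watchdog() above implement a three-way policy for the wd_disable module parameter. An illustrative restatement as a small predicate, with invented names and under the assumption that only the values 0, 1 and 2 are meaningful (not part of this patch):

#include <linux/types.h>

/*
 * Should the stuck-queue watchdog run?
 *  wd_disable == 0: follow the per-device default (base_params->wd_disable)
 *  wd_disable == 2: force the watchdog on
 *  anything else  : force it off
 * A zero timeout disables it in every case.
 */
static bool example_watchdog_enabled(int wd_disable, bool device_default_disabled,
				     unsigned int timeout)
{
	if (!timeout)
		return false;
	if (wd_disable == 0)
		return !device_default_disabled;
	return wd_disable == 2;
}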
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 137da338070..f2fc288f3dd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -113,6 +113,7 @@ struct iwl_lib_ops {
* @shadow_reg_enable: HW shadhow register bit
* @no_idle_support: do not support idle mode
* @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up
+ * @wd_disable: disable watchdog timer
*/
struct iwl_base_params {
int eeprom_size;
@@ -134,6 +135,7 @@ struct iwl_base_params {
const bool shadow_reg_enable;
const bool no_idle_support;
const bool hd_v2;
+ const bool wd_disable;
};
/*
* @advanced_bt_coexist: support advanced bt coexist
diff --git a/drivers/net/wireless/iwlwifi/iwl-shared.h b/drivers/net/wireless/iwlwifi/iwl-shared.h
index 1f7a93c67c4..14eaf37ce3b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-shared.h
+++ b/drivers/net/wireless/iwlwifi/iwl-shared.h
@@ -120,7 +120,7 @@ extern struct iwl_mod_params iwlagn_mod_params;
* @restart_fw: restart firmware, default = 1
* @plcp_check: enable plcp health check, default = true
* @ack_check: disable ack health check, default = false
- * @wd_disable: enable stuck queue check, default = false
+ * @wd_disable: stuck queue watchdog control, 0 = system default, 1 = disable, 2 = enable (default: 0)
* @bt_coex_active: enable bt coex, default = true
* @led_mode: system default, default = 0
* @no_sleep_autoadjust: disable autoadjust, default = true
@@ -141,7 +141,7 @@ struct iwl_mod_params {
int restart_fw;
bool plcp_check;
bool ack_check;
- bool wd_disable;
+ int wd_disable;
bool bt_coex_active;
int led_mode;
bool no_sleep_autoadjust;
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c
index f18df82eeb9..78d0d698855 100644
--- a/drivers/net/wireless/p54/p54spi.c
+++ b/drivers/net/wireless/p54/p54spi.c
@@ -588,8 +588,6 @@ static void p54spi_op_stop(struct ieee80211_hw *dev)
WARN_ON(priv->fw_state != FW_STATE_READY);
- cancel_work_sync(&priv->work);
-
p54spi_power_off(priv);
spin_lock_irqsave(&priv->tx_lock, flags);
INIT_LIST_HEAD(&priv->tx_pending);
@@ -597,6 +595,8 @@ static void p54spi_op_stop(struct ieee80211_hw *dev)
priv->fw_state = FW_STATE_OFF;
mutex_unlock(&priv->mutex);
+
+ cancel_work_sync(&priv->work);
}
static int __devinit p54spi_probe(struct spi_device *spi)
@@ -656,6 +656,7 @@ static int __devinit p54spi_probe(struct spi_device *spi)
init_completion(&priv->fw_comp);
INIT_LIST_HEAD(&priv->tx_pending);
mutex_init(&priv->mutex);
+ spin_lock_init(&priv->tx_lock);
SET_IEEE80211_DEV(hw, &spi->dev);
priv->common.open = p54spi_op_start;
priv->common.stop = p54spi_op_stop;
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index d97a2caf582..bc2ba80c47b 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -778,7 +778,7 @@ prism54_get_essid(struct net_device *ndev, struct iw_request_info *info,
dwrq->flags = 0;
dwrq->length = 0;
}
- essid->octets[essid->length] = '\0';
+ essid->octets[dwrq->length] = '\0';
memcpy(extra, essid->octets, dwrq->length);
kfree(essid);
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 3f183a15186..1ba079dffb1 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -3771,7 +3771,7 @@ static void rt2800_efuse_read(struct rt2x00_dev *rt2x00dev, unsigned int i)
/* Apparently the data is read from end to start */
rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, &reg);
/* The returned value is in CPU order, but eeprom is le */
- rt2x00dev->eeprom[i] = cpu_to_le32(reg);
+ *(u32 *)&rt2x00dev->eeprom[i] = cpu_to_le32(reg);
rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, &reg);
*(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg);
rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, &reg);
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index db526284454..55c8e50f45f 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -395,7 +395,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
if (mac->link_state != MAC80211_LINKED)
return;
- spin_lock(&rtlpriv->locks.lps_lock);
+ spin_lock_irq(&rtlpriv->locks.lps_lock);
/* Idle for a while if we connect to AP a while ago. */
if (mac->cnt_after_linked >= 2) {
@@ -407,7 +407,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
}
}
- spin_unlock(&rtlpriv->locks.lps_lock);
+ spin_unlock_irq(&rtlpriv->locks.lps_lock);
}
/*Leave the leisure power save mode.*/
@@ -416,8 +416,9 @@ void rtl_lps_leave(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ unsigned long flags;
- spin_lock(&rtlpriv->locks.lps_lock);
+ spin_lock_irqsave(&rtlpriv->locks.lps_lock, flags);
if (ppsc->fwctrl_lps) {
if (ppsc->dot11_psmode != EACTIVE) {
@@ -438,7 +439,7 @@ void rtl_lps_leave(struct ieee80211_hw *hw)
rtl_lps_set_psmode(hw, EACTIVE);
}
}
- spin_unlock(&rtlpriv->locks.lps_lock);
+ spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flags);
}
/* For sw LPS*/
@@ -539,9 +540,9 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw)
RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
}
- spin_lock(&rtlpriv->locks.lps_lock);
+ spin_lock_irq(&rtlpriv->locks.lps_lock);
rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS);
- spin_unlock(&rtlpriv->locks.lps_lock);
+ spin_unlock_irq(&rtlpriv->locks.lps_lock);
}
void rtl_swlps_rfon_wq_callback(void *data)
@@ -574,9 +575,9 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw)
if (rtlpriv->link_info.busytraffic)
return;
- spin_lock(&rtlpriv->locks.lps_lock);
+ spin_lock_irq(&rtlpriv->locks.lps_lock);
rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS);
- spin_unlock(&rtlpriv->locks.lps_lock);
+ spin_unlock_irq(&rtlpriv->locks.lps_lock);
if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM &&
!RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
index 592a10ac592..3b585aadabf 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
@@ -569,7 +569,7 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
}
case ERFSLEEP:{
if (ppsc->rfpwr_state == ERFOFF)
- break;
+ return false;
for (queue_id = 0, i = 0;
queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
ring = &pcipriv->dev.tx_ring[queue_id];
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
index 72852900df8..e49cf2244c7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
@@ -548,7 +548,7 @@ static bool _rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
break;
case ERFSLEEP:
if (ppsc->rfpwr_state == ERFOFF)
- break;
+ return false;
for (queue_id = 0, i = 0;
queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
ring = &pcipriv->dev.tx_ring[queue_id];
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
index 3ac7af1c550..0883349e1c8 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
@@ -3374,7 +3374,7 @@ bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
break;
case ERFSLEEP:
if (ppsc->rfpwr_state == ERFOFF)
- break;
+ return false;
for (queue_id = 0, i = 0;
queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
index f27171af979..f10ac1ad908 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
@@ -602,7 +602,7 @@ bool rtl92s_phy_set_rf_power_state(struct ieee80211_hw *hw,
}
case ERFSLEEP:
if (ppsc->rfpwr_state == ERFOFF)
- break;
+ return false;
for (queue_id = 0, i = 0;
queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 0cb594c8609..15e332d08c8 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1021,7 +1021,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
pending_idx = *((u16 *)skb->data);
xen_netbk_idx_release(netbk, pending_idx);
for (j = start; j < i; j++) {
- pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
+ pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
xen_netbk_idx_release(netbk, pending_idx);
}
@@ -1668,7 +1668,7 @@ static int __init netback_init(void)
"netback/%u", group);
if (IS_ERR(netbk->task)) {
- printk(KERN_ALERT "kthread_run() fails at netback\n");
+ printk(KERN_ALERT "kthread_create() fails at netback\n");
del_timer(&netbk->net_timer);
rc = PTR_ERR(netbk->task);
goto failed_init;
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 791270b8bd1..0f0cfa3bca3 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -26,11 +26,6 @@
#include <linux/string.h>
#include <linux/slab.h>
-/* For archs that don't support NO_IRQ (such as x86), provide a dummy value */
-#ifndef NO_IRQ
-#define NO_IRQ 0
-#endif
-
/**
* irq_of_parse_and_map - Parse and map an interrupt into linux virq space
* @device: Device node of the device whose interrupt is to be mapped
@@ -44,7 +39,7 @@ unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
struct of_irq oirq;
if (of_irq_map_one(dev, index, &oirq))
- return NO_IRQ;
+ return 0;
return irq_create_of_mapping(oirq.controller, oirq.specifier,
oirq.size);
@@ -345,7 +340,7 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
/* Only dereference the resource if both the
* resource and the irq are valid. */
- if (r && irq != NO_IRQ) {
+ if (r && irq) {
r->start = r->end = irq;
r->flags = IORESOURCE_IRQ;
r->name = dev->full_name;
@@ -363,7 +358,7 @@ int of_irq_count(struct device_node *dev)
{
int nr = 0;
- while (of_irq_to_resource(dev, nr, NULL) != NO_IRQ)
+ while (of_irq_to_resource(dev, nr, NULL))
nr++;
return nr;
@@ -383,7 +378,7 @@ int of_irq_to_resource_table(struct device_node *dev, struct resource *res,
int i;
for (i = 0; i < nr_irqs; i++, res++)
- if (of_irq_to_resource(dev, i, res) == NO_IRQ)
+ if (!of_irq_to_resource(dev, i, res))
break;
return i;
@@ -424,6 +419,8 @@ void __init of_irq_init(const struct of_device_id *matches)
desc->dev = np;
desc->interrupt_parent = of_irq_find_parent(np);
+ if (desc->interrupt_parent == np)
+ desc->interrupt_parent = NULL;
list_add_tail(&desc->list, &intc_desc_list);
}
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
index dccd8636095..f8c752e408a 100644
--- a/drivers/oprofile/oprof.c
+++ b/drivers/oprofile/oprof.c
@@ -239,26 +239,45 @@ int oprofile_set_ulong(unsigned long *addr, unsigned long val)
return err;
}
+static int timer_mode;
+
static int __init oprofile_init(void)
{
int err;
+ /* always init architecture to set up backtrace support */
err = oprofile_arch_init(&oprofile_ops);
- if (err < 0 || timer) {
- printk(KERN_INFO "oprofile: using timer interrupt.\n");
+
+ timer_mode = err || timer; /* fall back to timer mode on errors */
+ if (timer_mode) {
+ if (!err)
+ oprofile_arch_exit();
err = oprofile_timer_init(&oprofile_ops);
if (err)
return err;
}
- return oprofilefs_register();
+
+ err = oprofilefs_register();
+ if (!err)
+ return 0;
+
+ /* failed */
+ if (timer_mode)
+ oprofile_timer_exit();
+ else
+ oprofile_arch_exit();
+
+ return err;
}
static void __exit oprofile_exit(void)
{
- oprofile_timer_exit();
oprofilefs_unregister();
- oprofile_arch_exit();
+ if (timer_mode)
+ oprofile_timer_exit();
+ else
+ oprofile_arch_exit();
}
diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
index 89f63456646..84a208dbed9 100644
--- a/drivers/oprofile/oprofile_files.c
+++ b/drivers/oprofile/oprofile_files.c
@@ -45,7 +45,7 @@ static ssize_t timeout_write(struct file *file, char const __user *buf,
return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval)
+ if (retval <= 0)
return retval;
retval = oprofile_set_timeout(val);
@@ -84,7 +84,7 @@ static ssize_t depth_write(struct file *file, char const __user *buf, size_t cou
return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval)
+ if (retval <= 0)
return retval;
retval = oprofile_set_ulong(&oprofile_backtrace_depth, val);
@@ -141,9 +141,10 @@ static ssize_t enable_write(struct file *file, char const __user *buf, size_t co
return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval)
+ if (retval <= 0)
return retval;
+ retval = 0;
if (val)
retval = oprofile_start();
else
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
index d0de6cc2d7a..2f0aa0f700e 100644
--- a/drivers/oprofile/oprofilefs.c
+++ b/drivers/oprofile/oprofilefs.c
@@ -60,6 +60,13 @@ ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user *buf, size_t cou
}
+/*
+ * Note: If oprofilefs_ulong_from_user() returns 0, then *val remains
+ * unchanged and might be uninitialized. This follows the write syscall
+ * implementation when count is zero: "If count is zero ... [and if]
+ * no errors are detected, 0 will be returned without causing any
+ * other effect." (man 2 write)
+ */
int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_t count)
{
char tmpbuf[TMPBUFSIZE];
@@ -79,7 +86,7 @@ int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_
raw_spin_lock_irqsave(&oprofilefs_lock, flags);
*val = simple_strtoul(tmpbuf, NULL, 0);
raw_spin_unlock_irqrestore(&oprofilefs_lock, flags);
- return 0;
+ return count;
}
@@ -99,7 +106,7 @@ static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_
return -EINVAL;
retval = oprofilefs_ulong_from_user(&value, buf, count);
- if (retval)
+ if (retval <= 0)
return retval;
retval = oprofile_set_ulong(file->private_data, value);
diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
index 3ef44624f51..878fba12658 100644
--- a/drivers/oprofile/timer_int.c
+++ b/drivers/oprofile/timer_int.c
@@ -110,6 +110,7 @@ int oprofile_timer_init(struct oprofile_operations *ops)
ops->start = oprofile_hrtimer_start;
ops->stop = oprofile_hrtimer_stop;
ops->cpu_type = "timer";
+ printk(KERN_INFO "oprofile: using timer interrupt.\n");
return 0;
}
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index b6f9749b4fa..f02b5235056 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -76,6 +76,7 @@ config PCI_IOV
config PCI_PRI
bool "PCI PRI support"
+ depends on PCI
select PCI_ATS
help
PRI is the PCI Page Request Interface. It allows PCI devices that are
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index 7ec56fb0bd7..b0dd08e6a9d 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -13,6 +13,7 @@
#include <linux/export.h>
#include <linux/pci-ats.h>
#include <linux/pci.h>
+#include <linux/slab.h>
#include "pci.h"
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 596172b4ae9..9ddf69e3bbe 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -132,6 +132,18 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle))
return AE_OK;
+ pdev = pbus->self;
+ if (pdev && pci_is_pcie(pdev)) {
+ tmp = acpi_find_root_bridge_handle(pdev);
+ if (tmp) {
+ struct acpi_pci_root *root = acpi_pci_find_root(tmp);
+
+ if (root && (root->osc_control_set &
+ OSC_PCI_EXPRESS_NATIVE_HP_CONTROL))
+ return AE_OK;
+ }
+ }
+
acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
device = (adr >> 16) & 0xffff;
function = adr & 0xffff;
@@ -213,7 +225,6 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
pdev = pci_get_slot(pbus, PCI_DEVFN(device, function));
if (pdev) {
- pdev->current_state = PCI_D0;
slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON);
pci_dev_put(pdev);
}
@@ -1378,11 +1389,13 @@ find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
{
int *count = (int *)context;
- if (acpi_is_root_bridge(handle)) {
- acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
- handle_hotplug_event_bridge, NULL);
- (*count)++;
- }
+ if (!acpi_is_root_bridge(handle))
+ return AE_OK;
+
+ (*count)++;
+ acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
+ handle_hotplug_event_bridge, NULL);
+
return AE_OK ;
}
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 1e9c9aacc3a..085dbb5fc16 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -213,9 +213,6 @@ static int board_added(struct slot *p_slot)
goto err_exit;
}
- /* Wait for 1 second after checking link training status */
- msleep(1000);
-
/* Check for a power fault */
if (ctrl->power_fault_detected || pciehp_query_power_fault(p_slot)) {
ctrl_err(ctrl, "Power fault on slot %s\n", slot_name(p_slot));
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 96dc4734e4a..7b1414810ae 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -280,6 +280,14 @@ int pciehp_check_link_status(struct controller *ctrl)
else
msleep(1000);
+ /*
+ * Need to wait for 1000 ms after Data Link Layer Link Active
+ * (DLLLA) bit reads 1b before sending configuration request.
+ * We need it before checking the Link Training (LT) bit because
+ * LT is still set even after the DLLLA bit is set on some platforms.
+ */
+ msleep(1000);
+
retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status);
if (retval) {
ctrl_err(ctrl, "Cannot read LNKSTATUS register\n");
@@ -294,6 +302,16 @@ int pciehp_check_link_status(struct controller *ctrl)
return retval;
}
+ /*
+ * If the port supports Link speeds greater than 5.0 GT/s, we
+ * must wait for 100 ms after Link training completes before
+ * sending configuration request.
+ */
+ if (ctrl->pcie->port->subordinate->max_bus_speed > PCIE_SPEED_5_0GT)
+ msleep(100);
+
+ pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status);
+
return retval;
}
@@ -484,7 +502,6 @@ int pciehp_power_on_slot(struct slot * slot)
u16 slot_cmd;
u16 cmd_mask;
u16 slot_status;
- u16 lnk_status;
int retval = 0;
/* Clear sticky power-fault bit from previous power failures */
@@ -516,14 +533,6 @@ int pciehp_power_on_slot(struct slot * slot)
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
- retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status);
- if (retval) {
- ctrl_err(ctrl, "%s: Cannot read LNKSTA register\n",
- __func__);
- return retval;
- }
- pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status);
-
return retval;
}
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index aca972bbfb4..dd7e0c51a33 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -278,8 +278,8 @@ static int get_adapter_status (struct hotplug_slot *hotplug_slot, u8 *value)
static int is_shpc_capable(struct pci_dev *dev)
{
- if ((dev->vendor == PCI_VENDOR_ID_AMD) || (dev->device ==
- PCI_DEVICE_ID_AMD_GOLAM_7450))
+ if (dev->vendor == PCI_VENDOR_ID_AMD &&
+ dev->device == PCI_DEVICE_ID_AMD_GOLAM_7450)
return 1;
if (!pci_find_capability(dev, PCI_CAP_ID_SHPC))
return 0;
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
index 36547f0ce30..75ba2311b54 100644
--- a/drivers/pci/hotplug/shpchp_hpc.c
+++ b/drivers/pci/hotplug/shpchp_hpc.c
@@ -944,8 +944,8 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
ctrl->pci_dev = pdev; /* pci_dev of the P2P bridge */
ctrl_dbg(ctrl, "Hotplug Controller:\n");
- if ((pdev->vendor == PCI_VENDOR_ID_AMD) || (pdev->device ==
- PCI_DEVICE_ID_AMD_GOLAM_7450)) {
+ if (pdev->vendor == PCI_VENDOR_ID_AMD &&
+ pdev->device == PCI_DEVICE_ID_AMD_GOLAM_7450) {
/* amd shpc driver doesn't use Base Offset; assume 0 */
ctrl->mmio_base = pci_resource_start(pdev, 0);
ctrl->mmio_size = pci_resource_len(pdev, 0);
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index b82c155d7b3..1969a3ee305 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -283,6 +283,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
struct resource *res;
struct pci_dev *pdev;
struct pci_sriov *iov = dev->sriov;
+ int bars = 0;
if (!nr_virtfn)
return 0;
@@ -307,6 +308,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
nres = 0;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
+ bars |= (1 << (i + PCI_IOV_RESOURCES));
res = dev->resource + PCI_IOV_RESOURCES + i;
if (res->parent)
nres++;
@@ -324,6 +326,11 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
return -ENOMEM;
}
+ if (pci_enable_resources(dev, bars)) {
+ dev_err(&dev->dev, "SR-IOV: IOV BARS not allocated\n");
+ return -ENOMEM;
+ }
+
if (iov->link != dev->devfn) {
pdev = pci_get_slot(dev->bus, iov->link);
if (!pdev)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 6f45a73c6e9..6d4a5319148 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -664,6 +664,9 @@ static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
error = platform_pci_set_power_state(dev, state);
if (!error)
pci_update_current_state(dev, state);
+ /* Fall back to PCI_D0 if native PM is not supported */
+ if (!dev->pm_cap)
+ dev->current_state = PCI_D0;
} else {
error = -ENODEV;
/* Fall back to PCI_D0 if native PM is not supported */
@@ -1126,7 +1129,11 @@ static int __pci_enable_device_flags(struct pci_dev *dev,
if (atomic_add_return(1, &dev->enable_cnt) > 1)
return 0; /* already enabled */
- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+ /* only skip SR-IOV related resources */
+ for (i = 0; i <= PCI_ROM_RESOURCE; i++)
+ if (dev->resource[i].flags & flags)
+ bars |= (1 << i);
+ for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
if (dev->resource[i].flags & flags)
bars |= (1 << i);
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 13ef8c37471..dcdc1f4a462 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -121,6 +121,7 @@ struct toshiba_acpi_dev {
int illumination_supported:1;
int video_supported:1;
int fan_supported:1;
+ int system_event_supported:1;
struct mutex mutex;
};
@@ -724,7 +725,7 @@ static int keys_proc_show(struct seq_file *m, void *v)
u32 hci_result;
u32 value;
- if (!dev->key_event_valid) {
+ if (!dev->key_event_valid && dev->system_event_supported) {
hci_read1(dev, HCI_SYSTEM_EVENT, &value, &hci_result);
if (hci_result == HCI_SUCCESS) {
dev->key_event_valid = 1;
@@ -964,6 +965,8 @@ static int __devinit toshiba_acpi_add(struct acpi_device *acpi_dev)
/* enable event fifo */
hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result);
+ if (hci_result == HCI_SUCCESS)
+ dev->system_event_supported = 1;
props.type = BACKLIGHT_PLATFORM;
props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
@@ -1032,12 +1035,15 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
{
struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev);
u32 hci_result, value;
+ int retries = 3;
- if (event != 0x80)
+ if (!dev->system_event_supported || event != 0x80)
return;
+
do {
hci_read1(dev, HCI_SYSTEM_EVENT, &value, &hci_result);
- if (hci_result == HCI_SUCCESS) {
+ switch (hci_result) {
+ case HCI_SUCCESS:
if (value == 0x100)
continue;
/* act on key press; ignore key release */
@@ -1049,14 +1055,19 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
pr_info("Unknown key %x\n",
value);
}
- } else if (hci_result == HCI_NOT_SUPPORTED) {
+ break;
+ case HCI_NOT_SUPPORTED:
/* This is a workaround for an unresolved issue on
* some machines where system events sporadically
* become disabled. */
hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result);
pr_notice("Re-enabled hotkeys\n");
+ /* fall through */
+ default:
+ retries--;
+ break;
}
- } while (hci_result != HCI_EMPTY);
+ } while (retries && hci_result != HCI_EMPTY);
}
diff --git a/drivers/power/intel_mid_battery.c b/drivers/power/intel_mid_battery.c
index cffcb7c00b0..01fa671ec97 100644
--- a/drivers/power/intel_mid_battery.c
+++ b/drivers/power/intel_mid_battery.c
@@ -61,7 +61,8 @@ MODULE_PARM_DESC(debug, "Flag to enable PMIC Battery debug messages.");
#define PMIC_BATT_CHR_SBATDET_MASK (1 << 5)
#define PMIC_BATT_CHR_SDCLMT_MASK (1 << 6)
#define PMIC_BATT_CHR_SUSBOVP_MASK (1 << 7)
-#define PMIC_BATT_CHR_EXCPT_MASK 0xC6
+#define PMIC_BATT_CHR_EXCPT_MASK 0x86
+
#define PMIC_BATT_ADC_ACCCHRG_MASK (1 << 31)
#define PMIC_BATT_ADC_ACCCHRGVAL_MASK 0x7FFFFFFF
@@ -304,11 +305,6 @@ static void pmic_battery_read_status(struct pmic_power_module_info *pbi)
pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
pmic_battery_log_event(BATT_EVENT_BATOVP_EXCPT);
batt_exception = 1;
- } else if (r8 & PMIC_BATT_CHR_SDCLMT_MASK) {
- pbi->batt_health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
- pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
- pmic_battery_log_event(BATT_EVENT_DCLMT_EXCPT);
- batt_exception = 1;
} else if (r8 & PMIC_BATT_CHR_STEMP_MASK) {
pbi->batt_health = POWER_SUPPLY_HEALTH_OVERHEAT;
pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
@@ -316,6 +312,10 @@ static void pmic_battery_read_status(struct pmic_power_module_info *pbi)
batt_exception = 1;
} else {
pbi->batt_health = POWER_SUPPLY_HEALTH_GOOD;
+ if (r8 & PMIC_BATT_CHR_SDCLMT_MASK) {
+ /* PMIC will change charging current automatically */
+ pmic_battery_log_event(BATT_EVENT_DCLMT_EXCPT);
+ }
}
}
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index cf3f9997546..10451a15e82 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -101,7 +101,9 @@ static s32 scaled_ppm_to_ppb(long ppm)
static int ptp_clock_getres(struct posix_clock *pc, struct timespec *tp)
{
- return 1; /* always round timer functions to one nanosecond */
+ tp->tv_sec = 0;
+ tp->tv_nsec = 1;
+ return 0;
}
static int ptp_clock_settime(struct posix_clock *pc, const struct timespec *tp)
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 5225930a10c..691b1ab1a3d 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -851,14 +851,12 @@ static int tsi721_doorbell_init(struct tsi721_device *priv)
INIT_WORK(&priv->idb_work, tsi721_db_dpc);
/* Allocate buffer for inbound doorbells queue */
- priv->idb_base = dma_alloc_coherent(&priv->pdev->dev,
+ priv->idb_base = dma_zalloc_coherent(&priv->pdev->dev,
IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
&priv->idb_dma, GFP_KERNEL);
if (!priv->idb_base)
return -ENOMEM;
- memset(priv->idb_base, 0, IDB_QSIZE * TSI721_IDB_ENTRY_SIZE);
-
dev_dbg(&priv->pdev->dev, "Allocated IDB buffer @ %p (phys = %llx)\n",
priv->idb_base, (unsigned long long)priv->idb_dma);
@@ -904,7 +902,7 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
*/
/* Allocate space for DMA descriptors */
- bd_ptr = dma_alloc_coherent(&priv->pdev->dev,
+ bd_ptr = dma_zalloc_coherent(&priv->pdev->dev,
bd_num * sizeof(struct tsi721_dma_desc),
&bd_phys, GFP_KERNEL);
if (!bd_ptr)
@@ -913,8 +911,6 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
priv->bdma[chnum].bd_phys = bd_phys;
priv->bdma[chnum].bd_base = bd_ptr;
- memset(bd_ptr, 0, bd_num * sizeof(struct tsi721_dma_desc));
-
dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n",
bd_ptr, (unsigned long long)bd_phys);
@@ -922,7 +918,7 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
bd_num : TSI721_DMA_MINSTSSZ;
sts_size = roundup_pow_of_two(sts_size);
- sts_ptr = dma_alloc_coherent(&priv->pdev->dev,
+ sts_ptr = dma_zalloc_coherent(&priv->pdev->dev,
sts_size * sizeof(struct tsi721_dma_sts),
&sts_phys, GFP_KERNEL);
if (!sts_ptr) {
@@ -938,8 +934,6 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
priv->bdma[chnum].sts_base = sts_ptr;
priv->bdma[chnum].sts_size = sts_size;
- memset(sts_ptr, 0, sts_size);
-
dev_dbg(&priv->pdev->dev,
"desc status FIFO @ %p (phys = %llx) size=0x%x\n",
sts_ptr, (unsigned long long)sts_phys, sts_size);
@@ -1400,7 +1394,7 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
/* Outbound message descriptor status FIFO allocation */
priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1);
- priv->omsg_ring[mbox].sts_base = dma_alloc_coherent(&priv->pdev->dev,
+ priv->omsg_ring[mbox].sts_base = dma_zalloc_coherent(&priv->pdev->dev,
priv->omsg_ring[mbox].sts_size *
sizeof(struct tsi721_dma_sts),
&priv->omsg_ring[mbox].sts_phys, GFP_KERNEL);
@@ -1412,9 +1406,6 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
goto out_desc;
}
- memset(priv->omsg_ring[mbox].sts_base, 0,
- entries * sizeof(struct tsi721_dma_sts));
-
/*
* Configure Outbound Messaging Engine
*/
@@ -2116,8 +2107,8 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv)
INIT_LIST_HEAD(&mport->dbells);
rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
- rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 0);
- rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0);
+ rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 3);
+ rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 3);
strcpy(mport->name, "Tsi721 mport");
/* Hook up interrupt handler */
@@ -2163,7 +2154,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct tsi721_device *priv;
- int i;
+ int i, cap;
int err;
u32 regval;
@@ -2271,10 +2262,20 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
dev_info(&pdev->dev, "Unable to set consistent DMA mask\n");
}
- /* Clear "no snoop" and "relaxed ordering" bits. */
- pci_read_config_dword(pdev, 0x40 + PCI_EXP_DEVCTL, &regval);
- regval &= ~(PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN);
- pci_write_config_dword(pdev, 0x40 + PCI_EXP_DEVCTL, regval);
+ cap = pci_pcie_cap(pdev);
+ BUG_ON(cap == 0);
+
+ /* Clear "no snoop" and "relaxed ordering" bits, use default MRRS. */
+ pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL, &regval);
+ regval &= ~(PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN |
+ PCI_EXP_DEVCTL_NOSNOOP_EN);
+ regval |= 0x2 << MAX_READ_REQUEST_SZ_SHIFT;
+ pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL, regval);
+
+ /* Adjust PCIe completion timeout. */
+ pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL2, &regval);
+ regval &= ~(0x0f);
+ pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL2, regval | 0x2);
/*
* FIXUP: correct offsets of MSI-X tables in the MSI-X Capability Block
diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
index 58be4deb140..822e54c394d 100644
--- a/drivers/rapidio/devices/tsi721.h
+++ b/drivers/rapidio/devices/tsi721.h
@@ -72,6 +72,8 @@
#define TSI721_MSIXPBA_OFFSET 0x2a000
#define TSI721_PCIECFG_EPCTL 0x400
+#define MAX_READ_REQUEST_SZ_SHIFT 12
+
/*
* Event Management Registers
*/
diff --git a/drivers/regulator/aat2870-regulator.c b/drivers/regulator/aat2870-regulator.c
index 5abeb3ac3e8..298c6c6a279 100644
--- a/drivers/regulator/aat2870-regulator.c
+++ b/drivers/regulator/aat2870-regulator.c
@@ -160,7 +160,7 @@ static struct aat2870_regulator *aat2870_get_regulator(int id)
break;
}
- if (!ri)
+ if (i == ARRAY_SIZE(aat2870_regulators))
return NULL;
ri->enable_addr = AAT2870_LDO_EN;
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 669d0216022..938398f3e86 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2799,8 +2799,8 @@ void regulator_unregister(struct regulator_dev *rdev)
list_del(&rdev->list);
if (rdev->supply)
regulator_put(rdev->supply);
- device_unregister(&rdev->dev);
kfree(rdev->constraints);
+ device_unregister(&rdev->dev);
mutex_unlock(&regulator_list_mutex);
}
EXPORT_SYMBOL_GPL(regulator_unregister);
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index ee8747f4fa0..11cc308d66e 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -71,6 +71,7 @@ struct twlreg_info {
#define VREG_TYPE 1
#define VREG_REMAP 2
#define VREG_DEDICATED 3 /* LDO control */
+#define VREG_VOLTAGE_SMPS_4030 9
/* TWL6030 register offsets */
#define VREG_TRANS 1
#define VREG_STATE 2
@@ -514,6 +515,32 @@ static struct regulator_ops twl4030ldo_ops = {
.get_status = twl4030reg_get_status,
};
+static int
+twl4030smps_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
+ unsigned *selector)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+ int vsel = DIV_ROUND_UP(min_uV - 600000, 12500);
+
+ twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS_4030,
+ vsel);
+ return 0;
+}
+
+static int twl4030smps_get_voltage(struct regulator_dev *rdev)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+ int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER,
+ VREG_VOLTAGE_SMPS_4030);
+
+ return vsel * 12500 + 600000;
+}
+
+static struct regulator_ops twl4030smps_ops = {
+ .set_voltage = twl4030smps_set_voltage,
+ .get_voltage = twl4030smps_get_voltage,
+};
+
static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned index)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
@@ -856,6 +883,21 @@ static struct regulator_ops twlsmps_ops = {
}, \
}
+#define TWL4030_ADJUSTABLE_SMPS(label, offset, num, turnon_delay, remap_conf) \
+ { \
+ .base = offset, \
+ .id = num, \
+ .delay = turnon_delay, \
+ .remap = remap_conf, \
+ .desc = { \
+ .name = #label, \
+ .id = TWL4030_REG_##label, \
+ .ops = &twl4030smps_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }, \
+ }
+
#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts) { \
.base = offset, \
.min_mV = min_mVolts, \
@@ -947,8 +989,8 @@ static struct twlreg_info twl_regs[] = {
TWL4030_ADJUSTABLE_LDO(VINTANA2, 0x43, 12, 100, 0x08),
TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08),
TWL4030_ADJUSTABLE_LDO(VIO, 0x4b, 14, 1000, 0x08),
- TWL4030_ADJUSTABLE_LDO(VDD1, 0x55, 15, 1000, 0x08),
- TWL4030_ADJUSTABLE_LDO(VDD2, 0x63, 16, 1000, 0x08),
+ TWL4030_ADJUSTABLE_SMPS(VDD1, 0x55, 15, 1000, 0x08),
+ TWL4030_ADJUSTABLE_SMPS(VDD2, 0x63, 16, 1000, 0x08),
TWL4030_FIXED_LDO(VUSB1V5, 0x71, 1500, 17, 100, 0x08),
TWL4030_FIXED_LDO(VUSB1V8, 0x74, 1800, 18, 100, 0x08),
TWL4030_FIXED_LDO(VUSB3V1, 0x77, 3100, 19, 150, 0x08),
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index e8326f26fa2..dc4c2748bbc 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -63,7 +63,7 @@ static int rtc_suspend(struct device *dev, pm_message_t mesg)
*/
delta = timespec_sub(old_system, old_rtc);
delta_delta = timespec_sub(delta, old_delta);
- if (abs(delta_delta.tv_sec) >= 2) {
+ if (delta_delta.tv_sec < -2 || delta_delta.tv_sec >= 2) {
/*
* if delta_delta is too large, assume time correction
* has occurred and set old_delta to the current delta.
@@ -97,9 +97,8 @@ static int rtc_resume(struct device *dev)
rtc_tm_to_time(&tm, &new_rtc.tv_sec);
new_rtc.tv_nsec = 0;
- if (new_rtc.tv_sec <= old_rtc.tv_sec) {
- if (new_rtc.tv_sec < old_rtc.tv_sec)
- pr_debug("%s: time travel!\n", dev_name(&rtc->dev));
+ if (new_rtc.tv_sec < old_rtc.tv_sec) {
+ pr_debug("%s: time travel!\n", dev_name(&rtc->dev));
return 0;
}
@@ -116,7 +115,8 @@ static int rtc_resume(struct device *dev)
sleep_time = timespec_sub(sleep_time,
timespec_sub(new_system, old_system));
- timekeeping_inject_sleeptime(&sleep_time);
+ if (sleep_time.tv_sec >= 0)
+ timekeeping_inject_sleeptime(&sleep_time);
return 0;
}
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 8e286259a00..3bcc7cfcaba 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -73,6 +73,8 @@ int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm)
err = -EINVAL;
mutex_unlock(&rtc->ops_lock);
+ /* A timer might have just expired */
+ schedule_work(&rtc->irqwork);
return err;
}
EXPORT_SYMBOL_GPL(rtc_set_time);
@@ -112,6 +114,8 @@ int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs)
err = -EINVAL;
mutex_unlock(&rtc->ops_lock);
+ /* A timer might have just expired */
+ schedule_work(&rtc->irqwork);
return err;
}
@@ -319,6 +323,20 @@ int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
}
EXPORT_SYMBOL_GPL(rtc_read_alarm);
+static int ___rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+{
+ int err;
+
+ if (!rtc->ops)
+ err = -ENODEV;
+ else if (!rtc->ops->set_alarm)
+ err = -EINVAL;
+ else
+ err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
+
+ return err;
+}
+
static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
struct rtc_time tm;
@@ -342,14 +360,7 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
* over right here, before we set the alarm.
*/
- if (!rtc->ops)
- err = -ENODEV;
- else if (!rtc->ops->set_alarm)
- err = -EINVAL;
- else
- err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
-
- return err;
+ return ___rtc_set_alarm(rtc, alarm);
}
int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
@@ -396,6 +407,8 @@ int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
timerqueue_add(&rtc->timerqueue, &rtc->aie_timer.node);
}
mutex_unlock(&rtc->ops_lock);
+ /* maybe that was in the past. */
+ schedule_work(&rtc->irqwork);
return err;
}
EXPORT_SYMBOL_GPL(rtc_initialize_alarm);
@@ -763,6 +776,20 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
return 0;
}
+static void rtc_alarm_disable(struct rtc_device *rtc)
+{
+ struct rtc_wkalrm alarm;
+ struct rtc_time tm;
+
+ __rtc_read_time(rtc, &tm);
+
+ alarm.time = rtc_ktime_to_tm(ktime_add(rtc_tm_to_ktime(tm),
+ ktime_set(300, 0)));
+ alarm.enabled = 0;
+
+ ___rtc_set_alarm(rtc, &alarm);
+}
+
/**
* rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
* @rtc rtc device
@@ -784,8 +811,10 @@ static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
struct rtc_wkalrm alarm;
int err;
next = timerqueue_getnext(&rtc->timerqueue);
- if (!next)
+ if (!next) {
+ rtc_alarm_disable(rtc);
return;
+ }
alarm.time = rtc_ktime_to_tm(next->expires);
alarm.enabled = 1;
err = __rtc_set_alarm(rtc, &alarm);
@@ -847,7 +876,8 @@ again:
err = __rtc_set_alarm(rtc, &alarm);
if (err == -ETIME)
goto again;
- }
+ } else
+ rtc_alarm_disable(rtc);
mutex_unlock(&rtc->ops_lock);
}
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index eda128fc1d3..64aedd8cc09 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -357,10 +357,19 @@ static int m41t80_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *t)
static struct rtc_class_ops m41t80_rtc_ops = {
.read_time = m41t80_rtc_read_time,
.set_time = m41t80_rtc_set_time,
+ /*
+ * XXX - m41t80 alarm functionality is reported broken.
+ * until it is fixed, don't register alarm functions.
+ *
.read_alarm = m41t80_rtc_read_alarm,
.set_alarm = m41t80_rtc_set_alarm,
+ */
.proc = m41t80_rtc_proc,
+ /*
+ * See above comment on broken alarm
+ *
.alarm_irq_enable = m41t80_rtc_alarm_irq_enable,
+ */
};
#if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE)
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 7639ab906f0..5b979d9cc33 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -202,7 +202,6 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
void __iomem *base = s3c_rtc_base;
int year = tm->tm_year - 100;
- clk_enable(rtc_clk);
pr_debug("set time %04d.%02d.%02d %02d:%02d:%02d\n",
1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec);
@@ -214,6 +213,7 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
return -EINVAL;
}
+ clk_enable(rtc_clk);
writeb(bin2bcd(tm->tm_sec), base + S3C2410_RTCSEC);
writeb(bin2bcd(tm->tm_min), base + S3C2410_RTCMIN);
writeb(bin2bcd(tm->tm_hour), base + S3C2410_RTCHOUR);
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 75c3f1f8fd4..a84631a7391 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -529,10 +529,7 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data)
int chsc_chp_vary(struct chp_id chpid, int on)
{
struct channel_path *chp = chpid_to_chp(chpid);
- struct chp_link link;
- memset(&link, 0, sizeof(struct chp_link));
- link.chpid = chpid;
/* Wait until previous actions have settled. */
css_wait_for_slow_path();
/*
@@ -542,10 +539,10 @@ int chsc_chp_vary(struct chp_id chpid, int on)
/* Try to update the channel path descriptor. */
chsc_determine_base_channel_path_desc(chpid, &chp->desc);
for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
- __s390_vary_chpid_on, &link);
+ __s390_vary_chpid_on, &chpid);
} else
for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
- NULL, &link);
+ NULL, &chpid);
return 0;
}
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 155a82bcb9e..4a1ff5c2eb8 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -68,8 +68,13 @@ struct schib {
__u8 mda[4]; /* model dependent area */
} __attribute__ ((packed,aligned(4)));
+/*
* When rescheduled, todos with higher values will overwrite those
+ * with lower values.
+ */
enum sch_todo {
SCH_TODO_NOTHING,
+ SCH_TODO_EVAL,
SCH_TODO_UNREG,
};
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 92d7324acb1..21908e67bf6 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -195,51 +195,6 @@ void css_sch_device_unregister(struct subchannel *sch)
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);
-static void css_sch_todo(struct work_struct *work)
-{
- struct subchannel *sch;
- enum sch_todo todo;
-
- sch = container_of(work, struct subchannel, todo_work);
- /* Find out todo. */
- spin_lock_irq(sch->lock);
- todo = sch->todo;
- CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
- sch->schid.sch_no, todo);
- sch->todo = SCH_TODO_NOTHING;
- spin_unlock_irq(sch->lock);
- /* Perform todo. */
- if (todo == SCH_TODO_UNREG)
- css_sch_device_unregister(sch);
- /* Release workqueue ref. */
- put_device(&sch->dev);
-}
-
-/**
- * css_sched_sch_todo - schedule a subchannel operation
- * @sch: subchannel
- * @todo: todo
- *
- * Schedule the operation identified by @todo to be performed on the slow path
- * workqueue. Do nothing if another operation with higher priority is already
- * scheduled. Needs to be called with subchannel lock held.
- */
-void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
-{
- CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
- sch->schid.ssid, sch->schid.sch_no, todo);
- if (sch->todo >= todo)
- return;
- /* Get workqueue ref. */
- if (!get_device(&sch->dev))
- return;
- sch->todo = todo;
- if (!queue_work(cio_work_q, &sch->todo_work)) {
- /* Already queued, release workqueue ref. */
- put_device(&sch->dev);
- }
-}
-
static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
int i;
@@ -466,6 +421,65 @@ static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
css_schedule_eval(schid);
}
+/**
+ * css_sched_sch_todo - schedule a subchannel operation
+ * @sch: subchannel
+ * @todo: todo
+ *
+ * Schedule the operation identified by @todo to be performed on the slow path
+ * workqueue. Do nothing if another operation with higher priority is already
+ * scheduled. Needs to be called with subchannel lock held.
+ */
+void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
+{
+ CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
+ sch->schid.ssid, sch->schid.sch_no, todo);
+ if (sch->todo >= todo)
+ return;
+ /* Get workqueue ref. */
+ if (!get_device(&sch->dev))
+ return;
+ sch->todo = todo;
+ if (!queue_work(cio_work_q, &sch->todo_work)) {
+ /* Already queued, release workqueue ref. */
+ put_device(&sch->dev);
+ }
+}
+
+static void css_sch_todo(struct work_struct *work)
+{
+ struct subchannel *sch;
+ enum sch_todo todo;
+ int ret;
+
+ sch = container_of(work, struct subchannel, todo_work);
+ /* Find out todo. */
+ spin_lock_irq(sch->lock);
+ todo = sch->todo;
+ CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
+ sch->schid.sch_no, todo);
+ sch->todo = SCH_TODO_NOTHING;
+ spin_unlock_irq(sch->lock);
+ /* Perform todo. */
+ switch (todo) {
+ case SCH_TODO_NOTHING:
+ break;
+ case SCH_TODO_EVAL:
+ ret = css_evaluate_known_subchannel(sch, 1);
+ if (ret == -EAGAIN) {
+ spin_lock_irq(sch->lock);
+ css_sched_sch_todo(sch, todo);
+ spin_unlock_irq(sch->lock);
+ }
+ break;
+ case SCH_TODO_UNREG:
+ css_sch_device_unregister(sch);
+ break;
+ }
+ /* Release workqueue ref. */
+ put_device(&sch->dev);
+}
+
static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;
static wait_queue_head_t css_eval_wq;
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index d734f4a0eca..47269858ecb 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1868,9 +1868,9 @@ static void __ccw_device_pm_restore(struct ccw_device *cdev)
*/
cdev->private->flags.resuming = 1;
cdev->private->path_new_mask = LPM_ANYPATH;
- css_schedule_eval(sch->schid);
+ css_sched_sch_todo(sch, SCH_TODO_EVAL);
spin_unlock_irq(sch->lock);
- css_complete_work();
+ css_wait_for_slow_path();
/* cdev may have been moved to a different subchannel. */
sch = to_subchannel(cdev->dev.parent);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 52c233fa2b1..1b853513c89 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -496,8 +496,26 @@ static void ccw_device_reset_path_events(struct ccw_device *cdev)
cdev->private->pgid_reset_mask = 0;
}
-void
-ccw_device_verify_done(struct ccw_device *cdev, int err)
+static void create_fake_irb(struct irb *irb, int type)
+{
+ memset(irb, 0, sizeof(*irb));
+ if (type == FAKE_CMD_IRB) {
+ struct cmd_scsw *scsw = &irb->scsw.cmd;
+ scsw->cc = 1;
+ scsw->fctl = SCSW_FCTL_START_FUNC;
+ scsw->actl = SCSW_ACTL_START_PEND;
+ scsw->stctl = SCSW_STCTL_STATUS_PEND;
+ } else if (type == FAKE_TM_IRB) {
+ struct tm_scsw *scsw = &irb->scsw.tm;
+ scsw->x = 1;
+ scsw->cc = 1;
+ scsw->fctl = SCSW_FCTL_START_FUNC;
+ scsw->actl = SCSW_ACTL_START_PEND;
+ scsw->stctl = SCSW_STCTL_STATUS_PEND;
+ }
+}
+
+void ccw_device_verify_done(struct ccw_device *cdev, int err)
{
struct subchannel *sch;
@@ -520,12 +538,8 @@ callback:
ccw_device_done(cdev, DEV_STATE_ONLINE);
/* Deliver fake irb to device driver, if needed. */
if (cdev->private->flags.fake_irb) {
- memset(&cdev->private->irb, 0, sizeof(struct irb));
- cdev->private->irb.scsw.cmd.cc = 1;
- cdev->private->irb.scsw.cmd.fctl = SCSW_FCTL_START_FUNC;
- cdev->private->irb.scsw.cmd.actl = SCSW_ACTL_START_PEND;
- cdev->private->irb.scsw.cmd.stctl =
- SCSW_STCTL_STATUS_PEND;
+ create_fake_irb(&cdev->private->irb,
+ cdev->private->flags.fake_irb);
cdev->private->flags.fake_irb = 0;
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index f98698d5735..ec7fb6d3b47 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -198,7 +198,7 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
if (cdev->private->state == DEV_STATE_VERIFY) {
/* Remember to fake irb when finished. */
if (!cdev->private->flags.fake_irb) {
- cdev->private->flags.fake_irb = 1;
+ cdev->private->flags.fake_irb = FAKE_CMD_IRB;
cdev->private->intparm = intparm;
return 0;
} else
@@ -213,9 +213,9 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
ret = cio_set_options (sch, flags);
if (ret)
return ret;
- /* Adjust requested path mask to excluded varied off paths. */
+ /* Adjust requested path mask to exclude unusable paths. */
if (lpm) {
- lpm &= sch->opm;
+ lpm &= sch->lpm;
if (lpm == 0)
return -EACCES;
}
@@ -605,11 +605,21 @@ int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
sch = to_subchannel(cdev->dev.parent);
if (!sch->schib.pmcw.ena)
return -EINVAL;
+ if (cdev->private->state == DEV_STATE_VERIFY) {
+ /* Remember to fake irb when finished. */
+ if (!cdev->private->flags.fake_irb) {
+ cdev->private->flags.fake_irb = FAKE_TM_IRB;
+ cdev->private->intparm = intparm;
+ return 0;
+ } else
+ /* There's already a fake I/O around. */
+ return -EBUSY;
+ }
if (cdev->private->state != DEV_STATE_ONLINE)
return -EIO;
- /* Adjust requested path mask to excluded varied off paths. */
+ /* Adjust requested path mask to exclude unusable paths. */
if (lpm) {
- lpm &= sch->opm;
+ lpm &= sch->lpm;
if (lpm == 0)
return -EACCES;
}
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 2ebb492a5c1..76253dfcc1b 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -111,6 +111,9 @@ enum cdev_todo {
CDEV_TODO_UNREG_EVAL,
};
+#define FAKE_CMD_IRB 1
+#define FAKE_TM_IRB 2
+
struct ccw_device_private {
struct ccw_device *cdev;
struct subchannel *sch;
@@ -138,7 +141,7 @@ struct ccw_device_private {
unsigned int doverify:1; /* delayed path verification */
unsigned int donotify:1; /* call notify function */
unsigned int recog_done:1; /* dev. recog. complete */
- unsigned int fake_irb:1; /* deliver faked irb */
+ unsigned int fake_irb:2; /* deliver faked irb */
unsigned int resuming:1; /* recognition while resume */
unsigned int pgroup:1; /* pathgroup is set up */
unsigned int mpath:1; /* multipathing is set up */
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index ec94f049e99..96bbe9d12a7 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -1552,6 +1552,8 @@ static void ap_reset(struct ap_device *ap_dev)
rc = ap_init_queue(ap_dev->qid);
if (rc == -ENODEV)
ap_dev->unregistered = 1;
+ else
+ __ap_schedule_poll_timer();
}
static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags)
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 11f07f88822..b79576b64f4 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -55,6 +55,10 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdev)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+ /* if previous slave_alloc returned early, there is nothing to do */
+ if (!zfcp_sdev->port)
+ return;
+
zfcp_erp_lun_shutdown_wait(sdev, "scssd_1");
put_device(&zfcp_sdev->port->dev);
}
diff --git a/drivers/sbus/char/bbc_i2c.c b/drivers/sbus/char/bbc_i2c.c
index 5f94d22c491..54266829290 100644
--- a/drivers/sbus/char/bbc_i2c.c
+++ b/drivers/sbus/char/bbc_i2c.c
@@ -233,13 +233,9 @@ int bbc_i2c_write_buf(struct bbc_i2c_client *client,
int ret = 0;
while (len > 0) {
- int err = bbc_i2c_writeb(client, *buf, off);
-
- if (err < 0) {
- ret = err;
+ ret = bbc_i2c_writeb(client, *buf, off);
+ if (ret < 0)
break;
- }
-
len--;
buf++;
off++;
@@ -253,11 +249,9 @@ int bbc_i2c_read_buf(struct bbc_i2c_client *client,
int ret = 0;
while (len > 0) {
- int err = bbc_i2c_readb(client, buf, off);
- if (err < 0) {
- ret = err;
+ ret = bbc_i2c_readb(client, buf, off);
+ if (ret < 0)
break;
- }
len--;
buf++;
off++;
@@ -422,17 +416,6 @@ static struct platform_driver bbc_i2c_driver = {
.remove = __devexit_p(bbc_i2c_remove),
};
-static int __init bbc_i2c_init(void)
-{
- return platform_driver_register(&bbc_i2c_driver);
-}
-
-static void __exit bbc_i2c_exit(void)
-{
- platform_driver_unregister(&bbc_i2c_driver);
-}
-
-module_init(bbc_i2c_init);
-module_exit(bbc_i2c_exit);
+module_platform_driver(bbc_i2c_driver);
MODULE_LICENSE("GPL");
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
index 965a1fccd66..4b9939726c3 100644
--- a/drivers/sbus/char/display7seg.c
+++ b/drivers/sbus/char/display7seg.c
@@ -275,15 +275,4 @@ static struct platform_driver d7s_driver = {
.remove = __devexit_p(d7s_remove),
};
-static int __init d7s_init(void)
-{
- return platform_driver_register(&d7s_driver);
-}
-
-static void __exit d7s_exit(void)
-{
- platform_driver_unregister(&d7s_driver);
-}
-
-module_init(d7s_init);
-module_exit(d7s_exit);
+module_platform_driver(d7s_driver);
diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c
index be7b4e56154..339fd6f65ed 100644
--- a/drivers/sbus/char/envctrl.c
+++ b/drivers/sbus/char/envctrl.c
@@ -1138,16 +1138,6 @@ static struct platform_driver envctrl_driver = {
.remove = __devexit_p(envctrl_remove),
};
-static int __init envctrl_init(void)
-{
- return platform_driver_register(&envctrl_driver);
-}
-
-static void __exit envctrl_exit(void)
-{
- platform_driver_unregister(&envctrl_driver);
-}
+module_platform_driver(envctrl_driver);
-module_init(envctrl_init);
-module_exit(envctrl_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c
index 73dd4e7afaa..826157f3869 100644
--- a/drivers/sbus/char/flash.c
+++ b/drivers/sbus/char/flash.c
@@ -216,16 +216,6 @@ static struct platform_driver flash_driver = {
.remove = __devexit_p(flash_remove),
};
-static int __init flash_init(void)
-{
- return platform_driver_register(&flash_driver);
-}
-
-static void __exit flash_cleanup(void)
-{
- platform_driver_unregister(&flash_driver);
-}
+module_platform_driver(flash_driver);
-module_init(flash_init);
-module_exit(flash_cleanup);
MODULE_LICENSE("GPL");
diff --git a/drivers/sbus/char/uctrl.c b/drivers/sbus/char/uctrl.c
index ebce9639a26..0b31658ccde 100644
--- a/drivers/sbus/char/uctrl.c
+++ b/drivers/sbus/char/uctrl.c
@@ -435,16 +435,6 @@ static struct platform_driver uctrl_driver = {
};
-static int __init uctrl_init(void)
-{
- return platform_driver_register(&uctrl_driver);
-}
-
-static void __exit uctrl_exit(void)
-{
- platform_driver_unregister(&uctrl_driver);
-}
+module_platform_driver(uctrl_driver);
-module_init(uctrl_init);
-module_exit(uctrl_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index dba72a4e6a1..1ad0b822556 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1906,18 +1906,19 @@ static int bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session,
spin_lock(&session->lock);
task = iscsi_itt_to_task(bnx2i_conn->cls_conn->dd_data,
cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
- if (!task) {
+ if (!task || !task->sc) {
spin_unlock(&session->lock);
return -EINVAL;
}
sc = task->sc;
- spin_unlock(&session->lock);
if (!blk_rq_cpu_valid(sc->request))
cpu = smp_processor_id();
else
cpu = sc->request->cpu;
+ spin_unlock(&session->lock);
+
p = &per_cpu(bnx2i_percpu, cpu);
spin_lock(&p->p_work_lock);
if (unlikely(!p->iothread)) {
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index cefbe44bb84..8d67467dd9c 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -31,6 +31,8 @@
#include <linux/sysfs.h>
#include <linux/ctype.h>
#include <linux/workqueue.h>
+#include <net/dcbnl.h>
+#include <net/dcbevent.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
@@ -101,6 +103,8 @@ static int fcoe_ddp_done(struct fc_lport *, u16);
static int fcoe_ddp_target(struct fc_lport *, u16, struct scatterlist *,
unsigned int);
static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *);
+static int fcoe_dcb_app_notification(struct notifier_block *notifier,
+ ulong event, void *ptr);
static bool fcoe_match(struct net_device *netdev);
static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode);
@@ -129,6 +133,11 @@ static struct notifier_block fcoe_cpu_notifier = {
.notifier_call = fcoe_cpu_callback,
};
+/* notification function for DCB events */
+static struct notifier_block dcb_notifier = {
+ .notifier_call = fcoe_dcb_app_notification,
+};
+
static struct scsi_transport_template *fcoe_nport_scsi_transport;
static struct scsi_transport_template *fcoe_vport_scsi_transport;
@@ -1522,6 +1531,8 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
skb_reset_network_header(skb);
skb->mac_len = elen;
skb->protocol = htons(ETH_P_FCOE);
+ skb->priority = port->priority;
+
if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN &&
fcoe->realdev->features & NETIF_F_HW_VLAN_TX) {
skb->vlan_tci = VLAN_TAG_PRESENT |
@@ -1624,6 +1635,7 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
stats->InvalidCRCCount++;
if (stats->InvalidCRCCount < 5)
printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
+ put_cpu();
return -EINVAL;
}
@@ -1746,6 +1758,7 @@ int fcoe_percpu_receive_thread(void *arg)
*/
static void fcoe_dev_setup(void)
{
+ register_dcbevent_notifier(&dcb_notifier);
register_netdevice_notifier(&fcoe_notifier);
}
@@ -1754,9 +1767,69 @@ static void fcoe_dev_setup(void)
*/
static void fcoe_dev_cleanup(void)
{
+ unregister_dcbevent_notifier(&dcb_notifier);
unregister_netdevice_notifier(&fcoe_notifier);
}
+static struct fcoe_interface *
+fcoe_hostlist_lookup_realdev_port(struct net_device *netdev)
+{
+ struct fcoe_interface *fcoe;
+ struct net_device *real_dev;
+
+ list_for_each_entry(fcoe, &fcoe_hostlist, list) {
+ if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
+ real_dev = vlan_dev_real_dev(fcoe->netdev);
+ else
+ real_dev = fcoe->netdev;
+
+ if (netdev == real_dev)
+ return fcoe;
+ }
+ return NULL;
+}
+
+static int fcoe_dcb_app_notification(struct notifier_block *notifier,
+ ulong event, void *ptr)
+{
+ struct dcb_app_type *entry = ptr;
+ struct fcoe_interface *fcoe;
+ struct net_device *netdev;
+ struct fcoe_port *port;
+ int prio;
+
+ if (entry->app.selector != DCB_APP_IDTYPE_ETHTYPE)
+ return NOTIFY_OK;
+
+ netdev = dev_get_by_index(&init_net, entry->ifindex);
+ if (!netdev)
+ return NOTIFY_OK;
+
+ fcoe = fcoe_hostlist_lookup_realdev_port(netdev);
+ dev_put(netdev);
+ if (!fcoe)
+ return NOTIFY_OK;
+
+ if (entry->dcbx & DCB_CAP_DCBX_VER_CEE)
+ prio = ffs(entry->app.priority) - 1;
+ else
+ prio = entry->app.priority;
+
+ if (prio < 0)
+ return NOTIFY_OK;
+
+ if (entry->app.protocol == ETH_P_FIP ||
+ entry->app.protocol == ETH_P_FCOE)
+ fcoe->ctlr.priority = prio;
+
+ if (entry->app.protocol == ETH_P_FCOE) {
+ port = lport_priv(fcoe->ctlr.lp);
+ port->priority = prio;
+ }
+
+ return NOTIFY_OK;
+}
+
/**
* fcoe_device_notification() - Handler for net device events
* @notifier: The context of the notification
@@ -1965,6 +2038,46 @@ static bool fcoe_match(struct net_device *netdev)
}
/**
+ * fcoe_dcb_create() - Initialize DCB attributes and hooks
+ * @netdev: The net_device object of the L2 link that should be queried
+ * @port: The fcoe_port to bind FCoE APP priority with
+ */
+static void fcoe_dcb_create(struct fcoe_interface *fcoe)
+{
+#ifdef CONFIG_DCB
+ int dcbx;
+ u8 fup, up;
+ struct net_device *netdev = fcoe->realdev;
+ struct fcoe_port *port = lport_priv(fcoe->ctlr.lp);
+ struct dcb_app app = {
+ .priority = 0,
+ .protocol = ETH_P_FCOE
+ };
+
+ /* setup DCB priority attributes. */
+ if (netdev && netdev->dcbnl_ops && netdev->dcbnl_ops->getdcbx) {
+ dcbx = netdev->dcbnl_ops->getdcbx(netdev);
+
+ if (dcbx & DCB_CAP_DCBX_VER_IEEE) {
+ app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
+ up = dcb_ieee_getapp_mask(netdev, &app);
+ app.protocol = ETH_P_FIP;
+ fup = dcb_ieee_getapp_mask(netdev, &app);
+ } else {
+ app.selector = DCB_APP_IDTYPE_ETHTYPE;
+ up = dcb_getapp(netdev, &app);
+ app.protocol = ETH_P_FIP;
+ fup = dcb_getapp(netdev, &app);
+ }
+
+ port->priority = ffs(up) ? ffs(up) - 1 : 0;
+ fcoe->ctlr.priority = ffs(fup) ? ffs(fup) - 1 : port->priority;
+ }
+#endif
+}
+
+/**
* fcoe_create() - Create a fcoe interface
* @netdev : The net_device object the Ethernet interface to create on
* @fip_mode: The FIP mode for this creation
@@ -2007,6 +2120,9 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
/* Make this the "master" N_Port */
fcoe->ctlr.lp = lport;
+ /* setup DCB priority attributes. */
+ fcoe_dcb_create(fcoe);
+
/* add to lports list */
fcoe_hostlist_add(lport);
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index c74c4b8e71e..e7522dcc296 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -320,6 +320,7 @@ static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf)
skb_put(skb, sizeof(*sol));
skb->protocol = htons(ETH_P_FIP);
+ skb->priority = fip->priority;
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
fip->send(fip, skb);
@@ -474,6 +475,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip,
}
skb_put(skb, len);
skb->protocol = htons(ETH_P_FIP);
+ skb->priority = fip->priority;
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
fip->send(fip, skb);
@@ -566,6 +568,7 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fc_lport *lport,
cap->fip.fip_dl_len = htons(dlen / FIP_BPW);
skb->protocol = htons(ETH_P_FIP);
+ skb->priority = fip->priority;
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
return 0;
@@ -1911,6 +1914,7 @@ static void fcoe_ctlr_vn_send(struct fcoe_ctlr *fip,
skb_put(skb, len);
skb->protocol = htons(ETH_P_FIP);
+ skb->priority = fip->priority;
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 4e041f6d808..d570573b796 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -4335,7 +4335,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
/* insert into event log */
sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
sizeof(Mpi2EventDataSasDeviceStatusChange_t);
- event_reply = kzalloc(sz, GFP_KERNEL);
+ event_reply = kzalloc(sz, GFP_ATOMIC);
if (!event_reply) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index ac326c41e93..6465dae5883 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1762,12 +1762,31 @@ qla2x00_get_host_port_state(struct Scsi_Host *shost)
scsi_qla_host_t *vha = shost_priv(shost);
struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
- if (!base_vha->flags.online)
+ if (!base_vha->flags.online) {
fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
- else if (atomic_read(&base_vha->loop_state) == LOOP_TIMEOUT)
- fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
- else
+ return;
+ }
+
+ switch (atomic_read(&base_vha->loop_state)) {
+ case LOOP_UPDATE:
+ fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
+ break;
+ case LOOP_DOWN:
+ if (test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags))
+ fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
+ else
+ fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+ break;
+ case LOOP_DEAD:
+ fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+ break;
+ case LOOP_READY:
fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+ break;
+ default:
+ fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
+ break;
+ }
}
static int
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 9df4787715c..f3cddd5800c 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -12,17 +12,17 @@
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
* | Module Init and Probe | 0x0116 | |
- * | Mailbox commands | 0x1129 | |
+ * | Mailbox commands | 0x112b | |
* | Device Discovery | 0x2083 | |
* | Queue Command and IO tracing | 0x302e | 0x3008 |
* | DPC Thread | 0x401c | |
* | Async Events | 0x5059 | |
- * | Timer Routines | 0x600d | |
+ * | Timer Routines | 0x6010 | 0x600e,0x600f |
* | User Space Interactions | 0x709d | |
- * | Task Management | 0x8041 | |
+ * | Task Management | 0x8041 | 0x800b |
* | AER/EEH | 0x900f | |
* | Virtual Port | 0xa007 | |
- * | ISP82XX Specific | 0xb051 | |
+ * | ISP82XX Specific | 0xb052 | |
* | MultiQ | 0xc00b | |
* | Misc | 0xd00b | |
* ----------------------------------------------------------------------
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index ce32d8135c9..c0c11afb685 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -578,6 +578,7 @@ extern int qla82xx_check_md_needed(scsi_qla_host_t *);
extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *);
extern int qla82xx_mbx_beacon_ctl(scsi_qla_host_t *, int);
extern char *qdev_state(uint32_t);
+extern void qla82xx_clear_pending_mbx(scsi_qla_host_t *);
/* BSG related functions */
extern int qla24xx_bsg_request(struct fc_bsg_job *);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index f03e915f187..54ea68cec4c 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1509,7 +1509,8 @@ enable_82xx_npiv:
&ha->fw_xcb_count, NULL, NULL,
&ha->max_npiv_vports, NULL);
- if (!fw_major_version && ql2xallocfwdump)
+ if (!fw_major_version && ql2xallocfwdump
+ && !IS_QLA82XX(ha))
qla2x00_alloc_fw_dump(vha);
}
} else {
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index dbec89622a0..a4b267e60a3 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -120,11 +120,10 @@ qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
* Returns a pointer to the continuation type 1 IOCB packet.
*/
static inline cont_a64_entry_t *
-qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
+qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
cont_a64_entry_t *cont_pkt;
- struct req_que *req = vha->req;
/* Adjust ring index. */
req->ring_index++;
if (req->ring_index == req->length) {
@@ -292,7 +291,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
* Five DSDs are available in the Continuation
* Type 1 IOCB.
*/
- cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
avail_dsds = 5;
}
@@ -684,7 +683,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
* Five DSDs are available in the Continuation
* Type 1 IOCB.
*/
- cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
avail_dsds = 5;
}
@@ -2070,7 +2069,8 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
* Five DSDs are available in the Cont.
* Type 1 IOCB.
*/
- cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
+ vha->hw->req_q_map[0]);
cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
avail_dsds = 5;
cont_iocb_prsnt = 1;
@@ -2096,6 +2096,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
int index;
uint16_t tot_dsds;
scsi_qla_host_t *vha = sp->fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
int loop_iterartion = 0;
int cont_iocb_prsnt = 0;
@@ -2141,7 +2142,8 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
* Five DSDs are available in the Cont.
* Type 1 IOCB.
*/
- cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
+ ha->req_q_map[0]);
cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
avail_dsds = 5;
cont_iocb_prsnt = 1;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 2516adf1aee..7b91b290ffd 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1741,7 +1741,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
resid, scsi_bufflen(cp));
cp->result = DID_ERROR << 16 | lscsi_status;
- break;
+ goto check_scsi_status;
}
if (!lscsi_status &&
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 3b3cec9f6ac..82a33533ed2 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -79,8 +79,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
mcp->mb[0] = MBS_LINK_DOWN_ERROR;
ql_log(ql_log_warn, base_vha, 0x1004,
"FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
- rval = QLA_FUNCTION_FAILED;
- goto premature_exit;
+ return QLA_FUNCTION_TIMEOUT;
}
/*
@@ -163,6 +162,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
HINT_MBX_INT_PENDING) {
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
+ ha->flags.mbox_busy = 0;
ql_dbg(ql_dbg_mbx, base_vha, 0x1010,
"Pending mailbox timeout, exiting.\n");
rval = QLA_FUNCTION_TIMEOUT;
@@ -188,6 +188,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
HINT_MBX_INT_PENDING) {
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
+ ha->flags.mbox_busy = 0;
ql_dbg(ql_dbg_mbx, base_vha, 0x1012,
"Pending mailbox timeout, exiting.\n");
rval = QLA_FUNCTION_TIMEOUT;
@@ -302,7 +303,15 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
!test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
-
+ if (IS_QLA82XX(ha)) {
+ ql_dbg(ql_dbg_mbx, vha, 0x112a,
+ "disabling pause transmit on port "
+ "0 & 1.\n");
+ qla82xx_wr_32(ha,
+ QLA82XX_CRB_NIU + 0x98,
+ CRB_NIU_XG_PAUSE_CTL_P0|
+ CRB_NIU_XG_PAUSE_CTL_P1);
+ }
ql_log(ql_log_info, base_vha, 0x101c,
"Mailbox cmd timeout occured. "
"Scheduling ISP abort eeh_busy=0x%x.\n",
@@ -318,7 +327,15 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
!test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
-
+ if (IS_QLA82XX(ha)) {
+ ql_dbg(ql_dbg_mbx, vha, 0x112b,
+ "disabling pause transmit on port "
+ "0 & 1.\n");
+ qla82xx_wr_32(ha,
+ QLA82XX_CRB_NIU + 0x98,
+ CRB_NIU_XG_PAUSE_CTL_P0|
+ CRB_NIU_XG_PAUSE_CTL_P1);
+ }
ql_log(ql_log_info, base_vha, 0x101e,
"Mailbox cmd timeout occured. "
"Scheduling ISP abort.\n");
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 94bded5ddce..03554934b0a 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -3817,6 +3817,20 @@ exit:
return rval;
}
+void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (ha->flags.mbox_busy) {
+ ha->flags.mbox_int = 1;
+ ha->flags.mbox_busy = 0;
+ ql_log(ql_log_warn, vha, 0x6010,
+ "Doing premature completion of mbx command.\n");
+ if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags))
+ complete(&ha->mbx_intr_comp);
+ }
+}
+
void qla82xx_watchdog(scsi_qla_host_t *vha)
{
uint32_t dev_state, halt_status;
@@ -3839,9 +3853,13 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
qla2xxx_wake_dpc(vha);
} else {
if (qla82xx_check_fw_alive(vha)) {
+ ql_dbg(ql_dbg_timer, vha, 0x6011,
+ "disabling pause transmit on port 0 & 1.\n");
+ qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+ CRB_NIU_XG_PAUSE_CTL_P0|CRB_NIU_XG_PAUSE_CTL_P1);
halt_status = qla82xx_rd_32(ha,
QLA82XX_PEG_HALT_STATUS1);
- ql_dbg(ql_dbg_timer, vha, 0x6005,
+ ql_log(ql_log_info, vha, 0x6005,
"dumping hw/fw registers:.\n "
" PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,.\n "
" PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,.\n "
@@ -3858,6 +3876,11 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
QLA82XX_CRB_PEG_NET_3 + 0x3c),
qla82xx_rd_32(ha,
QLA82XX_CRB_PEG_NET_4 + 0x3c));
+ if (LSW(MSB(halt_status)) == 0x67)
+ ql_log(ql_log_warn, vha, 0xb052,
+ "Firmware aborted with "
+ "error code 0x00006700. Device is "
+ "being reset.\n");
if (halt_status & HALT_STATUS_UNRECOVERABLE) {
set_bit(ISP_UNRECOVERABLE,
&vha->dpc_flags);
@@ -3869,16 +3892,8 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
}
qla2xxx_wake_dpc(vha);
ha->flags.isp82xx_fw_hung = 1;
- if (ha->flags.mbox_busy) {
- ha->flags.mbox_int = 1;
- ql_log(ql_log_warn, vha, 0x6007,
- "Due to FW hung, doing "
- "premature completion of mbx "
- "command.\n");
- if (test_bit(MBX_INTR_WAIT,
- &ha->mbx_cmd_flags))
- complete(&ha->mbx_intr_comp);
- }
+ ql_log(ql_log_warn, vha, 0x6007, "Firmware hung.\n");
+ qla82xx_clear_pending_mbx(vha);
}
}
}
@@ -4073,10 +4088,7 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
msleep(1000);
if (qla82xx_check_fw_alive(vha)) {
ha->flags.isp82xx_fw_hung = 1;
- if (ha->flags.mbox_busy) {
- ha->flags.mbox_int = 1;
- complete(&ha->mbx_intr_comp);
- }
+ qla82xx_clear_pending_mbx(vha);
break;
}
}
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 57820c199bc..57a226be339 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -1173,4 +1173,8 @@ struct qla82xx_md_entry_queue {
static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC,
0x410000B8, 0x410000BC };
+
+#define CRB_NIU_XG_PAUSE_CTL_P0 0x1
+#define CRB_NIU_XG_PAUSE_CTL_P1 0x8
+
#endif
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index fd14c7bfc62..f9e5b85e84d 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -201,12 +201,12 @@ MODULE_PARM_DESC(ql2xmdcapmask,
"Set the Minidump driver capture mask level. "
"Default is 0x7F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");
-int ql2xmdenable;
+int ql2xmdenable = 1;
module_param(ql2xmdenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmdenable,
"Enable/disable MiniDump. "
- "0 (Default) - MiniDump disabled. "
- "1 - MiniDump enabled.");
+ "0 - MiniDump disabled. "
+ "1 (Default) - MiniDump enabled.");
/*
* SCSI host template entry points
@@ -423,6 +423,7 @@ fail2:
qla25xx_delete_queues(vha);
destroy_workqueue(ha->wq);
ha->wq = NULL;
+ vha->req = ha->req_q_map[0];
fail:
ha->mqenable = 0;
kfree(ha->req_q_map);
@@ -814,49 +815,6 @@ qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
return return_status;
}
-/*
- * qla2x00_wait_for_loop_ready
- * Wait for MAX_LOOP_TIMEOUT(5 min) value for loop
- * to be in LOOP_READY state.
- * Input:
- * ha - pointer to host adapter structure
- *
- * Note:
- * Does context switching-Release SPIN_LOCK
- * (if any) before calling this routine.
- *
- *
- * Return:
- * Success (LOOP_READY) : 0
- * Failed (LOOP_NOT_READY) : 1
- */
-static inline int
-qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha)
-{
- int return_status = QLA_SUCCESS;
- unsigned long loop_timeout ;
- struct qla_hw_data *ha = vha->hw;
- scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
-
- /* wait for 5 min at the max for loop to be ready */
- loop_timeout = jiffies + (MAX_LOOP_TIMEOUT * HZ);
-
- while ((!atomic_read(&base_vha->loop_down_timer) &&
- atomic_read(&base_vha->loop_state) == LOOP_DOWN) ||
- atomic_read(&base_vha->loop_state) != LOOP_READY) {
- if (atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
- return_status = QLA_FUNCTION_FAILED;
- break;
- }
- msleep(1000);
- if (time_after_eq(jiffies, loop_timeout)) {
- return_status = QLA_FUNCTION_FAILED;
- break;
- }
- }
- return (return_status);
-}
-
static void
sp_get(struct srb *sp)
{
@@ -1035,12 +993,6 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
"Wait for hba online failed for cmd=%p.\n", cmd);
goto eh_reset_failed;
}
- err = 1;
- if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS) {
- ql_log(ql_log_warn, vha, 0x800b,
- "Wait for loop ready failed for cmd=%p.\n", cmd);
- goto eh_reset_failed;
- }
err = 2;
if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
!= QLA_SUCCESS) {
@@ -1137,10 +1089,9 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
goto eh_bus_reset_done;
}
- if (qla2x00_wait_for_loop_ready(vha) == QLA_SUCCESS) {
- if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
- ret = SUCCESS;
- }
+ if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
+ ret = SUCCESS;
+
if (ret == FAILED)
goto eh_bus_reset_done;
@@ -1206,15 +1157,6 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS)
goto eh_host_reset_lock;
- /*
- * Fixme-may be dpc thread is active and processing
- * loop_resync,so wait a while for it to
- * be completed and then issue big hammer.Otherwise
- * it may cause I/O failure as big hammer marks the
- * devices as lost kicking of the port_down_timer
- * while dpc is stuck for the mailbox to complete.
- */
- qla2x00_wait_for_loop_ready(vha);
if (vha != base_vha) {
if (qla2x00_vp_abort_isp(vha))
goto eh_host_reset_lock;
@@ -1297,16 +1239,13 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
atomic_set(&vha->loop_state, LOOP_DOWN);
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
qla2x00_mark_all_devices_lost(vha, 0);
- qla2x00_wait_for_loop_ready(vha);
}
if (ha->flags.enable_lip_reset) {
ret = qla2x00_lip_reset(vha);
- if (ret != QLA_SUCCESS) {
+ if (ret != QLA_SUCCESS)
ql_dbg(ql_dbg_taskm, vha, 0x802e,
"lip_reset failed (%d).\n", ret);
- } else
- qla2x00_wait_for_loop_ready(vha);
}
/* Issue marker command only when we are going to start the I/O */
@@ -4070,13 +4009,8 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
/* For ISP82XX complete any pending mailbox cmd */
if (IS_QLA82XX(ha)) {
ha->flags.isp82xx_fw_hung = 1;
- if (ha->flags.mbox_busy) {
- ha->flags.mbox_int = 1;
- ql_dbg(ql_dbg_aer, vha, 0x9001,
- "Due to pci channel io frozen, doing premature "
- "completion of mbx command.\n");
- complete(&ha->mbx_intr_comp);
- }
+ ql_dbg(ql_dbg_aer, vha, 0x9001, "Pci channel io frozen\n");
+ qla82xx_clear_pending_mbx(vha);
}
qla2x00_free_irqs(vha);
pci_disable_device(pdev);
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 13b6357c1fa..23f33a6d52d 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.03.07.07-k"
+#define QLA2XXX_VERSION "8.03.07.12-k"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 3
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index ace637bf254..fd5edc6e166 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -147,7 +147,7 @@
#define ISCSI_ALIAS_SIZE 32 /* ISCSI Alias name size */
#define ISCSI_NAME_SIZE 0xE0 /* ISCSI Name size */
-#define QL4_SESS_RECOVERY_TMO 30 /* iSCSI session */
+#define QL4_SESS_RECOVERY_TMO 120 /* iSCSI session */
/* recovery timeout */
#define LSDW(x) ((u32)((u64)(x)))
@@ -173,6 +173,8 @@
#define ISNS_DEREG_TOV 5
#define HBA_ONLINE_TOV 30
#define DISABLE_ACB_TOV 30
+#define IP_CONFIG_TOV 30
+#define LOGIN_TOV 12
#define MAX_RESET_HA_RETRIES 2
@@ -240,6 +242,45 @@ struct ddb_entry {
uint16_t fw_ddb_index; /* DDB firmware index */
uint32_t fw_ddb_device_state; /* F/W Device State -- see ql4_fw.h */
+ uint16_t ddb_type;
+#define FLASH_DDB 0x01
+
+ struct dev_db_entry fw_ddb_entry;
+ int (*unblock_sess)(struct iscsi_cls_session *cls_session);
+ int (*ddb_change)(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+ struct ddb_entry *ddb_entry, uint32_t state);
+
+ /* Driver Re-login */
+ unsigned long flags; /* DDB Flags */
+ uint16_t default_relogin_timeout; /* Max time to wait for
+ * relogin to complete */
+ atomic_t retry_relogin_timer; /* Min Time between relogins
+ * (4000 only) */
+ atomic_t relogin_timer; /* Max Time to wait for
+ * relogin to complete */
+ atomic_t relogin_retry_count; /* Num of times relogin has been
+ * retried */
+ uint32_t default_time2wait; /* Default Min time between
+ * relogins (+aens) */
+
+};
+
+struct qla_ddb_index {
+ struct list_head list;
+ uint16_t fw_ddb_idx;
+ struct dev_db_entry fw_ddb;
+};
+
+#define DDB_IPADDR_LEN 64
+
+struct ql4_tuple_ddb {
+ int port;
+ int tpgt;
+ char ip_addr[DDB_IPADDR_LEN];
+ char iscsi_name[ISCSI_NAME_SIZE];
+ uint16_t options;
+#define DDB_OPT_IPV6 0x0e0e
+#define DDB_OPT_IPV4 0x0f0f
};
/*
@@ -411,7 +452,7 @@ struct scsi_qla_host {
#define AF_FW_RECOVERY 19 /* 0x00080000 */
#define AF_EEH_BUSY 20 /* 0x00100000 */
#define AF_PCI_CHANNEL_IO_PERM_FAILURE 21 /* 0x00200000 */
-
+#define AF_BUILD_DDB_LIST 22 /* 0x00400000 */
unsigned long dpc_flags;
#define DPC_RESET_HA 1 /* 0x00000002 */
@@ -604,6 +645,7 @@ struct scsi_qla_host {
uint16_t bootload_minor;
uint16_t bootload_patch;
uint16_t bootload_build;
+ uint16_t def_timeout; /* Default login timeout */
uint32_t flash_state;
#define QLFLASH_WAITING 0
@@ -623,6 +665,11 @@ struct scsi_qla_host {
uint16_t iscsi_pci_func_cnt;
uint8_t model_name[16];
struct completion disable_acb_comp;
+ struct dma_pool *fw_ddb_dma_pool;
+#define DDB_DMA_BLOCK_SIZE 512
+ uint16_t pri_ddb_idx;
+ uint16_t sec_ddb_idx;
+ int is_reset;
};
struct ql4_task_data {
@@ -835,6 +882,10 @@ static inline int ql4xxx_reset_active(struct scsi_qla_host *ha)
/*---------------------------------------------------------------------------*/
/* Defines for qla4xxx_initialize_adapter() and qla4xxx_recover_adapter() */
+
+#define INIT_ADAPTER 0
+#define RESET_ADAPTER 1
+
#define PRESERVE_DDB_LIST 0
#define REBUILD_DDB_LIST 1
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index cbd5a20dbbd..4ac07f88252 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -12,6 +12,7 @@
#define MAX_PRST_DEV_DB_ENTRIES 64
#define MIN_DISC_DEV_DB_ENTRY MAX_PRST_DEV_DB_ENTRIES
#define MAX_DEV_DB_ENTRIES 512
+#define MAX_DEV_DB_ENTRIES_40XX 256
/*************************************************************************
*
@@ -604,6 +605,13 @@ struct addr_ctrl_blk {
uint8_t res14[140]; /* 274-2FF */
};
+#define IP_ADDR_COUNT 4 /* Total of 4 IP addresses supported per interface:
+ * one IPv4, one IPv6 link-local, and 2 IPv6
+ */
+
+#define IP_STATE_MASK 0x0F000000
+#define IP_STATE_SHIFT 24
+
struct init_fw_ctrl_blk {
struct addr_ctrl_blk pri;
/* struct addr_ctrl_blk sec;*/
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 160db9d5ea2..d0dd4b33020 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -13,7 +13,7 @@ struct iscsi_cls_conn;
int qla4xxx_hw_reset(struct scsi_qla_host *ha);
int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a);
int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb *srb);
-int qla4xxx_initialize_adapter(struct scsi_qla_host *ha);
+int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset);
int qla4xxx_soft_reset(struct scsi_qla_host *ha);
irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id);
@@ -153,10 +153,13 @@ int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
uint32_t *mbx_sts);
int qla4xxx_clear_ddb_entry(struct scsi_qla_host *ha, uint32_t fw_ddb_index);
int qla4xxx_send_passthru0(struct iscsi_task *task);
+void qla4xxx_free_ddb_index(struct scsi_qla_host *ha);
int qla4xxx_get_mgmt_data(struct scsi_qla_host *ha, uint16_t fw_ddb_index,
uint16_t stats_size, dma_addr_t stats_dma);
void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry);
+void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry);
int qla4xxx_bootdb_by_index(struct scsi_qla_host *ha,
struct dev_db_entry *fw_ddb_entry,
dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index);
@@ -169,11 +172,22 @@ int qla4xxx_set_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma,
int qla4xxx_restore_factory_defaults(struct scsi_qla_host *ha,
uint32_t region, uint32_t field0,
uint32_t field1);
+int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index);
+void qla4xxx_login_flash_ddb(struct iscsi_cls_session *cls_session);
+int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session);
+int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session);
+int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+ struct ddb_entry *ddb_entry, uint32_t state);
+int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+ struct ddb_entry *ddb_entry, uint32_t state);
+void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset);
/* BSG Functions */
int qla4xxx_bsg_request(struct bsg_job *bsg_job);
int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job);
+void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry);
+
extern int ql4xextended_error_logging;
extern int ql4xdontresethba;
extern int ql4xenablemsix;
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 3075fbaef55..1bdfa8120ac 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -773,22 +773,24 @@ int qla4xxx_start_firmware(struct scsi_qla_host *ha)
* be freed so that when login happens from user space there are free DDB
* indices available.
**/
-static void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
+void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
{
int max_ddbs;
int ret;
uint32_t idx = 0, next_idx = 0;
uint32_t state = 0, conn_err = 0;
- max_ddbs = is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
+ max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
MAX_DEV_DB_ENTRIES;
for (idx = 0; idx < max_ddbs; idx = next_idx) {
ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL,
&next_idx, &state, &conn_err,
NULL, NULL);
- if (ret == QLA_ERROR)
+ if (ret == QLA_ERROR) {
+ next_idx++;
continue;
+ }
if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
state == DDB_DS_SESSION_FAILED) {
DEBUG2(ql4_printk(KERN_INFO, ha,
@@ -804,7 +806,6 @@ static void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
}
}
-
/**
* qla4xxx_initialize_adapter - initiailizes hba
* @ha: Pointer to host adapter structure.
@@ -812,7 +813,7 @@ static void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
* This routine parforms all of the steps necessary to initialize the adapter.
*
**/
-int qla4xxx_initialize_adapter(struct scsi_qla_host *ha)
+int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset)
{
int status = QLA_ERROR;
@@ -840,7 +841,8 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha)
if (status == QLA_ERROR)
goto exit_init_hba;
- qla4xxx_free_ddb_index(ha);
+ if (is_reset == RESET_ADAPTER)
+ qla4xxx_build_ddb_list(ha, is_reset);
set_bit(AF_ONLINE, &ha->flags);
exit_init_hba:
@@ -855,38 +857,12 @@ exit_init_hba:
return status;
}
-/**
- * qla4xxx_process_ddb_changed - process ddb state change
- * @ha - Pointer to host adapter structure.
- * @fw_ddb_index - Firmware's device database index
- * @state - Device state
- *
- * This routine processes a Decive Database Changed AEN Event.
- **/
-int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
- uint32_t state, uint32_t conn_err)
+int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+ struct ddb_entry *ddb_entry, uint32_t state)
{
- struct ddb_entry * ddb_entry;
uint32_t old_fw_ddb_device_state;
int status = QLA_ERROR;
- /* check for out of range index */
- if (fw_ddb_index >= MAX_DDB_ENTRIES)
- goto exit_ddb_event;
-
- /* Get the corresponging ddb entry */
- ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index);
- /* Device does not currently exist in our database. */
- if (ddb_entry == NULL) {
- ql4_printk(KERN_ERR, ha, "%s: No ddb_entry at FW index [%d]\n",
- __func__, fw_ddb_index);
-
- if (state == DDB_DS_NO_CONNECTION_ACTIVE)
- clear_bit(fw_ddb_index, ha->ddb_idx_map);
-
- goto exit_ddb_event;
- }
-
old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state;
DEBUG2(ql4_printk(KERN_INFO, ha,
"%s: DDB - old state = 0x%x, new state = 0x%x for "
@@ -900,9 +876,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
switch (state) {
case DDB_DS_SESSION_ACTIVE:
case DDB_DS_DISCOVERY:
- iscsi_conn_start(ddb_entry->conn);
- iscsi_conn_login_event(ddb_entry->conn,
- ISCSI_CONN_STATE_LOGGED_IN);
+ ddb_entry->unblock_sess(ddb_entry->sess);
qla4xxx_update_session_conn_param(ha, ddb_entry);
status = QLA_SUCCESS;
break;
@@ -936,9 +910,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
switch (state) {
case DDB_DS_SESSION_ACTIVE:
case DDB_DS_DISCOVERY:
- iscsi_conn_start(ddb_entry->conn);
- iscsi_conn_login_event(ddb_entry->conn,
- ISCSI_CONN_STATE_LOGGED_IN);
+ ddb_entry->unblock_sess(ddb_entry->sess);
qla4xxx_update_session_conn_param(ha, ddb_entry);
status = QLA_SUCCESS;
break;
@@ -954,7 +926,198 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
__func__));
break;
}
+ return status;
+}
+
+void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry)
+{
+ /*
+ * This triggers a relogin. After the relogin_timer
+ * expires, the relogin gets scheduled. We must wait a
+ * minimum amount of time since receiving an 0x8014 AEN
+ * with failed device_state or a logout response before
+ * we can issue another relogin.
+ *
+ * Firmware pads this timeout: (time2wait +1).
+ * The driver's relogin retry should be longer than the
+ * firmware's; otherwise the firmware will fail the
+ * set_ddb() mbx cmd with 0x4005 since it is still
+ * counting down its time2wait.
+ */
+ atomic_set(&ddb_entry->relogin_timer, 0);
+ atomic_set(&ddb_entry->retry_relogin_timer,
+ ddb_entry->default_time2wait + 4);
+
+}
+
+int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+ struct ddb_entry *ddb_entry, uint32_t state)
+{
+ uint32_t old_fw_ddb_device_state;
+ int status = QLA_ERROR;
+
+ old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state;
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: DDB - old state = 0x%x, new state = 0x%x for "
+ "index [%d]\n", __func__,
+ ddb_entry->fw_ddb_device_state, state, fw_ddb_index));
+
+ ddb_entry->fw_ddb_device_state = state;
+
+ switch (old_fw_ddb_device_state) {
+ case DDB_DS_LOGIN_IN_PROCESS:
+ case DDB_DS_NO_CONNECTION_ACTIVE:
+ switch (state) {
+ case DDB_DS_SESSION_ACTIVE:
+ ddb_entry->unblock_sess(ddb_entry->sess);
+ qla4xxx_update_session_conn_fwddb_param(ha, ddb_entry);
+ status = QLA_SUCCESS;
+ break;
+ case DDB_DS_SESSION_FAILED:
+ iscsi_block_session(ddb_entry->sess);
+ if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+ qla4xxx_arm_relogin_timer(ddb_entry);
+ status = QLA_SUCCESS;
+ break;
+ }
+ break;
+ case DDB_DS_SESSION_ACTIVE:
+ switch (state) {
+ case DDB_DS_SESSION_FAILED:
+ iscsi_block_session(ddb_entry->sess);
+ if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+ qla4xxx_arm_relogin_timer(ddb_entry);
+ status = QLA_SUCCESS;
+ break;
+ }
+ break;
+ case DDB_DS_SESSION_FAILED:
+ switch (state) {
+ case DDB_DS_SESSION_ACTIVE:
+ ddb_entry->unblock_sess(ddb_entry->sess);
+ qla4xxx_update_session_conn_fwddb_param(ha, ddb_entry);
+ status = QLA_SUCCESS;
+ break;
+ case DDB_DS_SESSION_FAILED:
+ if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+ qla4xxx_arm_relogin_timer(ddb_entry);
+ status = QLA_SUCCESS;
+ break;
+ }
+ break;
+ default:
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unknown Event\n",
+ __func__));
+ break;
+ }
+ return status;
+}
+
+/**
+ * qla4xxx_process_ddb_changed - process ddb state change
+ * @ha - Pointer to host adapter structure.
+ * @fw_ddb_index - Firmware's device database index
+ * @state - Device state
+ *
+ * This routine processes a Device Database Changed AEN Event.
+ **/
+int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
+ uint32_t fw_ddb_index,
+ uint32_t state, uint32_t conn_err)
+{
+ struct ddb_entry *ddb_entry;
+ int status = QLA_ERROR;
+
+ /* check for out of range index */
+ if (fw_ddb_index >= MAX_DDB_ENTRIES)
+ goto exit_ddb_event;
+
+ /* Get the corresponding ddb entry */
+ ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index);
+ /* Device does not currently exist in our database. */
+ if (ddb_entry == NULL) {
+ ql4_printk(KERN_ERR, ha, "%s: No ddb_entry at FW index [%d]\n",
+ __func__, fw_ddb_index);
+
+ if (state == DDB_DS_NO_CONNECTION_ACTIVE)
+ clear_bit(fw_ddb_index, ha->ddb_idx_map);
+
+ goto exit_ddb_event;
+ }
+
+ ddb_entry->ddb_change(ha, fw_ddb_index, ddb_entry, state);
exit_ddb_event:
return status;
}
+
+/**
+ * qla4xxx_login_flash_ddb - Login to target (DDB)
+ * @cls_session: Pointer to the session to login
+ *
+ * This routine logs in to the target.
+ * Issues the set_ddb and conn open mbx commands.
+ **/
+void qla4xxx_login_flash_ddb(struct iscsi_cls_session *cls_session)
+{
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+ struct dev_db_entry *fw_ddb_entry = NULL;
+ dma_addr_t fw_ddb_dma;
+ uint32_t mbx_sts = 0;
+ int ret;
+
+ sess = cls_session->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+
+ if (!test_bit(AF_LINK_UP, &ha->flags))
+ return;
+
+ if (ddb_entry->ddb_type != FLASH_DDB) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Skipping login to non FLASH DB"));
+ goto exit_login;
+ }
+
+ fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+ &fw_ddb_dma);
+ if (fw_ddb_entry == NULL) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
+ goto exit_login;
+ }
+
+ if (ddb_entry->fw_ddb_index == INVALID_ENTRY) {
+ ret = qla4xxx_get_ddb_index(ha, &ddb_entry->fw_ddb_index);
+ if (ret == QLA_ERROR)
+ goto exit_login;
+
+ ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
+ ha->tot_ddbs++;
+ }
+
+ memcpy(fw_ddb_entry, &ddb_entry->fw_ddb_entry,
+ sizeof(struct dev_db_entry));
+ ddb_entry->sess->target_id = ddb_entry->fw_ddb_index;
+
+ ret = qla4xxx_set_ddb_entry(ha, ddb_entry->fw_ddb_index,
+ fw_ddb_dma, &mbx_sts);
+ if (ret == QLA_ERROR) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "Set DDB failed\n"));
+ goto exit_login;
+ }
+
+ ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;
+ ret = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
+ if (ret == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
+ sess->targetname);
+ goto exit_login;
+ }
+
+exit_login:
+ if (fw_ddb_entry)
+ dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+}
+
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 4c2b8487039..c2593782fbb 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -41,6 +41,16 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
return status;
}
+ if (is_qla40XX(ha)) {
+ if (test_bit(AF_HA_REMOVAL, &ha->flags)) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
+ "prematurely completing mbx cmd as "
+ "adapter removal detected\n",
+ ha->host_no, __func__));
+ return status;
+ }
+ }
+
if (is_qla8022(ha)) {
if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
@@ -413,6 +423,7 @@ qla4xxx_update_local_ifcb(struct scsi_qla_host *ha,
memcpy(ha->name_string, init_fw_cb->iscsi_name,
min(sizeof(ha->name_string),
sizeof(init_fw_cb->iscsi_name)));
+ ha->def_timeout = le16_to_cpu(init_fw_cb->def_timeout);
/*memcpy(ha->alias, init_fw_cb->Alias,
min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 30f31b127f3..4169c8baa11 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -8,6 +8,7 @@
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/iscsi_boot_sysfs.h>
+#include <linux/inet.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
@@ -31,6 +32,13 @@ static struct kmem_cache *srb_cachep;
/*
* Module parameter information and variables
*/
+int ql4xdisablesysfsboot = 1;
+module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql4xdisablesysfsboot,
+ "Set to disable exporting boot targets to sysfs\n"
+ " 0 - Export boot targets\n"
+ " 1 - Do not export boot targets (Default)");
+
int ql4xdontresethba = 0;
module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xdontresethba,
@@ -63,7 +71,7 @@ static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
MODULE_PARM_DESC(ql4xsess_recovery_tmo,
"Target Session Recovery Timeout.\n"
- " Default: 30 sec.");
+ " Default: 120 sec.");
static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
/*
@@ -415,7 +423,7 @@ static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
qla_ep = ep->dd_data;
ha = to_qla_host(qla_ep->host);
- if (adapter_up(ha))
+ if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags))
ret = 1;
return ret;
@@ -975,6 +983,150 @@ static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
}
+int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index)
+{
+ uint32_t mbx_sts = 0;
+ uint16_t tmp_ddb_index;
+ int ret;
+
+get_ddb_index:
+ tmp_ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
+
+ if (tmp_ddb_index >= MAX_DDB_ENTRIES) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Free DDB index not available\n"));
+ ret = QLA_ERROR;
+ goto exit_get_ddb_index;
+ }
+
+ if (test_and_set_bit(tmp_ddb_index, ha->ddb_idx_map))
+ goto get_ddb_index;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Found a free DDB index at %d\n", tmp_ddb_index));
+ ret = qla4xxx_req_ddb_entry(ha, tmp_ddb_index, &mbx_sts);
+ if (ret == QLA_ERROR) {
+ if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
+ ql4_printk(KERN_INFO, ha,
+ "DDB index = %d not available trying next\n",
+ tmp_ddb_index);
+ goto get_ddb_index;
+ }
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Free FW DDB not available\n"));
+ }
+
+ *ddb_index = tmp_ddb_index;
+
+exit_get_ddb_index:
+ return ret;
+}
+
+static int qla4xxx_match_ipaddress(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry,
+ char *existing_ipaddr,
+ char *user_ipaddr)
+{
+ uint8_t dst_ipaddr[IPv6_ADDR_LEN];
+ char formatted_ipaddr[DDB_IPADDR_LEN];
+ int status = QLA_SUCCESS, ret = 0;
+
+ if (ddb_entry->fw_ddb_entry.options & DDB_OPT_IPV6_DEVICE) {
+ ret = in6_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
+ '\0', NULL);
+ if (ret == 0) {
+ status = QLA_ERROR;
+ goto out_match;
+ }
+ ret = sprintf(formatted_ipaddr, "%pI6", dst_ipaddr);
+ } else {
+ ret = in4_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
+ '\0', NULL);
+ if (ret == 0) {
+ status = QLA_ERROR;
+ goto out_match;
+ }
+ ret = sprintf(formatted_ipaddr, "%pI4", dst_ipaddr);
+ }
+
+ if (strcmp(existing_ipaddr, formatted_ipaddr))
+ status = QLA_ERROR;
+
+out_match:
+ return status;
+}
+
+static int qla4xxx_match_fwdb_session(struct scsi_qla_host *ha,
+ struct iscsi_cls_conn *cls_conn)
+{
+ int idx = 0, max_ddbs, rval;
+ struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
+ struct iscsi_session *sess, *existing_sess;
+ struct iscsi_conn *conn, *existing_conn;
+ struct ddb_entry *ddb_entry;
+
+ sess = cls_sess->dd_data;
+ conn = cls_conn->dd_data;
+
+ if (sess->targetname == NULL ||
+ conn->persistent_address == NULL ||
+ conn->persistent_port == 0)
+ return QLA_ERROR;
+
+ max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+ MAX_DEV_DB_ENTRIES;
+
+ for (idx = 0; idx < max_ddbs; idx++) {
+ ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+ if (ddb_entry == NULL)
+ continue;
+
+ if (ddb_entry->ddb_type != FLASH_DDB)
+ continue;
+
+ existing_sess = ddb_entry->sess->dd_data;
+ existing_conn = ddb_entry->conn->dd_data;
+
+ if (existing_sess->targetname == NULL ||
+ existing_conn->persistent_address == NULL ||
+ existing_conn->persistent_port == 0)
+ continue;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "IQN = %s User IQN = %s\n",
+ existing_sess->targetname,
+ sess->targetname));
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "IP = %s User IP = %s\n",
+ existing_conn->persistent_address,
+ conn->persistent_address));
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Port = %d User Port = %d\n",
+ existing_conn->persistent_port,
+ conn->persistent_port));
+
+ if (strcmp(existing_sess->targetname, sess->targetname))
+ continue;
+ rval = qla4xxx_match_ipaddress(ha, ddb_entry,
+ existing_conn->persistent_address,
+ conn->persistent_address);
+ if (rval == QLA_ERROR)
+ continue;
+ if (existing_conn->persistent_port != conn->persistent_port)
+ continue;
+ break;
+ }
+
+ if (idx == max_ddbs)
+ return QLA_ERROR;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Match found in fwdb sessions\n"));
+ return QLA_SUCCESS;
+}
+
static struct iscsi_cls_session *
qla4xxx_session_create(struct iscsi_endpoint *ep,
uint16_t cmds_max, uint16_t qdepth,
@@ -984,8 +1136,7 @@ qla4xxx_session_create(struct iscsi_endpoint *ep,
struct scsi_qla_host *ha;
struct qla_endpoint *qla_ep;
struct ddb_entry *ddb_entry;
- uint32_t ddb_index;
- uint32_t mbx_sts = 0;
+ uint16_t ddb_index;
struct iscsi_session *sess;
struct sockaddr *dst_addr;
int ret;
@@ -1000,32 +1151,9 @@ qla4xxx_session_create(struct iscsi_endpoint *ep,
dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
ha = to_qla_host(qla_ep->host);
-get_ddb_index:
- ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
-
- if (ddb_index >= MAX_DDB_ENTRIES) {
- DEBUG2(ql4_printk(KERN_INFO, ha,
- "Free DDB index not available\n"));
- return NULL;
- }
-
- if (test_and_set_bit(ddb_index, ha->ddb_idx_map))
- goto get_ddb_index;
-
- DEBUG2(ql4_printk(KERN_INFO, ha,
- "Found a free DDB index at %d\n", ddb_index));
- ret = qla4xxx_req_ddb_entry(ha, ddb_index, &mbx_sts);
- if (ret == QLA_ERROR) {
- if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
- ql4_printk(KERN_INFO, ha,
- "DDB index = %d not available trying next\n",
- ddb_index);
- goto get_ddb_index;
- }
- DEBUG2(ql4_printk(KERN_INFO, ha,
- "Free FW DDB not available\n"));
+ ret = qla4xxx_get_ddb_index(ha, &ddb_index);
+ if (ret == QLA_ERROR)
return NULL;
- }
cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host,
cmds_max, sizeof(struct ddb_entry),
@@ -1040,6 +1168,8 @@ get_ddb_index:
ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
ddb_entry->ha = ha;
ddb_entry->sess = cls_sess;
+ ddb_entry->unblock_sess = qla4xxx_unblock_ddb;
+ ddb_entry->ddb_change = qla4xxx_ddb_change;
cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
ha->tot_ddbs++;
@@ -1077,6 +1207,9 @@ qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
conn_idx);
+ if (!cls_conn)
+ return NULL;
+
sess = cls_sess->dd_data;
ddb_entry = sess->dd_data;
ddb_entry->conn = cls_conn;
@@ -1109,7 +1242,7 @@ static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
struct iscsi_session *sess;
struct ddb_entry *ddb_entry;
struct scsi_qla_host *ha;
- struct dev_db_entry *fw_ddb_entry;
+ struct dev_db_entry *fw_ddb_entry = NULL;
dma_addr_t fw_ddb_entry_dma;
uint32_t mbx_sts = 0;
int ret = 0;
@@ -1120,12 +1253,25 @@ static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
ddb_entry = sess->dd_data;
ha = ddb_entry->ha;
+ /* Check if a matching FW DDB already exists; if so, do not log in
+ * to this target, as that could cause the target to log out the
+ * previous connection
+ */
+ ret = qla4xxx_match_fwdb_session(ha, cls_conn);
+ if (ret == QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha,
+ "Session already exist in FW.\n");
+ ret = -EEXIST;
+ goto exit_conn_start;
+ }
+
fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
&fw_ddb_entry_dma, GFP_KERNEL);
if (!fw_ddb_entry) {
ql4_printk(KERN_ERR, ha,
"%s: Unable to allocate dma buffer\n", __func__);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto exit_conn_start;
}
ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts);
@@ -1138,9 +1284,7 @@ static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
if (mbx_sts)
if (ddb_entry->fw_ddb_device_state ==
DDB_DS_SESSION_ACTIVE) {
- iscsi_conn_start(ddb_entry->conn);
- iscsi_conn_login_event(ddb_entry->conn,
- ISCSI_CONN_STATE_LOGGED_IN);
+ ddb_entry->unblock_sess(ddb_entry->sess);
goto exit_set_param;
}
@@ -1167,8 +1311,9 @@ exit_set_param:
ret = 0;
exit_conn_start:
- dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
- fw_ddb_entry, fw_ddb_entry_dma);
+ if (fw_ddb_entry)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ fw_ddb_entry, fw_ddb_entry_dma);
return ret;
}
@@ -1344,6 +1489,101 @@ static int qla4xxx_task_xmit(struct iscsi_task *task)
return -ENOSYS;
}
+static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry,
+ struct iscsi_cls_session *cls_sess,
+ struct iscsi_cls_conn *cls_conn)
+{
+ int buflen = 0;
+ struct iscsi_session *sess;
+ struct iscsi_conn *conn;
+ char ip_addr[DDB_IPADDR_LEN];
+ uint16_t options = 0;
+
+ sess = cls_sess->dd_data;
+ conn = cls_conn->dd_data;
+
+ conn->max_recv_dlength = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
+
+ conn->max_xmit_dlength = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
+
+ sess->initial_r2t_en =
+ (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options));
+
+ sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
+
+ sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options));
+
+ sess->first_burst = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
+
+ sess->max_burst = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
+
+ sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
+
+ sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
+
+ conn->persistent_port = le16_to_cpu(fw_ddb_entry->port);
+
+ sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
+
+ options = le16_to_cpu(fw_ddb_entry->options);
+ if (options & DDB_OPT_IPV6_DEVICE)
+ sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr);
+ else
+ sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr);
+
+ iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME,
+ (char *)fw_ddb_entry->iscsi_name, buflen);
+ iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME,
+ (char *)ha->name_string, buflen);
+ iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS,
+ (char *)ip_addr, buflen);
+}
+
+void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry)
+{
+ struct iscsi_cls_session *cls_sess;
+ struct iscsi_cls_conn *cls_conn;
+ uint32_t ddb_state;
+ dma_addr_t fw_ddb_entry_dma;
+ struct dev_db_entry *fw_ddb_entry;
+
+ fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ &fw_ddb_entry_dma, GFP_KERNEL);
+ if (!fw_ddb_entry) {
+ ql4_printk(KERN_ERR, ha,
+ "%s: Unable to allocate dma buffer\n", __func__);
+ goto exit_session_conn_fwddb_param;
+ }
+
+ if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
+ fw_ddb_entry_dma, NULL, NULL, &ddb_state,
+ NULL, NULL, NULL) == QLA_ERROR) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
+ "get_ddb_entry for fw_ddb_index %d\n",
+ ha->host_no, __func__,
+ ddb_entry->fw_ddb_index));
+ goto exit_session_conn_fwddb_param;
+ }
+
+ cls_sess = ddb_entry->sess;
+
+ cls_conn = ddb_entry->conn;
+
+ /* Update params */
+ qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
+
+exit_session_conn_fwddb_param:
+ if (fw_ddb_entry)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ fw_ddb_entry, fw_ddb_entry_dma);
+}
+
void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry)
{
@@ -1360,7 +1600,7 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
if (!fw_ddb_entry) {
ql4_printk(KERN_ERR, ha,
"%s: Unable to allocate dma buffer\n", __func__);
- return;
+ goto exit_session_conn_param;
}
if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
@@ -1370,7 +1610,7 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
"get_ddb_entry for fw_ddb_index %d\n",
ha->host_no, __func__,
ddb_entry->fw_ddb_index));
- return;
+ goto exit_session_conn_param;
}
cls_sess = ddb_entry->sess;
@@ -1379,6 +1619,12 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
cls_conn = ddb_entry->conn;
conn = cls_conn->dd_data;
+ /* Update timers after login */
+ ddb_entry->default_relogin_timeout =
+ le16_to_cpu(fw_ddb_entry->def_timeout);
+ ddb_entry->default_time2wait =
+ le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
+
/* Update params */
conn->max_recv_dlength = BYTE_UNITS *
le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
@@ -1407,6 +1653,11 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
memcpy(sess->initiatorname, ha->name_string,
min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
+
+exit_session_conn_param:
+ if (fw_ddb_entry)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ fw_ddb_entry, fw_ddb_entry_dma);
}
/*
@@ -1607,6 +1858,9 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
vfree(ha->chap_list);
ha->chap_list = NULL;
+ if (ha->fw_ddb_dma_pool)
+ dma_pool_destroy(ha->fw_ddb_dma_pool);
+
/* release io space registers */
if (is_qla8022(ha)) {
if (ha->nx_pcibase)
@@ -1689,6 +1943,16 @@ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
goto mem_alloc_error_exit;
}
+ ha->fw_ddb_dma_pool = dma_pool_create("ql4_fw_ddb", &ha->pdev->dev,
+ DDB_DMA_BLOCK_SIZE, 8, 0);
+
+ if (ha->fw_ddb_dma_pool == NULL) {
+ ql4_printk(KERN_WARNING, ha,
+ "%s: fw_ddb_dma_pool allocation failed..\n",
+ __func__);
+ goto mem_alloc_error_exit;
+ }
+
return QLA_SUCCESS;
mem_alloc_error_exit:
@@ -1800,6 +2064,60 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
}
}
+void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
+{
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+
+ if (!(ddb_entry->ddb_type == FLASH_DDB))
+ return;
+
+ if (adapter_up(ha) && !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
+ !iscsi_is_session_online(cls_sess)) {
+ if (atomic_read(&ddb_entry->retry_relogin_timer) !=
+ INVALID_ENTRY) {
+ if (atomic_read(&ddb_entry->retry_relogin_timer) ==
+ 0) {
+ atomic_set(&ddb_entry->retry_relogin_timer,
+ INVALID_ENTRY);
+ set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+ set_bit(DF_RELOGIN, &ddb_entry->flags);
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: index [%d] login device\n",
+ __func__, ddb_entry->fw_ddb_index));
+ } else
+ atomic_dec(&ddb_entry->retry_relogin_timer);
+ }
+ }
+
+ /* Wait for the relogin to time out */
+ if (atomic_read(&ddb_entry->relogin_timer) &&
+ (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) {
+ /*
+ * If the relogin times out and the device is
+ * still NOT ONLINE then try and relogin again.
+ */
+ if (!iscsi_is_session_online(cls_sess)) {
+ /* Reset retry relogin timer */
+ atomic_inc(&ddb_entry->relogin_retry_count);
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: index[%d] relogin timed out-retrying"
+ " relogin (%d), retry (%d)\n", __func__,
+ ddb_entry->fw_ddb_index,
+ atomic_read(&ddb_entry->relogin_retry_count),
+ ddb_entry->default_time2wait + 4));
+ set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+ atomic_set(&ddb_entry->retry_relogin_timer,
+ ddb_entry->default_time2wait + 4);
+ }
+ }
+}
+
/**
* qla4xxx_timer - checks every second for work to do.
* @ha: Pointer to host adapter structure.
@@ -1809,6 +2127,8 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
int start_dpc = 0;
uint16_t w;
+ iscsi_host_for_each_session(ha->host, qla4xxx_check_relogin_flash_ddb);
+
/* If we are in the middle of AER/EEH processing
* skip any processing and reschedule the timer
*/
@@ -2078,7 +2398,12 @@ static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session)
sess = cls_session->dd_data;
ddb_entry = sess->dd_data;
ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED;
- iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
+
+ if (ddb_entry->ddb_type == FLASH_DDB)
+ iscsi_block_session(ddb_entry->sess);
+ else
+ iscsi_session_failure(cls_session->dd_data,
+ ISCSI_ERR_CONN_FAILED);
}
/**
@@ -2163,7 +2488,7 @@ recover_ha_init_adapter:
/* NOTE: AF_ONLINE flag set upon successful completion of
* qla4xxx_initialize_adapter */
- status = qla4xxx_initialize_adapter(ha);
+ status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
}
/* Retry failed adapter initialization, if necessary
@@ -2245,17 +2570,108 @@ static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session)
iscsi_unblock_session(ddb_entry->sess);
} else {
/* Trigger relogin */
- iscsi_session_failure(cls_session->dd_data,
- ISCSI_ERR_CONN_FAILED);
+ if (ddb_entry->ddb_type == FLASH_DDB) {
+ if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+ qla4xxx_arm_relogin_timer(ddb_entry);
+ } else
+ iscsi_session_failure(cls_session->dd_data,
+ ISCSI_ERR_CONN_FAILED);
}
}
}
+int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session)
+{
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+
+ sess = cls_session->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
+ " unblock session\n", ha->host_no, __func__,
+ ddb_entry->fw_ddb_index);
+
+ iscsi_unblock_session(ddb_entry->sess);
+
+ /* Start scan target */
+ if (test_bit(AF_ONLINE, &ha->flags)) {
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
+ " start scan\n", ha->host_no, __func__,
+ ddb_entry->fw_ddb_index);
+ scsi_queue_work(ha->host, &ddb_entry->sess->scan_work);
+ }
+ return QLA_SUCCESS;
+}
+
+int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session)
+{
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+
+ sess = cls_session->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
+ " unblock user space session\n", ha->host_no, __func__,
+ ddb_entry->fw_ddb_index);
+ iscsi_conn_start(ddb_entry->conn);
+ iscsi_conn_login_event(ddb_entry->conn,
+ ISCSI_CONN_STATE_LOGGED_IN);
+
+ return QLA_SUCCESS;
+}
+
static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
{
iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices);
}
+static void qla4xxx_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
+{
+ uint16_t relogin_timer;
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+
+ relogin_timer = max(ddb_entry->default_relogin_timeout,
+ (uint16_t)RELOGIN_TOV);
+ atomic_set(&ddb_entry->relogin_timer, relogin_timer);
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: Relogin index [%d]. TOV=%d\n", ha->host_no,
+ ddb_entry->fw_ddb_index, relogin_timer));
+
+ qla4xxx_login_flash_ddb(cls_sess);
+}
+
+static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess)
+{
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+
+ if (!(ddb_entry->ddb_type == FLASH_DDB))
+ return;
+
+ if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) &&
+ !iscsi_is_session_online(cls_sess)) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "relogin issued\n"));
+ qla4xxx_relogin_flash_ddb(cls_sess);
+ }
+}
+
void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
{
if (ha->dpc_thread)
@@ -2356,6 +2772,12 @@ dpc_post_reset_ha:
if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
qla4xxx_get_dhcp_ip_address(ha);
+ /* ---- relogin device? --- */
+ if (adapter_up(ha) &&
+ test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) {
+ iscsi_host_for_each_session(ha->host, qla4xxx_dpc_relogin);
+ }
+
/* ---- link change? --- */
if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
if (!test_bit(AF_LINK_UP, &ha->flags)) {
@@ -2368,8 +2790,12 @@ dpc_post_reset_ha:
* fatal error recovery. Therefore, the driver must
* manually relogin to devices when recovering from
* connection failures, logouts, expired KATO, etc. */
-
- qla4xxx_relogin_all_devices(ha);
+ if (test_and_clear_bit(AF_BUILD_DDB_LIST, &ha->flags)) {
+ qla4xxx_build_ddb_list(ha, ha->is_reset);
+ iscsi_host_for_each_session(ha->host,
+ qla4xxx_login_flash_ddb);
+ } else
+ qla4xxx_relogin_all_devices(ha);
}
}
}
@@ -2867,6 +3293,9 @@ static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
" target ID %d\n", __func__, ddb_index[0],
ddb_index[1]));
+ ha->pri_ddb_idx = ddb_index[0];
+ ha->sec_ddb_idx = ddb_index[1];
+
exit_boot_info_free:
dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma);
exit_boot_info:
@@ -3034,6 +3463,9 @@ static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
return ret;
}
+ if (ql4xdisablesysfsboot)
+ return QLA_SUCCESS;
+
if (ddb_index[0] == 0xffff)
goto sec_target;
@@ -3066,7 +3498,15 @@ static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
struct iscsi_boot_kobj *boot_kobj;
if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS)
- return 0;
+ return QLA_ERROR;
+
+ if (ql4xdisablesysfsboot) {
+ ql4_printk(KERN_INFO, ha,
+ "%s: syfsboot disabled - driver will trigger login"
+ "and publish session for discovery .\n", __func__);
+ return QLA_SUCCESS;
+ }
+
ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no);
if (!ha->boot_kset)
@@ -3108,7 +3548,7 @@ static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
if (!boot_kobj)
goto put_host;
- return 0;
+ return QLA_SUCCESS;
put_host:
scsi_host_put(ha->host);
@@ -3174,9 +3614,507 @@ static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
exit_chap_list:
dma_free_coherent(&ha->pdev->dev, chap_size,
chap_flash_data, chap_dma);
- return;
}
+static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry,
+ struct ql4_tuple_ddb *tddb)
+{
+ struct scsi_qla_host *ha;
+ struct iscsi_cls_session *cls_sess;
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_session *sess;
+ struct iscsi_conn *conn;
+
+ DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
+ ha = ddb_entry->ha;
+ cls_sess = ddb_entry->sess;
+ sess = cls_sess->dd_data;
+ cls_conn = ddb_entry->conn;
+ conn = cls_conn->dd_data;
+
+ tddb->tpgt = sess->tpgt;
+ tddb->port = conn->persistent_port;
+ strncpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE);
+ strncpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN);
+}
+
+static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry,
+ struct ql4_tuple_ddb *tddb)
+{
+ uint16_t options = 0;
+
+ tddb->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
+ memcpy(&tddb->iscsi_name[0], &fw_ddb_entry->iscsi_name[0],
+ min(sizeof(tddb->iscsi_name), sizeof(fw_ddb_entry->iscsi_name)));
+
+ options = le16_to_cpu(fw_ddb_entry->options);
+ if (options & DDB_OPT_IPV6_DEVICE)
+ sprintf(tddb->ip_addr, "%pI6", fw_ddb_entry->ip_addr);
+ else
+ sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr);
+
+ tddb->port = le16_to_cpu(fw_ddb_entry->port);
+}
+
+static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
+ struct ql4_tuple_ddb *old_tddb,
+ struct ql4_tuple_ddb *new_tddb)
+{
+ if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
+ return QLA_ERROR;
+
+ if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr))
+ return QLA_ERROR;
+
+ if (old_tddb->port != new_tddb->port)
+ return QLA_ERROR;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]",
+ old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr,
+ old_tddb->iscsi_name, new_tddb->port, new_tddb->tpgt,
+ new_tddb->ip_addr, new_tddb->iscsi_name));
+
+ return QLA_SUCCESS;
+}
+
+static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry)
+{
+ struct ddb_entry *ddb_entry;
+ struct ql4_tuple_ddb *fw_tddb = NULL;
+ struct ql4_tuple_ddb *tmp_tddb = NULL;
+ int idx;
+ int ret = QLA_ERROR;
+
+ fw_tddb = vzalloc(sizeof(*fw_tddb));
+ if (!fw_tddb) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha,
+ "Memory Allocation failed.\n"));
+ ret = QLA_SUCCESS;
+ goto exit_check;
+ }
+
+ tmp_tddb = vzalloc(sizeof(*tmp_tddb));
+ if (!tmp_tddb) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha,
+ "Memory Allocation failed.\n"));
+ ret = QLA_SUCCESS;
+ goto exit_check;
+ }
+
+ qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb);
+
+ for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
+ ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+ if (ddb_entry == NULL)
+ continue;
+
+ qla4xxx_get_param_ddb(ddb_entry, tmp_tddb);
+ if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb)) {
+ ret = QLA_SUCCESS; /* found */
+ goto exit_check;
+ }
+ }
+
+exit_check:
+ if (fw_tddb)
+ vfree(fw_tddb);
+ if (tmp_tddb)
+ vfree(tmp_tddb);
+ return ret;
+}
+
+static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
+ struct list_head *list_nt,
+ struct dev_db_entry *fw_ddb_entry)
+{
+ struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
+ struct ql4_tuple_ddb *fw_tddb = NULL;
+ struct ql4_tuple_ddb *tmp_tddb = NULL;
+ int ret = QLA_ERROR;
+
+ fw_tddb = vzalloc(sizeof(*fw_tddb));
+ if (!fw_tddb) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha,
+ "Memory Allocation failed.\n"));
+ ret = QLA_SUCCESS;
+ goto exit_check;
+ }
+
+ tmp_tddb = vzalloc(sizeof(*tmp_tddb));
+ if (!tmp_tddb) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha,
+ "Memory Allocation failed.\n"));
+ ret = QLA_SUCCESS;
+ goto exit_check;
+ }
+
+ qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb);
+
+ list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
+ qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb);
+ if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb)) {
+ ret = QLA_SUCCESS; /* found */
+ goto exit_check;
+ }
+ }
+
+exit_check:
+ if (fw_tddb)
+ vfree(fw_tddb);
+ if (tmp_tddb)
+ vfree(tmp_tddb);
+ return ret;
+}
+
+static void qla4xxx_free_nt_list(struct list_head *list_nt)
+{
+ struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
+
+ /* Free up the normaltargets list */
+ list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
+ list_del_init(&nt_ddb_idx->list);
+ vfree(nt_ddb_idx);
+ }
+
+}
+
+static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry)
+{
+ struct iscsi_endpoint *ep;
+ struct sockaddr_in *addr;
+ struct sockaddr_in6 *addr6;
+ struct sockaddr *dst_addr;
+ char *ip;
+
+ /* TODO: the iscsi_endpoint needs to be destroyed on unload */
+ dst_addr = vmalloc(sizeof(*dst_addr));
+ if (!dst_addr)
+ return NULL;
+
+ if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) {
+ dst_addr->sa_family = AF_INET6;
+ addr6 = (struct sockaddr_in6 *)dst_addr;
+ ip = (char *)&addr6->sin6_addr;
+ memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
+ addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port));
+
+ } else {
+ dst_addr->sa_family = AF_INET;
+ addr = (struct sockaddr_in *)dst_addr;
+ ip = (char *)&addr->sin_addr;
+ memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN);
+ addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port));
+ }
+
+ ep = qla4xxx_ep_connect(ha->host, dst_addr, 0);
+ vfree(dst_addr);
+ return ep;
+}
+
+static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
+{
+ if (ql4xdisablesysfsboot)
+ return QLA_SUCCESS;
+ if (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx)
+ return QLA_ERROR;
+ return QLA_SUCCESS;
+}
+
+static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry)
+{
+ ddb_entry->ddb_type = FLASH_DDB;
+ ddb_entry->fw_ddb_index = INVALID_ENTRY;
+ ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
+ ddb_entry->ha = ha;
+ ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb;
+ ddb_entry->ddb_change = qla4xxx_flash_ddb_change;
+
+ atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
+ atomic_set(&ddb_entry->relogin_timer, 0);
+ atomic_set(&ddb_entry->relogin_retry_count, 0);
+
+ ddb_entry->default_relogin_timeout =
+ le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
+ ddb_entry->default_time2wait =
+ le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
+}
+
+static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
+{
+ uint32_t idx = 0;
+ uint32_t ip_idx[IP_ADDR_COUNT] = {0, 1, 2, 3}; /* 4 IP interfaces */
+ uint32_t sts[MBOX_REG_COUNT];
+ uint32_t ip_state;
+ unsigned long wtime;
+ int ret;
+
+ wtime = jiffies + (HZ * IP_CONFIG_TOV);
+ do {
+ for (idx = 0; idx < IP_ADDR_COUNT; idx++) {
+ if (ip_idx[idx] == -1)
+ continue;
+
+ ret = qla4xxx_get_ip_state(ha, 0, ip_idx[idx], sts);
+
+ if (ret == QLA_ERROR) {
+ ip_idx[idx] = -1;
+ continue;
+ }
+
+ ip_state = (sts[1] & IP_STATE_MASK) >> IP_STATE_SHIFT;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Waiting for IP state for idx = %d, state = 0x%x\n",
+ ip_idx[idx], ip_state));
+ if (ip_state == IP_ADDRSTATE_UNCONFIGURED ||
+ ip_state == IP_ADDRSTATE_INVALID ||
+ ip_state == IP_ADDRSTATE_PREFERRED ||
+ ip_state == IP_ADDRSTATE_DEPRICATED ||
+ ip_state == IP_ADDRSTATE_DISABLING)
+ ip_idx[idx] = -1;
+
+ }
+
+ /* Break if all IP states checked */
+ if ((ip_idx[0] == -1) &&
+ (ip_idx[1] == -1) &&
+ (ip_idx[2] == -1) &&
+ (ip_idx[3] == -1))
+ break;
+ schedule_timeout_uninterruptible(HZ);
+ } while (time_after(wtime, jiffies));
+}
+
+void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
+{
+ int max_ddbs;
+ int ret;
+ uint32_t idx = 0, next_idx = 0;
+ uint32_t state = 0, conn_err = 0;
+ uint16_t conn_id;
+ struct dev_db_entry *fw_ddb_entry;
+ struct ddb_entry *ddb_entry = NULL;
+ dma_addr_t fw_ddb_dma;
+ struct iscsi_cls_session *cls_sess;
+ struct iscsi_session *sess;
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_endpoint *ep;
+ uint16_t cmds_max = 32, tmo = 0;
+ uint32_t initial_cmdsn = 0;
+ struct list_head list_st, list_nt; /* List of sendtargets */
+ struct qla_ddb_index *st_ddb_idx, *st_ddb_idx_tmp;
+ int fw_idx_size;
+ unsigned long wtime;
+ struct qla_ddb_index *nt_ddb_idx;
+
+ if (!test_bit(AF_LINK_UP, &ha->flags)) {
+ set_bit(AF_BUILD_DDB_LIST, &ha->flags);
+ ha->is_reset = is_reset;
+ return;
+ }
+ max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+ MAX_DEV_DB_ENTRIES;
+
+ fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+ &fw_ddb_dma);
+ if (fw_ddb_entry == NULL) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
+ goto exit_ddb_list;
+ }
+
+ INIT_LIST_HEAD(&list_st);
+ INIT_LIST_HEAD(&list_nt);
+ fw_idx_size = sizeof(struct qla_ddb_index);
+
+ for (idx = 0; idx < max_ddbs; idx = next_idx) {
+ ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry,
+ fw_ddb_dma, NULL,
+ &next_idx, &state, &conn_err,
+ NULL, &conn_id);
+ if (ret == QLA_ERROR)
+ break;
+
+ if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
+ goto continue_next_st;
+
+ /* Check if ST, add to the list_st */
+ if (strlen((char *) fw_ddb_entry->iscsi_name) != 0)
+ goto continue_next_st;
+
+ st_ddb_idx = vzalloc(fw_idx_size);
+ if (!st_ddb_idx)
+ break;
+
+ st_ddb_idx->fw_ddb_idx = idx;
+
+ list_add_tail(&st_ddb_idx->list, &list_st);
+continue_next_st:
+ if (next_idx == 0)
+ break;
+ }
+
+ /* Before issuing the conn open mbox, ensure all IP states are configured;
+ * note that conn open fails if the IPs are not configured.
+ */
+ qla4xxx_wait_for_ip_configuration(ha);
+
+ /* Go thru the STs and fire the sendtargets by issuing conn open mbx */
+ list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
+ qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx);
+ }
+
+ /* Wait for all sendtargets to complete (minimum 12 sec) */
+ tmo = ((ha->def_timeout < LOGIN_TOV) ? LOGIN_TOV : ha->def_timeout);
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Default time to wait for build ddb %d\n", tmo));
+
+ wtime = jiffies + (HZ * tmo);
+ do {
+ list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st,
+ list) {
+ ret = qla4xxx_get_fwddb_entry(ha,
+ st_ddb_idx->fw_ddb_idx,
+ NULL, 0, NULL, &next_idx,
+ &state, &conn_err, NULL,
+ NULL);
+ if (ret == QLA_ERROR)
+ continue;
+
+ if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
+ state == DDB_DS_SESSION_FAILED) {
+ list_del_init(&st_ddb_idx->list);
+ vfree(st_ddb_idx);
+ }
+ }
+ schedule_timeout_uninterruptible(HZ / 10);
+ } while (time_after(wtime, jiffies));
+
+ /* Free up the sendtargets list */
+ list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
+ list_del_init(&st_ddb_idx->list);
+ vfree(st_ddb_idx);
+ }
+
+ for (idx = 0; idx < max_ddbs; idx = next_idx) {
+ ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry,
+ fw_ddb_dma, NULL,
+ &next_idx, &state, &conn_err,
+ NULL, &conn_id);
+ if (ret == QLA_ERROR)
+ break;
+
+ if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
+ goto continue_next_nt;
+
+ /* Check if NT, then add it to the list */
+ if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
+ goto continue_next_nt;
+
+ if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
+ state == DDB_DS_SESSION_FAILED) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Adding DDB to session = 0x%x\n",
+ idx));
+ if (is_reset == INIT_ADAPTER) {
+ nt_ddb_idx = vmalloc(fw_idx_size);
+ if (!nt_ddb_idx)
+ break;
+
+ nt_ddb_idx->fw_ddb_idx = idx;
+
+ memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
+ sizeof(struct dev_db_entry));
+
+ if (qla4xxx_is_flash_ddb_exists(ha, &list_nt,
+ fw_ddb_entry) == QLA_SUCCESS) {
+ vfree(nt_ddb_idx);
+ goto continue_next_nt;
+ }
+ list_add_tail(&nt_ddb_idx->list, &list_nt);
+ } else if (is_reset == RESET_ADAPTER) {
+ if (qla4xxx_is_session_exists(ha,
+ fw_ddb_entry) == QLA_SUCCESS)
+ goto continue_next_nt;
+ }
+
+ /* Create the session object with INVALID_ENTRY;
+ * the target_id gets set when we issue the login.
+ */
+ cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport,
+ ha->host, cmds_max,
+ sizeof(struct ddb_entry),
+ sizeof(struct ql4_task_data),
+ initial_cmdsn, INVALID_ENTRY);
+ if (!cls_sess)
+ goto exit_ddb_list;
+
+ /*
+ * iscsi_session_setup increments the driver reference
+ * count, which would prevent the driver from being
+ * unloaded, so call module_put() to decrement the
+ * reference count again.
+ */
+ module_put(qla4xxx_iscsi_transport.owner);
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ddb_entry->sess = cls_sess;
+
+ cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
+ memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
+ sizeof(struct dev_db_entry));
+
+ qla4xxx_setup_flash_ddb_entry(ha, ddb_entry);
+
+ cls_conn = iscsi_conn_setup(cls_sess,
+ sizeof(struct qla_conn),
+ conn_id);
+ if (!cls_conn)
+ goto exit_ddb_list;
+
+ ddb_entry->conn = cls_conn;
+
+ /* Set up the ep for displaying attributes in sysfs */
+ ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
+ if (ep) {
+ ep->conn = cls_conn;
+ cls_conn->ep = ep;
+ } else {
+ DEBUG2(ql4_printk(KERN_ERR, ha,
+ "Unable to get ep\n"));
+ }
+
+ /* Update sess/conn params */
+ qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess,
+ cls_conn);
+
+ if (is_reset == RESET_ADAPTER) {
+ iscsi_block_session(cls_sess);
+ /* Use the relogin path to discover new devices
+ * by short-circuiting the logic that sets the
+ * relogin timer - instead, set the flags to
+ * initiate login right away.
+ */
+ set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+ set_bit(DF_RELOGIN, &ddb_entry->flags);
+ }
+ }
+continue_next_nt:
+ if (next_idx == 0)
+ break;
+ }
+exit_ddb_list:
+ qla4xxx_free_nt_list(&list_nt);
+ if (fw_ddb_entry)
+ dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+
+ qla4xxx_free_ddb_index(ha);
+}
+
+
/**
* qla4xxx_probe_adapter - callback function to probe HBA
* @pdev: pointer to pci_dev structure
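
Both new wait loops above (qla4xxx_wait_for_ip_configuration() and the sendtarget wait in qla4xxx_build_ddb_list()) follow the usual jiffies-based timeout idiom: compute an absolute deadline, poll, sleep, and bail out once time_after() says the deadline has passed. A stripped-down sketch of that pattern, with a hypothetical ready() callback standing in for the firmware query:

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/types.h>

/* Poll ready() roughly once a second until it succeeds or tov_secs elapse. */
static int wait_for_ready(bool (*ready)(void *), void *arg, unsigned int tov_secs)
{
	unsigned long wtime = jiffies + (HZ * tov_secs);	/* absolute deadline */

	do {
		if (ready(arg))
			return 0;
		/* sleep instead of busy-waiting between polls */
		schedule_timeout_uninterruptible(HZ);
	} while (time_after(wtime, jiffies));	/* wraparound-safe comparison */

	return -ETIMEDOUT;
}
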
@@ -3298,7 +4236,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
* firmware
* NOTE: interrupts enabled upon successful completion
*/
- status = qla4xxx_initialize_adapter(ha);
+ status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
while ((!test_bit(AF_ONLINE, &ha->flags)) &&
init_retry_count++ < MAX_INIT_RETRIES) {
@@ -3319,7 +4257,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
continue;
- status = qla4xxx_initialize_adapter(ha);
+ status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
}
if (!test_bit(AF_ONLINE, &ha->flags)) {
@@ -3386,12 +4324,16 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
ha->patch_number, ha->build_number);
- qla4xxx_create_chap_list(ha);
-
if (qla4xxx_setup_boot_info(ha))
ql4_printk(KERN_ERR, ha, "%s:ISCSI boot info setup failed\n",
__func__);
+ /* Build the ddb list and log in to each entry */
+ qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
+ iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);
+
+ qla4xxx_create_chap_list(ha);
+
qla4xxx_create_ifaces(ha);
return 0;
@@ -3449,6 +4391,38 @@ static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
}
}
+static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)
+{
+ struct ddb_entry *ddb_entry;
+ int options;
+ int idx;
+
+ for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
+
+ ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+ if ((ddb_entry != NULL) &&
+ (ddb_entry->ddb_type == FLASH_DDB)) {
+
+ options = LOGOUT_OPTION_CLOSE_SESSION;
+ if (qla4xxx_session_logout_ddb(ha, ddb_entry, options)
+ == QLA_ERROR)
+ ql4_printk(KERN_ERR, ha, "%s: Logout failed\n",
+ __func__);
+
+ qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
+ /*
+ * We decremented the driver reference count when we set
+ * up the session so that driver unload would be seamless
+ * without actually destroying the session; take the
+ * reference back before tearing the session down.
+ */
+ try_module_get(qla4xxx_iscsi_transport.owner);
+ iscsi_destroy_endpoint(ddb_entry->conn->ep);
+ qla4xxx_free_ddb(ha, ddb_entry);
+ iscsi_session_teardown(ddb_entry->sess);
+ }
+ }
+}
/**
* qla4xxx_remove_adapter - callback function to remove adapter.
* @pci_dev: PCI device pointer
@@ -3465,9 +4439,11 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
/* destroy iface from sysfs */
qla4xxx_destroy_ifaces(ha);
- if (ha->boot_kset)
+ if ((!ql4xdisablesysfsboot) && ha->boot_kset)
iscsi_boot_destroy_kset(ha->boot_kset);
+ qla4xxx_destroy_fw_ddb_session(ha);
+
scsi_remove_host(ha->host);
qla4xxx_free_adapter(ha);
@@ -4115,7 +5091,7 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
qla4_8xxx_idc_unlock(ha);
clear_bit(AF_FW_RECOVERY, &ha->flags);
- rval = qla4xxx_initialize_adapter(ha);
+ rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
qla4_8xxx_idc_lock(ha);
if (rval != QLA_SUCCESS) {
@@ -4151,7 +5127,7 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
if ((qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
QLA82XX_DEV_READY)) {
clear_bit(AF_FW_RECOVERY, &ha->flags);
- rval = qla4xxx_initialize_adapter(ha);
+ rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
if (rval == QLA_SUCCESS) {
ret = qla4xxx_request_irqs(ha);
if (ret) {
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index c15347d3f53..5254e57968f 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/
-#define QLA4XXX_DRIVER_VERSION "5.02.00-k8"
+#define QLA4XXX_DRIVER_VERSION "5.02.00-k9"
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index a1fd73df541..8ba4510a951 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -199,7 +199,7 @@ config SPI_FSL_LIB
depends on FSL_SOC
config SPI_FSL_SPI
- tristate "Freescale SPI controller"
+ bool "Freescale SPI controller"
depends on FSL_SOC
select SPI_FSL_LIB
help
@@ -208,7 +208,7 @@ config SPI_FSL_SPI
MPC8569 uses the controller in QE mode, MPC8610 in cpu mode.
config SPI_FSL_ESPI
- tristate "Freescale eSPI controller"
+ bool "Freescale eSPI controller"
depends on FSL_SOC
select SPI_FSL_LIB
help
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index 024b48aed5c..acc88b4d286 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -13,6 +13,7 @@
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index e093d3ec41b..0094c645ff0 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -256,7 +256,7 @@ static void spi_gpio_cleanup(struct spi_device *spi)
spi_bitbang_cleanup(spi);
}
-static int __init spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
+static int __devinit spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
{
int value;
@@ -270,7 +270,7 @@ static int __init spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
return value;
}
-static int __init
+static int __devinit
spi_gpio_request(struct spi_gpio_platform_data *pdata, const char *label,
u16 *res_flags)
{
diff --git a/drivers/spi/spi-nuc900.c b/drivers/spi/spi-nuc900.c
index e763254741c..182e9c87382 100644
--- a/drivers/spi/spi-nuc900.c
+++ b/drivers/spi/spi-nuc900.c
@@ -8,6 +8,7 @@
*
*/
+#include <linux/module.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
@@ -426,7 +427,7 @@ static int __devinit nuc900_spi_probe(struct platform_device *pdev)
goto err_clk;
}
- mfp_set_groupg(&pdev->dev);
+ mfp_set_groupg(&pdev->dev, NULL);
nuc900_init_spi(hw);
err = spi_bitbang_start(&hw->bitbang);
diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
index 84c934c0a54..520e8286db2 100644
--- a/drivers/ssb/driver_pcicore.c
+++ b/drivers/ssb/driver_pcicore.c
@@ -517,10 +517,14 @@ static void ssb_pcicore_pcie_setup_workarounds(struct ssb_pcicore *pc)
static void __devinit ssb_pcicore_init_clientmode(struct ssb_pcicore *pc)
{
- ssb_pcicore_fix_sprom_core_index(pc);
+ struct ssb_device *pdev = pc->dev;
+ struct ssb_bus *bus = pdev->bus;
+
+ if (bus->bustype == SSB_BUSTYPE_PCI)
+ ssb_pcicore_fix_sprom_core_index(pc);
/* Disable PCI interrupts. */
- ssb_write32(pc->dev, SSB_INTVEC, 0);
+ ssb_write32(pdev, SSB_INTVEC, 0);
/* Additional PCIe always once-executed workarounds */
if (pc->dev->id.coreid == SSB_DEV_PCIE) {
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index 21d8c1c16cd..5e78c77d5a0 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -671,7 +671,7 @@ static int do_insnlist_ioctl(struct comedi_device *dev,
}
insns =
- kmalloc(sizeof(struct comedi_insn) * insnlist.n_insns, GFP_KERNEL);
+ kcalloc(insnlist.n_insns, sizeof(struct comedi_insn), GFP_KERNEL);
if (!insns) {
DPRINTK("kmalloc failed\n");
ret = -ENOMEM;
@@ -1432,7 +1432,21 @@ static int do_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
return ret;
}
-static void comedi_unmap(struct vm_area_struct *area)
+
+static void comedi_vm_open(struct vm_area_struct *area)
+{
+ struct comedi_async *async;
+ struct comedi_device *dev;
+
+ async = area->vm_private_data;
+ dev = async->subdevice->device;
+
+ mutex_lock(&dev->mutex);
+ async->mmap_count++;
+ mutex_unlock(&dev->mutex);
+}
+
+static void comedi_vm_close(struct vm_area_struct *area)
{
struct comedi_async *async;
struct comedi_device *dev;
@@ -1446,15 +1460,13 @@ static void comedi_unmap(struct vm_area_struct *area)
}
static struct vm_operations_struct comedi_vm_ops = {
- .close = comedi_unmap,
+ .open = comedi_vm_open,
+ .close = comedi_vm_close,
};
static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
{
const unsigned minor = iminor(file->f_dentry->d_inode);
- struct comedi_device_file_info *dev_file_info =
- comedi_get_device_file_info(minor);
- struct comedi_device *dev = dev_file_info->device;
struct comedi_async *async = NULL;
unsigned long start = vma->vm_start;
unsigned long size;
@@ -1462,6 +1474,15 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
int i;
int retval;
struct comedi_subdevice *s;
+ struct comedi_device_file_info *dev_file_info;
+ struct comedi_device *dev;
+
+ dev_file_info = comedi_get_device_file_info(minor);
+ if (dev_file_info == NULL)
+ return -ENODEV;
+ dev = dev_file_info->device;
+ if (dev == NULL)
+ return -ENODEV;
mutex_lock(&dev->mutex);
if (!dev->attached) {
@@ -1528,11 +1549,17 @@ static unsigned int comedi_poll(struct file *file, poll_table * wait)
{
unsigned int mask = 0;
const unsigned minor = iminor(file->f_dentry->d_inode);
- struct comedi_device_file_info *dev_file_info =
- comedi_get_device_file_info(minor);
- struct comedi_device *dev = dev_file_info->device;
struct comedi_subdevice *read_subdev;
struct comedi_subdevice *write_subdev;
+ struct comedi_device_file_info *dev_file_info;
+ struct comedi_device *dev;
+ dev_file_info = comedi_get_device_file_info(minor);
+
+ if (dev_file_info == NULL)
+ return -ENODEV;
+ dev = dev_file_info->device;
+ if (dev == NULL)
+ return -ENODEV;
mutex_lock(&dev->mutex);
if (!dev->attached) {
@@ -1578,9 +1605,15 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
int n, m, count = 0, retval = 0;
DECLARE_WAITQUEUE(wait, current);
const unsigned minor = iminor(file->f_dentry->d_inode);
- struct comedi_device_file_info *dev_file_info =
- comedi_get_device_file_info(minor);
- struct comedi_device *dev = dev_file_info->device;
+ struct comedi_device_file_info *dev_file_info;
+ struct comedi_device *dev;
+ dev_file_info = comedi_get_device_file_info(minor);
+
+ if (dev_file_info == NULL)
+ return -ENODEV;
+ dev = dev_file_info->device;
+ if (dev == NULL)
+ return -ENODEV;
if (!dev->attached) {
DPRINTK("no driver configured on comedi%i\n", dev->minor);
@@ -1640,11 +1673,11 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
retval = -EAGAIN;
break;
}
+ schedule();
if (signal_pending(current)) {
retval = -ERESTARTSYS;
break;
}
- schedule();
if (!s->busy)
break;
if (s->busy != file) {
@@ -1683,9 +1716,15 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
int n, m, count = 0, retval = 0;
DECLARE_WAITQUEUE(wait, current);
const unsigned minor = iminor(file->f_dentry->d_inode);
- struct comedi_device_file_info *dev_file_info =
- comedi_get_device_file_info(minor);
- struct comedi_device *dev = dev_file_info->device;
+ struct comedi_device_file_info *dev_file_info;
+ struct comedi_device *dev;
+ dev_file_info = comedi_get_device_file_info(minor);
+
+ if (dev_file_info == NULL)
+ return -ENODEV;
+ dev = dev_file_info->device;
+ if (dev == NULL)
+ return -ENODEV;
if (!dev->attached) {
DPRINTK("no driver configured on comedi%i\n", dev->minor);
@@ -1741,11 +1780,11 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
retval = -EAGAIN;
break;
}
+ schedule();
if (signal_pending(current)) {
retval = -ERESTARTSYS;
break;
}
- schedule();
if (!s->busy) {
retval = 0;
break;
@@ -1885,11 +1924,17 @@ ok:
static int comedi_close(struct inode *inode, struct file *file)
{
const unsigned minor = iminor(inode);
- struct comedi_device_file_info *dev_file_info =
- comedi_get_device_file_info(minor);
- struct comedi_device *dev = dev_file_info->device;
struct comedi_subdevice *s = NULL;
int i;
+ struct comedi_device_file_info *dev_file_info;
+ struct comedi_device *dev;
+ dev_file_info = comedi_get_device_file_info(minor);
+
+ if (dev_file_info == NULL)
+ return -ENODEV;
+ dev = dev_file_info->device;
+ if (dev == NULL)
+ return -ENODEV;
mutex_lock(&dev->mutex);
@@ -1923,10 +1968,15 @@ static int comedi_close(struct inode *inode, struct file *file)
static int comedi_fasync(int fd, struct file *file, int on)
{
const unsigned minor = iminor(file->f_dentry->d_inode);
- struct comedi_device_file_info *dev_file_info =
- comedi_get_device_file_info(minor);
+ struct comedi_device_file_info *dev_file_info;
+ struct comedi_device *dev;
+ dev_file_info = comedi_get_device_file_info(minor);
- struct comedi_device *dev = dev_file_info->device;
+ if (dev_file_info == NULL)
+ return -ENODEV;
+ dev = dev_file_info->device;
+ if (dev == NULL)
+ return -ENODEV;
return fasync_helper(fd, file, on, &dev->async_queue);
}
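
The comedi change above pairs a new .open handler with the existing .close, presumably so the buffer's mmap_count stays balanced when a VMA is duplicated (for example across fork() or a VMA split) rather than only being decremented on unmap. A minimal sketch of that balanced vm_operations pattern, with a hypothetical per-buffer state structure:

#include <linux/mm.h>
#include <linux/mutex.h>

/* Hypothetical per-buffer state protected by a mutex. */
struct buf_state {
	struct mutex lock;
	int mmap_count;
};

static void buf_vm_open(struct vm_area_struct *vma)
{
	struct buf_state *st = vma->vm_private_data;

	/* Called whenever the VMA is duplicated (fork, split, mremap). */
	mutex_lock(&st->lock);
	st->mmap_count++;
	mutex_unlock(&st->lock);
}

static void buf_vm_close(struct vm_area_struct *vma)
{
	struct buf_state *st = vma->vm_private_data;

	/* Called once per VMA teardown; must balance buf_vm_open(). */
	mutex_lock(&st->lock);
	st->mmap_count--;
	mutex_unlock(&st->lock);
}

static struct vm_operations_struct buf_vm_ops = {
	.open	= buf_vm_open,
	.close	= buf_vm_close,
};
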
diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c
index a8fea9a9173..6144afb8cba 100644
--- a/drivers/staging/comedi/drivers/usbduxsigma.c
+++ b/drivers/staging/comedi/drivers/usbduxsigma.c
@@ -1,4 +1,4 @@
-#define DRIVER_VERSION "v0.5"
+#define DRIVER_VERSION "v0.6"
#define DRIVER_AUTHOR "Bernd Porr, BerndPorr@f2s.com"
#define DRIVER_DESC "Stirling/ITL USB-DUX SIGMA -- Bernd.Porr@f2s.com"
/*
@@ -25,7 +25,7 @@ Driver: usbduxsigma
Description: University of Stirling USB DAQ & INCITE Technology Limited
Devices: [ITL] USB-DUX (usbduxsigma.o)
Author: Bernd Porr <BerndPorr@f2s.com>
-Updated: 21 Jul 2011
+Updated: 8 Nov 2011
Status: testing
*/
/*
@@ -44,6 +44,7 @@ Status: testing
* 0.3: proper vendor ID and driver name
* 0.4: fixed D/A voltage range
* 0.5: various bug fixes, health check at startup
+ * 0.6: corrected wrong input range
*/
/* generates loads of debug info */
@@ -175,7 +176,7 @@ Status: testing
/* comedi constants */
static const struct comedi_lrange range_usbdux_ai_range = { 1, {
BIP_RANGE
- (2.65)
+ (2.65/2.0)
}
};
diff --git a/drivers/staging/iio/industrialio-core.c b/drivers/staging/iio/industrialio-core.c
index 26564094e33..aec9311b108 100644
--- a/drivers/staging/iio/industrialio-core.c
+++ b/drivers/staging/iio/industrialio-core.c
@@ -242,25 +242,24 @@ static const struct file_operations iio_event_chrdev_fileops = {
static int iio_event_getfd(struct iio_dev *indio_dev)
{
+ struct iio_event_interface *ev_int = indio_dev->event_interface;
int fd;
- if (indio_dev->event_interface == NULL)
+ if (ev_int == NULL)
return -ENODEV;
- mutex_lock(&indio_dev->event_interface->event_list_lock);
- if (test_and_set_bit(IIO_BUSY_BIT_POS,
- &indio_dev->event_interface->flags)) {
- mutex_unlock(&indio_dev->event_interface->event_list_lock);
+ mutex_lock(&ev_int->event_list_lock);
+ if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
+ mutex_unlock(&ev_int->event_list_lock);
return -EBUSY;
}
- mutex_unlock(&indio_dev->event_interface->event_list_lock);
+ mutex_unlock(&ev_int->event_list_lock);
fd = anon_inode_getfd("iio:event",
- &iio_event_chrdev_fileops,
- indio_dev->event_interface, O_RDONLY);
+ &iio_event_chrdev_fileops, ev_int, O_RDONLY);
if (fd < 0) {
- mutex_lock(&indio_dev->event_interface->event_list_lock);
+ mutex_lock(&ev_int->event_list_lock);
clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
- mutex_unlock(&indio_dev->event_interface->event_list_lock);
+ mutex_unlock(&ev_int->event_list_lock);
}
return fd;
}
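
iio_event_getfd() above keeps using test_and_set_bit() as a single-open guard on the event interface, and the fix makes the error path clear the same ev_int->flags word it set. The guard itself is a common pattern; a small sketch with a hypothetical device structure:

#include <linux/bitops.h>
#include <linux/errno.h>

#define MY_BUSY_BIT	0	/* hypothetical bit used as a busy latch */

struct my_dev {
	unsigned long flags;
};

static int my_dev_claim(struct my_dev *dev)
{
	/* Atomically claim the device; fail if it is already in use. */
	if (test_and_set_bit(MY_BUSY_BIT, &dev->flags))
		return -EBUSY;
	return 0;
}

static void my_dev_release(struct my_dev *dev)
{
	/* Drop the claim so the next open can succeed. */
	clear_bit(MY_BUSY_BIT, &dev->flags);
}
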
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index fb2e89c3056..5385da2e9cd 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -89,6 +89,7 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
{USB_DEVICE(0x0DF6, 0x0045)},
{USB_DEVICE(0x0DF6, 0x0059)}, /* 11n mode disable */
{USB_DEVICE(0x0DF6, 0x004B)},
+ {USB_DEVICE(0x0DF6, 0x005D)},
{USB_DEVICE(0x0DF6, 0x0063)},
/* Sweex */
{USB_DEVICE(0x177F, 0x0154)},
diff --git a/drivers/staging/rts_pstor/rtsx.c b/drivers/staging/rts_pstor/rtsx.c
index 8a7803cf88d..a7feb3e328a 100644
--- a/drivers/staging/rts_pstor/rtsx.c
+++ b/drivers/staging/rts_pstor/rtsx.c
@@ -1019,6 +1019,7 @@ static int __devinit rtsx_probe(struct pci_dev *pci,
th = kthread_create(rtsx_scan_thread, dev, "rtsx-scan");
if (IS_ERR(th)) {
printk(KERN_ERR "Unable to start the device-scanning thread\n");
+ complete(&dev->scanning_done);
quiesce_and_remove_host(dev);
err = PTR_ERR(th);
goto errout;
diff --git a/drivers/staging/tidspbridge/core/dsp-clock.c b/drivers/staging/tidspbridge/core/dsp-clock.c
index 3d1279c424a..7eb56178fb6 100644
--- a/drivers/staging/tidspbridge/core/dsp-clock.c
+++ b/drivers/staging/tidspbridge/core/dsp-clock.c
@@ -54,6 +54,7 @@
/* Bridge GPT id (1 - 4), DM Timer id (5 - 8) */
#define DMT_ID(id) ((id) + 4)
+#define DM_TIMER_CLOCKS 4
/* Bridge MCBSP id (6 - 10), OMAP Mcbsp id (0 - 4) */
#define MCBSP_ID(id) ((id) - 6)
@@ -114,8 +115,13 @@ static s8 get_clk_type(u8 id)
*/
void dsp_clk_exit(void)
{
+ int i;
+
dsp_clock_disable_all(dsp_clocks);
+ for (i = 0; i < DM_TIMER_CLOCKS; i++)
+ omap_dm_timer_free(timer[i]);
+
clk_put(iva2_clk);
clk_put(ssi.sst_fck);
clk_put(ssi.ssr_fck);
@@ -130,9 +136,13 @@ void dsp_clk_exit(void)
void dsp_clk_init(void)
{
static struct platform_device dspbridge_device;
+ int i, id;
dspbridge_device.dev.bus = &platform_bus_type;
+ for (i = 0, id = 5; i < DM_TIMER_CLOCKS; i++, id++)
+ timer[i] = omap_dm_timer_request_specific(id);
+
iva2_clk = clk_get(&dspbridge_device.dev, "iva2_ck");
if (IS_ERR(iva2_clk))
dev_err(bridge, "failed to get iva2 clock %p\n", iva2_clk);
@@ -204,8 +214,7 @@ int dsp_clk_enable(enum dsp_clk_id clk_id)
clk_enable(iva2_clk);
break;
case GPT_CLK:
- timer[clk_id - 1] =
- omap_dm_timer_request_specific(DMT_ID(clk_id));
+ status = omap_dm_timer_start(timer[clk_id - 1]);
break;
#ifdef CONFIG_OMAP_MCBSP
case MCBSP_CLK:
@@ -281,7 +290,7 @@ int dsp_clk_disable(enum dsp_clk_id clk_id)
clk_disable(iva2_clk);
break;
case GPT_CLK:
- omap_dm_timer_free(timer[clk_id - 1]);
+ status = omap_dm_timer_stop(timer[clk_id - 1]);
break;
#ifdef CONFIG_OMAP_MCBSP
case MCBSP_CLK:
diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.c b/drivers/staging/tidspbridge/rmgr/drv_interface.c
index c43c7e3421c..76cfc6edecd 100644
--- a/drivers/staging/tidspbridge/rmgr/drv_interface.c
+++ b/drivers/staging/tidspbridge/rmgr/drv_interface.c
@@ -24,11 +24,7 @@
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
-
-#ifdef MODULE
#include <linux/module.h>
-#endif
-
#include <linux/device.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
index 09c44abb89e..3872b8cccdc 100644
--- a/drivers/staging/usbip/vhci_rx.c
+++ b/drivers/staging/usbip/vhci_rx.c
@@ -68,6 +68,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
{
struct usbip_device *ud = &vdev->ud;
struct urb *urb;
+ unsigned long flags;
spin_lock(&vdev->priv_lock);
urb = pickup_urb_and_free_priv(vdev, pdu->base.seqnum);
@@ -101,9 +102,9 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
- spin_lock(&the_controller->lock);
+ spin_lock_irqsave(&the_controller->lock, flags);
usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
- spin_unlock(&the_controller->lock);
+ spin_unlock_irqrestore(&the_controller->lock, flags);
usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status);
@@ -141,6 +142,7 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
{
struct vhci_unlink *unlink;
struct urb *urb;
+ unsigned long flags;
usbip_dump_header(pdu);
@@ -170,9 +172,9 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
urb->status = pdu->u.ret_unlink.status;
pr_info("urb->status %d\n", urb->status);
- spin_lock(&the_controller->lock);
+ spin_lock_irqsave(&the_controller->lock, flags);
usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
- spin_unlock(&the_controller->lock);
+ spin_unlock_irqrestore(&the_controller->lock, flags);
usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
urb->status);
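
The vhci_rx change above switches to spin_lock_irqsave() on the_controller->lock, presumably because the same lock can also be taken in interrupt context; taking it in process context with interrupts enabled risks a self-deadlock if the interrupt-side path runs on the same CPU while the lock is held. A short sketch of the two sides of that locking rule (the shared counter is hypothetical):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(ctrl_lock);
static unsigned int pending;	/* hypothetical state shared with IRQ context */

/* Process-context path: disable local interrupts while holding the lock. */
static void consume_pending(void)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrl_lock, flags);
	pending = 0;
	spin_unlock_irqrestore(&ctrl_lock, flags);
}

/* Interrupt-context path: interrupts are already off, plain spin_lock suffices. */
static void irq_produce(void)
{
	spin_lock(&ctrl_lock);
	pending++;
	spin_unlock(&ctrl_lock);
}
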
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 0fd96c10271..8599545cdf9 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -614,13 +614,12 @@ int iscsit_add_reject(
hdr = (struct iscsi_reject *) cmd->pdu;
hdr->reason = reason;
- cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+ cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
if (!cmd->buf_ptr) {
pr_err("Unable to allocate memory for cmd->buf_ptr\n");
iscsit_release_cmd(cmd);
return -1;
}
- memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN);
spin_lock_bh(&conn->cmd_lock);
list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
@@ -661,13 +660,12 @@ int iscsit_add_reject_from_cmd(
hdr = (struct iscsi_reject *) cmd->pdu;
hdr->reason = reason;
- cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+ cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
if (!cmd->buf_ptr) {
pr_err("Unable to allocate memory for cmd->buf_ptr\n");
iscsit_release_cmd(cmd);
return -1;
}
- memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN);
if (add_to_conn) {
spin_lock_bh(&conn->cmd_lock);
@@ -1017,11 +1015,6 @@ done:
" non-existent or non-exported iSCSI LUN:"
" 0x%016Lx\n", get_unaligned_le64(&hdr->lun));
}
- if (ret == PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES)
- return iscsit_add_reject_from_cmd(
- ISCSI_REASON_BOOKMARK_NO_RESOURCES,
- 1, 1, buf, cmd);
-
send_check_condition = 1;
goto attach_cmd;
}
@@ -1044,6 +1037,8 @@ done:
*/
send_check_condition = 1;
} else {
+ cmd->data_length = cmd->se_cmd.data_length;
+
if (iscsit_decide_list_to_build(cmd, payload_length) < 0)
return iscsit_add_reject_from_cmd(
ISCSI_REASON_BOOKMARK_NO_RESOURCES,
@@ -1123,7 +1118,7 @@ attach_cmd:
* the backend memory allocation.
*/
ret = transport_generic_new_cmd(&cmd->se_cmd);
- if ((ret < 0) || (cmd->se_cmd.se_cmd_flags & SCF_SE_CMD_FAILED)) {
+ if (ret < 0) {
immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
dump_immediate_data = 1;
goto after_immediate_data;
@@ -1341,7 +1336,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
spin_lock_irqsave(&se_cmd->t_state_lock, flags);
if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) ||
- (se_cmd->se_cmd_flags & SCF_SE_CMD_FAILED))
+ (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
dump_unsolicited_data = 1;
spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
@@ -2513,10 +2508,10 @@ static int iscsit_send_data_in(
if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW;
- hdr->residual_count = cpu_to_be32(cmd->residual_count);
+ hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
} else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW;
- hdr->residual_count = cpu_to_be32(cmd->residual_count);
+ hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
}
}
hton24(hdr->dlength, datain.length);
@@ -3018,10 +3013,10 @@ static int iscsit_send_status(
hdr->flags |= ISCSI_FLAG_CMD_FINAL;
if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
hdr->flags |= ISCSI_FLAG_CMD_OVERFLOW;
- hdr->residual_count = cpu_to_be32(cmd->residual_count);
+ hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
} else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
- hdr->residual_count = cpu_to_be32(cmd->residual_count);
+ hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
}
hdr->response = cmd->iscsi_response;
hdr->cmd_status = cmd->se_cmd.scsi_status;
@@ -3133,6 +3128,7 @@ static int iscsit_send_task_mgt_rsp(
hdr = (struct iscsi_tm_rsp *) cmd->pdu;
memset(hdr, 0, ISCSI_HDR_LEN);
hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
+ hdr->flags = ISCSI_FLAG_CMD_FINAL;
hdr->response = iscsit_convert_tcm_tmr_rsp(se_tmr);
hdr->itt = cpu_to_be32(cmd->init_task_tag);
cmd->stat_sn = conn->stat_sn++;
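
The iscsit_add_reject*() hunks above replace the kzalloc()+memcpy() pair with kmemdup(), which allocates and copies in one call and drops a redundant zero-fill of memory that is immediately overwritten. A trivial sketch of the pattern (the helper name is hypothetical):

#include <linux/slab.h>
#include <linux/string.h>

/* Duplicate a fixed-size PDU header into a freshly allocated buffer. */
static void *dup_pdu_header(const void *hdr, size_t len)
{
	/* Equivalent to kzalloc(len, GFP_KERNEL) + memcpy(copy, hdr, len),
	 * minus the pointless zeroing. Returns NULL on allocation failure. */
	return kmemdup(hdr, len, GFP_KERNEL);
}
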
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index beb39469e7f..1cd6ce373b8 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -30,9 +30,11 @@
static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
{
- int j = DIV_ROUND_UP(len, 2);
+ int j = DIV_ROUND_UP(len, 2), rc;
- hex2bin(dst, src, j);
+ rc = hex2bin(dst, src, j);
+ if (rc < 0)
+ pr_debug("CHAP string contains non hex digit symbols\n");
dst[j] = '\0';
return j;
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 3723d90d5ae..f1a02dad05a 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -398,7 +398,6 @@ struct iscsi_cmd {
u32 pdu_send_order;
/* Current struct iscsi_pdu in struct iscsi_cmd->pdu_list */
u32 pdu_start;
- u32 residual_count;
/* Next struct iscsi_seq to send in struct iscsi_cmd->seq_list */
u32 seq_send_order;
/* Number of struct iscsi_seq in struct iscsi_cmd->seq_list */
@@ -535,7 +534,6 @@ struct iscsi_conn {
atomic_t connection_exit;
atomic_t connection_recovery;
atomic_t connection_reinstatement;
- atomic_t connection_wait;
atomic_t connection_wait_rcfr;
atomic_t sleep_on_conn_wait_comp;
atomic_t transport_failed;
@@ -643,7 +641,6 @@ struct iscsi_session {
atomic_t session_reinstatement;
atomic_t session_stop_active;
atomic_t sleep_on_sess_wait_comp;
- atomic_t transport_wait_cmds;
/* connection list */
struct list_head sess_conn_list;
struct list_head cr_active_list;
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index c4c68da3e50..101b1beb3bc 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -938,8 +938,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
* handle the SCF_SCSI_RESERVATION_CONFLICT case here as well.
*/
if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
- if (se_cmd->se_cmd_flags &
- SCF_SCSI_RESERVATION_CONFLICT) {
+ if (se_cmd->scsi_sense_reason == TCM_RESERVATION_CONFLICT) {
cmd->i_state = ISTATE_SEND_STATUS;
spin_unlock_bh(&cmd->istate_lock);
iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index daad362a93c..d734bdec24f 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -224,7 +224,7 @@ static int iscsi_login_zero_tsih_s1(
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
pr_err("Could not allocate memory for session\n");
- return -1;
+ return -ENOMEM;
}
iscsi_login_set_conn_values(sess, conn, pdu->cid);
@@ -250,7 +250,8 @@ static int iscsi_login_zero_tsih_s1(
pr_err("idr_pre_get() for sess_idr failed\n");
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
- return -1;
+ kfree(sess);
+ return -ENOMEM;
}
spin_lock(&sess_idr_lock);
idr_get_new(&sess_idr, NULL, &sess->session_index);
@@ -270,14 +271,16 @@ static int iscsi_login_zero_tsih_s1(
ISCSI_LOGIN_STATUS_NO_RESOURCES);
pr_err("Unable to allocate memory for"
" struct iscsi_sess_ops.\n");
- return -1;
+ kfree(sess);
+ return -ENOMEM;
}
sess->se_sess = transport_init_session();
- if (!sess->se_sess) {
+ if (IS_ERR(sess->se_sess)) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
- return -1;
+ kfree(sess);
+ return -ENOMEM;
}
return 0;
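
iscsi_login_zero_tsih_s1() above now treats transport_init_session() as an ERR_PTR-style constructor, checked with IS_ERR() rather than against NULL, and frees the half-built session on every error path. A small sketch of that error-pointer convention, with a hypothetical constructor:

#include <linux/err.h>
#include <linux/slab.h>

struct session;

/* Hypothetical constructor: returns a valid pointer or ERR_PTR(-errno),
 * never NULL, so callers must use IS_ERR()/PTR_ERR(). */
struct session *session_create(void);

static int attach_session(struct session **out)
{
	struct session *s = session_create();

	if (IS_ERR(s))
		return PTR_ERR(s);	/* a NULL check would miss this */

	*out = s;
	return 0;
}
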
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 426cd4bf6a9..98936cb7c29 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -981,14 +981,13 @@ struct iscsi_login *iscsi_target_init_negotiation(
return NULL;
}
- login->req = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+ login->req = kmemdup(login_pdu, ISCSI_HDR_LEN, GFP_KERNEL);
if (!login->req) {
pr_err("Unable to allocate memory for Login Request.\n");
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
goto out;
}
- memcpy(login->req, login_pdu, ISCSI_HDR_LEN);
login->req_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
if (!login->req_buf) {
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 3df1c9b8ae6..81d5832fbbd 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -113,11 +113,9 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
scsi_bufflen(sc), sc->sc_data_direction, sam_task_attr,
&tl_cmd->tl_sense_buf[0]);
- /*
- * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi
- */
if (scsi_bidi_cmnd(sc))
- se_cmd->t_tasks_bidi = 1;
+ se_cmd->se_cmd_flags |= SCF_BIDI;
+
/*
* Locate the struct se_lun pointer and attach it to struct se_cmd
*/
@@ -148,27 +146,13 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
* Allocate the necessary tasks to complete the received CDB+data
*/
ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd);
- if (ret == -ENOMEM) {
- /* Out of Resources */
- return PYX_TRANSPORT_LU_COMM_FAILURE;
- } else if (ret == -EINVAL) {
- /*
- * Handle case for SAM_STAT_RESERVATION_CONFLICT
- */
- if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
- /*
- * Otherwise, return SAM_STAT_CHECK_CONDITION and return
- * sense data.
- */
- return PYX_TRANSPORT_USE_SENSE_REASON;
- }
-
+ if (ret != 0)
+ return ret;
/*
* For BIDI commands, pass in the extra READ buffer
* to transport_generic_map_mem_to_cmd() below..
*/
- if (se_cmd->t_tasks_bidi) {
+ if (se_cmd->se_cmd_flags & SCF_BIDI) {
struct scsi_data_buffer *sdb = scsi_in(sc);
sgl_bidi = sdb->table.sgl;
@@ -194,12 +178,8 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
}
/* Tell the core about our preallocated memory */
- ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
+ return transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
- if (ret < 0)
- return PYX_TRANSPORT_LU_COMM_FAILURE;
-
- return 0;
}
/*
@@ -1360,17 +1340,16 @@ void tcm_loop_drop_scsi_hba(
{
struct tcm_loop_hba *tl_hba = container_of(wwn,
struct tcm_loop_hba, tl_hba_wwn);
- int host_no = tl_hba->sh->host_no;
+
+ pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target"
+ " SAS Address: %s at Linux/SCSI Host ID: %d\n",
+ tl_hba->tl_wwn_address, tl_hba->sh->host_no);
/*
* Call device_unregister() on the original tl_hba->dev.
* tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will
* release *tl_hba;
*/
device_unregister(&tl_hba->dev);
-
- pr_debug("TCM_Loop_ConfigFS: Deallocated emulated Target"
- " SAS Address: %s at Linux/SCSI Host ID: %d\n",
- config_item_name(&wwn->wwn_group.cg_item), host_no);
}
/* Start items for tcm_loop_cit */
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 88f2ad43ec8..1dcbef499d6 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -191,9 +191,10 @@ int target_emulate_set_target_port_groups(struct se_task *task)
int alua_access_state, primary = 0, rc;
u16 tg_pt_id, rtpi;
- if (!l_port)
- return PYX_TRANSPORT_LU_COMM_FAILURE;
-
+ if (!l_port) {
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
+ }
buf = transport_kmap_first_data_page(cmd);
/*
@@ -203,7 +204,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
if (!l_tg_pt_gp_mem) {
pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
- rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ rc = -EINVAL;
goto out;
}
spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -211,7 +213,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
if (!l_tg_pt_gp) {
spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
- rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ rc = -EINVAL;
goto out;
}
rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
@@ -220,7 +223,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
if (!rc) {
pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
" while TPGS_EXPLICT_ALUA is disabled\n");
- rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ rc = -EINVAL;
goto out;
}
@@ -245,7 +249,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
* REQUEST, and the additional sense code set to INVALID
* FIELD IN PARAMETER LIST.
*/
- rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ rc = -EINVAL;
goto out;
}
rc = -1;
@@ -298,7 +303,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
* throw an exception with ASCQ: INVALID_PARAMETER_LIST
*/
if (rc != 0) {
- rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ rc = -EINVAL;
goto out;
}
} else {
@@ -335,7 +341,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
* INVALID_PARAMETER_LIST
*/
if (rc != 0) {
- rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ rc = -EINVAL;
goto out;
}
}
@@ -1184,7 +1191,6 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
* struct t10_alua_lu_gp.
*/
spin_lock(&lu_gps_lock);
- atomic_set(&lu_gp->lu_gp_shutdown, 1);
list_del(&lu_gp->lu_gp_node);
alua_lu_gps_count--;
spin_unlock(&lu_gps_lock);
@@ -1438,7 +1444,6 @@ struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
tg_pt_gp_mem->tg_pt = port;
port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem;
- atomic_set(&port->sep_tg_pt_gp_active, 1);
return tg_pt_gp_mem;
}
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 683ba02b824..831468b3163 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -478,7 +478,7 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
if (cmd->data_length < 60)
return 0;
- buf[2] = 0x3c;
+ buf[3] = 0x3c;
/* Set HEADSUP, ORDSUP, SIMPSUP */
buf[5] = 0x07;
@@ -703,6 +703,7 @@ int target_emulate_inquiry(struct se_task *task)
if (cmd->data_length < 4) {
pr_err("SCSI Inquiry payload length: %u"
" too small for EVPD=1\n", cmd->data_length);
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
return -EINVAL;
}
@@ -719,6 +720,7 @@ int target_emulate_inquiry(struct se_task *task)
}
pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
ret = -EINVAL;
out_unmap:
@@ -969,7 +971,8 @@ int target_emulate_modesense(struct se_task *task)
default:
pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
cdb[2] & 0x3f, cdb[3]);
- return PYX_TRANSPORT_UNKNOWN_MODE_PAGE;
+ cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
+ return -EINVAL;
}
offset += length;
@@ -1027,7 +1030,8 @@ int target_emulate_request_sense(struct se_task *task)
if (cdb[1] & 0x01) {
pr_err("REQUEST_SENSE description emulation not"
" supported\n");
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -ENOSYS;
}
buf = transport_kmap_first_data_page(cmd);
@@ -1100,7 +1104,8 @@ int target_emulate_unmap(struct se_task *task)
if (!dev->transport->do_discard) {
pr_err("UNMAP emulation not supported for: %s\n",
dev->transport->name);
- return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ return -ENOSYS;
}
/* First UNMAP block descriptor starts at 8 byte offset */
@@ -1157,7 +1162,8 @@ int target_emulate_write_same(struct se_task *task)
if (!dev->transport->do_discard) {
pr_err("WRITE_SAME emulation not supported"
" for: %s\n", dev->transport->name);
- return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ return -ENOSYS;
}
if (cmd->t_task_cdb[0] == WRITE_SAME)
@@ -1193,11 +1199,13 @@ int target_emulate_write_same(struct se_task *task)
int target_emulate_synchronize_cache(struct se_task *task)
{
struct se_device *dev = task->task_se_cmd->se_dev;
+ struct se_cmd *cmd = task->task_se_cmd;
if (!dev->transport->do_sync_cache) {
pr_err("SYNCHRONIZE_CACHE emulation not supported"
" for: %s\n", dev->transport->name);
- return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ return -ENOSYS;
}
dev->transport->do_sync_cache(task);
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index e0c1e8a8dd4..93d4f6a1b79 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -67,9 +67,6 @@ static struct config_group target_core_hbagroup;
static struct config_group alua_group;
static struct config_group alua_lu_gps_group;
-static DEFINE_SPINLOCK(se_device_lock);
-static LIST_HEAD(se_dev_list);
-
static inline struct se_hba *
item_to_hba(struct config_item *item)
{
@@ -2741,7 +2738,6 @@ static struct config_group *target_core_make_subdev(
" struct se_subsystem_dev\n");
goto unlock;
}
- INIT_LIST_HEAD(&se_dev->se_dev_node);
INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
@@ -2777,9 +2773,6 @@ static struct config_group *target_core_make_subdev(
" from allocate_virtdevice()\n");
goto out;
}
- spin_lock(&se_device_lock);
- list_add_tail(&se_dev->se_dev_node, &se_dev_list);
- spin_unlock(&se_device_lock);
config_group_init_type_name(&se_dev->se_dev_group, name,
&target_core_dev_cit);
@@ -2874,10 +2867,6 @@ static void target_core_drop_subdev(
mutex_lock(&hba->hba_access_mutex);
t = hba->transport;
- spin_lock(&se_device_lock);
- list_del(&se_dev->se_dev_node);
- spin_unlock(&se_device_lock);
-
dev_stat_grp = &se_dev->dev_stat_grps.stat_group;
for (i = 0; dev_stat_grp->default_groups[i]; i++) {
df_item = &dev_stat_grp->default_groups[i]->cg_item;
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index ba5edec2c5f..9b863942547 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -104,7 +104,6 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
se_cmd->se_lun = deve->se_lun;
se_cmd->pr_res_key = deve->pr_res_key;
se_cmd->orig_fe_lun = unpacked_lun;
- se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
}
spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
@@ -137,7 +136,6 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
se_lun = &se_sess->se_tpg->tpg_virt_lun0;
se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
se_cmd->orig_fe_lun = 0;
- se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
}
/*
@@ -200,7 +198,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
se_lun = deve->se_lun;
se_cmd->pr_res_key = deve->pr_res_key;
se_cmd->orig_fe_lun = unpacked_lun;
- se_cmd->se_orig_obj_ptr = se_cmd->se_dev;
}
spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
@@ -708,7 +705,7 @@ done:
se_task->task_scsi_status = GOOD;
transport_complete_task(se_task, 1);
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
}
/* se_release_device_for_hba():
@@ -957,8 +954,12 @@ int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
return -EINVAL;
}
- pr_err("dpo_emulated not supported\n");
- return -EINVAL;
+ if (flag) {
+ pr_err("dpo_emulated not supported\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
@@ -968,7 +969,7 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
return -EINVAL;
}
- if (dev->transport->fua_write_emulated == 0) {
+ if (flag && dev->transport->fua_write_emulated == 0) {
pr_err("fua_write_emulated not supported\n");
return -EINVAL;
}
@@ -985,8 +986,12 @@ int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
return -EINVAL;
}
- pr_err("ua read emulated not supported\n");
- return -EINVAL;
+ if (flag) {
+ pr_err("ua read emulated not supported\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
@@ -995,7 +1000,7 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
- if (dev->transport->write_cache_emulated == 0) {
+ if (flag && dev->transport->write_cache_emulated == 0) {
pr_err("write_cache_emulated not supported\n");
return -EINVAL;
}
@@ -1056,7 +1061,7 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
* We expect this value to be non-zero when generic Block Layer
* Discard supported is detected iblock_create_virtdevice().
*/
- if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+ if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
pr_err("Generic Block Discard not supported\n");
return -ENOSYS;
}
@@ -1077,7 +1082,7 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
* We expect this value to be non-zero when generic Block Layer
* Discard supported is detected iblock_create_virtdevice().
*/
- if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+ if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
pr_err("Generic Block Discard not supported\n");
return -ENOSYS;
}
@@ -1587,7 +1592,6 @@ int core_dev_setup_virtual_lun0(void)
ret = -ENOMEM;
goto out;
}
- INIT_LIST_HEAD(&se_dev->se_dev_node);
INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 67cd6fe05bf..b4864fba4ef 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -289,9 +289,9 @@ static int fd_do_readv(struct se_task *task)
return -ENOMEM;
}
- for (i = 0; i < task->task_sg_nents; i++) {
- iov[i].iov_len = sg[i].length;
- iov[i].iov_base = sg_virt(&sg[i]);
+ for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+ iov[i].iov_len = sg->length;
+ iov[i].iov_base = sg_virt(sg);
}
old_fs = get_fs();
@@ -342,9 +342,9 @@ static int fd_do_writev(struct se_task *task)
return -ENOMEM;
}
- for (i = 0; i < task->task_sg_nents; i++) {
- iov[i].iov_len = sg[i].length;
- iov[i].iov_base = sg_virt(&sg[i]);
+ for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+ iov[i].iov_len = sg->length;
+ iov[i].iov_base = sg_virt(sg);
}
old_fs = get_fs();
@@ -438,7 +438,7 @@ static int fd_do_task(struct se_task *task)
if (ret > 0 &&
dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
- cmd->t_tasks_fua) {
+ (cmd->se_cmd_flags & SCF_FUA)) {
/*
* We might need to be a bit smarter here
* and return some sense data to let the initiator
@@ -449,13 +449,15 @@ static int fd_do_task(struct se_task *task)
}
- if (ret < 0)
+ if (ret < 0) {
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return ret;
+ }
if (ret) {
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
}
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
}
/* fd_free_task(): (Part of se_subsystem_api_t template)
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 7698efe2926..4aa99220443 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -531,7 +531,7 @@ static int iblock_do_task(struct se_task *task)
*/
if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
(dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
- task->task_se_cmd->t_tasks_fua))
+ (cmd->se_cmd_flags & SCF_FUA)))
rw = WRITE_FUA;
else
rw = WRITE;
@@ -554,12 +554,15 @@ static int iblock_do_task(struct se_task *task)
else {
pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
" %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENOSYS;
}
bio = iblock_get_bio(task, block_lba, sg_num);
- if (!bio)
- return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ if (!bio) {
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENOMEM;
+ }
bio_list_init(&list);
bio_list_add(&list, bio);
@@ -588,12 +591,13 @@ static int iblock_do_task(struct se_task *task)
submit_bio(rw, bio);
blk_finish_plug(&plug);
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
fail:
while ((bio = bio_list_pop(&list)))
bio_put(bio);
- return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENOMEM;
}
static u32 iblock_get_device_rev(struct se_device *dev)
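
Note: iblock_do_task() above also switches from cmd->t_tasks_fua to the SCF_FUA command flag. Its WRITE_FUA decision can be summarized as: if no volatile write cache is advertised, every write is forced to media; otherwise FUA is used only when FUA-write emulation is on and the initiator set the bit. Illustrative sketch:

#include <stdbool.h>

static bool need_write_fua(bool emulate_write_cache, bool emulate_fua_write,
			   bool cmd_has_fua_bit)
{
	if (!emulate_write_cache)
		return true;	/* no cache advertised: make every write durable */
	return emulate_fua_write && cmd_has_fua_bit;
}
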
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 5a4ebfc3a54..95dee7074ae 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -191,7 +191,7 @@ static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd, int *ret)
pr_err("Received legacy SPC-2 RESERVE/RELEASE"
" while active SPC-3 registrations exist,"
" returning RESERVATION_CONFLICT\n");
- *ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
return true;
}
@@ -252,7 +252,8 @@ int target_scsi2_reservation_reserve(struct se_task *task)
(cmd->t_task_cdb[1] & 0x02)) {
pr_err("LongIO and Obselete Bits set, returning"
" ILLEGAL_REQUEST\n");
- ret = PYX_TRANSPORT_ILLEGAL_REQUEST;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ ret = -EINVAL;
goto out;
}
/*
@@ -277,7 +278,8 @@ int target_scsi2_reservation_reserve(struct se_task *task)
" from %s \n", cmd->se_lun->unpacked_lun,
cmd->se_deve->mapped_lun,
sess->se_node_acl->initiatorname);
- ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ ret = -EINVAL;
goto out_unlock;
}
@@ -1510,7 +1512,8 @@ static int core_scsi3_decode_spec_i_port(
tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
if (!tidh_new) {
pr_err("Unable to allocate tidh_new\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
INIT_LIST_HEAD(&tidh_new->dest_list);
tidh_new->dest_tpg = tpg;
@@ -1522,7 +1525,8 @@ static int core_scsi3_decode_spec_i_port(
sa_res_key, all_tg_pt, aptpl);
if (!local_pr_reg) {
kfree(tidh_new);
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENOMEM;
}
tidh_new->dest_pr_reg = local_pr_reg;
/*
@@ -1548,7 +1552,8 @@ static int core_scsi3_decode_spec_i_port(
pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header"
" does not equal CDB data_length: %u\n", tpdl,
cmd->data_length);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
/*
@@ -1598,7 +1603,9 @@ static int core_scsi3_decode_spec_i_port(
" for tmp_tpg\n");
atomic_dec(&tmp_tpg->tpg_pr_ref_count);
smp_mb__after_atomic_dec();
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = -EINVAL;
goto out;
}
/*
@@ -1628,7 +1635,9 @@ static int core_scsi3_decode_spec_i_port(
atomic_dec(&dest_node_acl->acl_pr_ref_count);
smp_mb__after_atomic_dec();
core_scsi3_tpg_undepend_item(tmp_tpg);
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = -EINVAL;
goto out;
}
@@ -1646,7 +1655,8 @@ static int core_scsi3_decode_spec_i_port(
if (!dest_tpg) {
pr_err("SPC-3 PR SPEC_I_PT: Unable to locate"
" dest_tpg\n");
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
#if 0
@@ -1660,7 +1670,8 @@ static int core_scsi3_decode_spec_i_port(
" %u for Transport ID: %s\n", tid_len, ptr);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
/*
@@ -1678,7 +1689,8 @@ static int core_scsi3_decode_spec_i_port(
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
@@ -1690,7 +1702,9 @@ static int core_scsi3_decode_spec_i_port(
smp_mb__after_atomic_dec();
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = -EINVAL;
goto out;
}
#if 0
@@ -1727,7 +1741,9 @@ static int core_scsi3_decode_spec_i_port(
core_scsi3_lunacl_undepend_item(dest_se_deve);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = -ENOMEM;
goto out;
}
INIT_LIST_HEAD(&tidh_new->dest_list);
@@ -1759,7 +1775,8 @@ static int core_scsi3_decode_spec_i_port(
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
kfree(tidh_new);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
tidh_new->dest_pr_reg = dest_pr_reg;
@@ -2098,7 +2115,8 @@ static int core_scsi3_emulate_pro_register(
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
se_tpg = se_sess->se_tpg;
se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
@@ -2117,13 +2135,14 @@ static int core_scsi3_emulate_pro_register(
if (res_key) {
pr_warn("SPC-3 PR: Reservation Key non-zero"
" for SA REGISTER, returning CONFLICT\n");
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* Do nothing but return GOOD status.
*/
if (!sa_res_key)
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
if (!spec_i_pt) {
/*
@@ -2138,7 +2157,8 @@ static int core_scsi3_emulate_pro_register(
if (ret != 0) {
pr_err("Unable to allocate"
" struct t10_pr_registration\n");
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
} else {
/*
@@ -2197,14 +2217,16 @@ static int core_scsi3_emulate_pro_register(
" 0x%016Lx\n", res_key,
pr_reg->pr_res_key);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
}
if (spec_i_pt) {
pr_err("SPC-3 PR UNREGISTER: SPEC_I_PT"
" set while sa_res_key=0\n");
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
/*
* An existing ALL_TG_PT=1 registration being released
@@ -2215,7 +2237,8 @@ static int core_scsi3_emulate_pro_register(
" registration exists, but ALL_TG_PT=1 bit not"
" present in received PROUT\n");
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
/*
* Allocate APTPL metadata buffer used for UNREGISTER ops
@@ -2227,7 +2250,9 @@ static int core_scsi3_emulate_pro_register(
pr_err("Unable to allocate"
" pr_aptpl_buf\n");
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
}
/*
@@ -2241,7 +2266,8 @@ static int core_scsi3_emulate_pro_register(
if (pr_holder < 0) {
kfree(pr_aptpl_buf);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
spin_lock(&pr_tmpl->registration_lock);
@@ -2405,7 +2431,8 @@ static int core_scsi3_pro_reserve(
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
se_tpg = se_sess->se_tpg;
se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
@@ -2417,7 +2444,8 @@ static int core_scsi3_pro_reserve(
if (!pr_reg) {
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for RESERVE\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
/*
* From spc4r17 Section 5.7.9: Reserving:
@@ -2433,7 +2461,8 @@ static int core_scsi3_pro_reserve(
" does not match existing SA REGISTER res_key:"
" 0x%016Lx\n", res_key, pr_reg->pr_res_key);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* From spc4r17 Section 5.7.9: Reserving:
@@ -2448,7 +2477,8 @@ static int core_scsi3_pro_reserve(
if (scope != PR_SCOPE_LU_SCOPE) {
pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
/*
* See if we have an existing PR reservation holder pointer at
@@ -2480,7 +2510,8 @@ static int core_scsi3_pro_reserve(
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* From spc4r17 Section 5.7.9: Reserving:
@@ -2503,7 +2534,8 @@ static int core_scsi3_pro_reserve(
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* From spc4r17 Section 5.7.9: Reserving:
@@ -2517,7 +2549,7 @@ static int core_scsi3_pro_reserve(
*/
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
}
/*
* Otherwise, our *pr_reg becomes the PR reservation holder for said
@@ -2574,7 +2606,8 @@ static int core_scsi3_emulate_pro_reserve(
default:
pr_err("SPC-3 PR: Unknown Service Action RESERVE Type:"
" 0x%02x\n", type);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
return ret;
@@ -2630,7 +2663,8 @@ static int core_scsi3_emulate_pro_release(
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
/*
* Locate the existing *pr_reg via struct se_node_acl pointers
@@ -2639,7 +2673,8 @@ static int core_scsi3_emulate_pro_release(
if (!pr_reg) {
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for RELEASE\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
/*
* From spc4r17 Section 5.7.11.2 Releasing:
@@ -2661,7 +2696,7 @@ static int core_scsi3_emulate_pro_release(
*/
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
}
if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
(pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))
@@ -2675,7 +2710,7 @@ static int core_scsi3_emulate_pro_release(
*/
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
}
/*
* From spc4r17 Section 5.7.11.2 Releasing:
@@ -2697,7 +2732,8 @@ static int core_scsi3_emulate_pro_release(
" 0x%016Lx\n", res_key, pr_reg->pr_res_key);
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* From spc4r17 Section 5.7.11.2 Releasing and above:
@@ -2719,7 +2755,8 @@ static int core_scsi3_emulate_pro_release(
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* In response to a persistent reservation release request from the
@@ -2802,7 +2839,8 @@ static int core_scsi3_emulate_pro_clear(
if (!pr_reg_n) {
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for CLEAR\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
/*
* From spc4r17 section 5.7.11.6, Clearing:
@@ -2821,7 +2859,8 @@ static int core_scsi3_emulate_pro_clear(
" existing SA REGISTER res_key:"
" 0x%016Lx\n", res_key, pr_reg_n->pr_res_key);
core_scsi3_put_pr_reg(pr_reg_n);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* a) Release the persistent reservation, if any;
@@ -2979,8 +3018,10 @@ static int core_scsi3_pro_preempt(
int all_reg = 0, calling_it_nexus = 0, released_regs = 0;
int prh_type = 0, prh_scope = 0, ret;
- if (!se_sess)
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ if (!se_sess) {
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
+ }
se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
@@ -2989,16 +3030,19 @@ static int core_scsi3_pro_preempt(
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for PREEMPT%s\n",
(abort) ? "_AND_ABORT" : "");
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
if (pr_reg_n->pr_res_key != res_key) {
core_scsi3_put_pr_reg(pr_reg_n);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
if (scope != PR_SCOPE_LU_SCOPE) {
pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
core_scsi3_put_pr_reg(pr_reg_n);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
INIT_LIST_HEAD(&preempt_and_abort_list);
@@ -3012,7 +3056,8 @@ static int core_scsi3_pro_preempt(
if (!all_reg && !sa_res_key) {
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg_n);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
/*
* From spc4r17, section 5.7.11.4.4 Removing Registrations:
@@ -3106,7 +3151,8 @@ static int core_scsi3_pro_preempt(
if (!released_regs) {
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg_n);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* For an existing all registrants type reservation
@@ -3297,7 +3343,8 @@ static int core_scsi3_emulate_pro_preempt(
default:
pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s"
" Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
return ret;
@@ -3331,7 +3378,8 @@ static int core_scsi3_emulate_pro_register_and_move(
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
memset(dest_iport, 0, 64);
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
@@ -3349,7 +3397,8 @@ static int core_scsi3_emulate_pro_register_and_move(
if (!pr_reg) {
pr_err("SPC-3 PR: Unable to locate PR_REGISTERED"
" *pr_reg for REGISTER_AND_MOVE\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
/*
* The provided reservation key much match the existing reservation key
@@ -3360,7 +3409,8 @@ static int core_scsi3_emulate_pro_register_and_move(
" res_key: 0x%016Lx does not match existing SA REGISTER"
" res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* The service active reservation key needs to be non zero
@@ -3369,7 +3419,8 @@ static int core_scsi3_emulate_pro_register_and_move(
pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received zero"
" sa_res_key\n");
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
/*
@@ -3392,7 +3443,8 @@ static int core_scsi3_emulate_pro_register_and_move(
" does not equal CDB data_length: %u\n", tid_len,
cmd->data_length);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
spin_lock(&dev->se_port_lock);
@@ -3417,7 +3469,8 @@ static int core_scsi3_emulate_pro_register_and_move(
atomic_dec(&dest_se_tpg->tpg_pr_ref_count);
smp_mb__after_atomic_dec();
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
spin_lock(&dev->se_port_lock);
@@ -3430,7 +3483,8 @@ static int core_scsi3_emulate_pro_register_and_move(
" fabric ops from Relative Target Port Identifier:"
" %hu\n", rtpi);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
buf = transport_kmap_first_data_page(cmd);
@@ -3445,14 +3499,16 @@ static int core_scsi3_emulate_pro_register_and_move(
" from fabric: %s\n", proto_ident,
dest_tf_ops->get_fabric_proto_ident(dest_se_tpg),
dest_tf_ops->get_fabric_name());
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) {
pr_err("SPC-3 PR REGISTER_AND_MOVE: Fabric does not"
" containg a valid tpg_parse_pr_out_transport_id"
" function pointer\n");
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = -EINVAL;
goto out;
}
initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg,
@@ -3460,7 +3516,8 @@ static int core_scsi3_emulate_pro_register_and_move(
if (!initiator_str) {
pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
" initiator_str from Transport ID\n");
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
@@ -3489,7 +3546,8 @@ static int core_scsi3_emulate_pro_register_and_move(
pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s"
" matches: %s on received I_T Nexus\n", initiator_str,
pr_reg_nacl->initiatorname);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
if (!strcmp(iport_ptr, pr_reg->pr_reg_isid)) {
@@ -3497,7 +3555,8 @@ static int core_scsi3_emulate_pro_register_and_move(
" matches: %s %s on received I_T Nexus\n",
initiator_str, iport_ptr, pr_reg_nacl->initiatorname,
pr_reg->pr_reg_isid);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
after_iport_check:
@@ -3517,7 +3576,8 @@ after_iport_check:
pr_err("Unable to locate %s dest_node_acl for"
" TransportID%s\n", dest_tf_ops->get_fabric_name(),
initiator_str);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
@@ -3527,7 +3587,8 @@ after_iport_check:
atomic_dec(&dest_node_acl->acl_pr_ref_count);
smp_mb__after_atomic_dec();
dest_node_acl = NULL;
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
#if 0
@@ -3543,7 +3604,8 @@ after_iport_check:
if (!dest_se_deve) {
pr_err("Unable to locate %s dest_se_deve from RTPI:"
" %hu\n", dest_tf_ops->get_fabric_name(), rtpi);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
@@ -3553,7 +3615,8 @@ after_iport_check:
atomic_dec(&dest_se_deve->pr_ref_count);
smp_mb__after_atomic_dec();
dest_se_deve = NULL;
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = -EINVAL;
goto out;
}
#if 0
@@ -3572,7 +3635,8 @@ after_iport_check:
pr_warn("SPC-3 PR REGISTER_AND_MOVE: No reservation"
" currently held\n");
spin_unlock(&dev->dev_reservation_lock);
- ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ ret = -EINVAL;
goto out;
}
/*
@@ -3585,7 +3649,8 @@ after_iport_check:
pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T"
" Nexus is not reservation holder\n");
spin_unlock(&dev->dev_reservation_lock);
- ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ ret = -EINVAL;
goto out;
}
/*
@@ -3603,7 +3668,8 @@ after_iport_check:
" reservation for type: %s\n",
core_scsi3_pr_dump_type(pr_res_holder->pr_res_type));
spin_unlock(&dev->dev_reservation_lock);
- ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ ret = -EINVAL;
goto out;
}
pr_res_nacl = pr_res_holder->pr_reg_nacl;
@@ -3640,7 +3706,8 @@ after_iport_check:
sa_res_key, 0, aptpl, 2, 1);
if (ret != 0) {
spin_unlock(&dev->dev_reservation_lock);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
@@ -3771,7 +3838,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
pr_err("Received PERSISTENT_RESERVE CDB while legacy"
" SPC-2 reservation is held, returning"
" RESERVATION_CONFLICT\n");
- ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ ret = -EINVAL;

goto out;
}
@@ -3779,13 +3847,16 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
* FIXME: A NULL struct se_session pointer means an this is not coming from
* a $FABRIC_MOD's nexus, but from internal passthrough ops.
*/
- if (!cmd->se_sess)
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ if (!cmd->se_sess) {
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
+ }
if (cmd->data_length < 24) {
pr_warn("SPC-PR: Received PR OUT parameter list"
" length too small: %u\n", cmd->data_length);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
/*
@@ -3820,7 +3891,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
* SPEC_I_PT=1 is only valid for Service action: REGISTER
*/
if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER)) {
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
@@ -3837,7 +3909,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
(cmd->data_length != 24)) {
pr_warn("SPC-PR: Received PR OUT illegal parameter"
" list length: %u\n", cmd->data_length);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
/*
@@ -3878,7 +3951,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
default:
pr_err("Unknown PERSISTENT_RESERVE_OUT service"
" action: 0x%02x\n", cdb[1] & 0x1f);
- ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ ret = -EINVAL;
break;
}
@@ -3906,7 +3980,8 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
if (cmd->data_length < 8) {
pr_err("PRIN SA READ_KEYS SCSI Data Length: %u"
" too small\n", cmd->data_length);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
buf = transport_kmap_first_data_page(cmd);
@@ -3965,7 +4040,8 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
if (cmd->data_length < 8) {
pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u"
" too small\n", cmd->data_length);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
buf = transport_kmap_first_data_page(cmd);
@@ -4047,7 +4123,8 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
if (cmd->data_length < 6) {
pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
" %u too small\n", cmd->data_length);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
buf = transport_kmap_first_data_page(cmd);
@@ -4108,7 +4185,8 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
if (cmd->data_length < 8) {
pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
" too small\n", cmd->data_length);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
buf = transport_kmap_first_data_page(cmd);
@@ -4255,7 +4333,8 @@ int target_scsi3_emulate_pr_in(struct se_task *task)
pr_err("Received PERSISTENT_RESERVE CDB while legacy"
" SPC-2 reservation is held, returning"
" RESERVATION_CONFLICT\n");
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
switch (cmd->t_task_cdb[1] & 0x1f) {
@@ -4274,7 +4353,8 @@ int target_scsi3_emulate_pr_in(struct se_task *task)
default:
pr_err("Unknown PERSISTENT_RESERVE_IN service"
" action: 0x%02x\n", cmd->t_task_cdb[1] & 0x1f);
- ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ ret = -EINVAL;
break;
}
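
Note: among the persistent-reservation conversions above, core_scsi3_decode_spec_i_port() keeps its SPEC_I_PT length check: the TransportID parameter data length (tpdl) plus the 28-byte PR OUT header must equal the CDB data length, otherwise the parameter list is rejected. Illustrative sketch of that check:

#include <stdint.h>
#include <stdbool.h>

static bool spec_i_pt_length_ok(uint32_t tpdl, uint32_t cdb_data_length)
{
	return tpdl + 28 == cdb_data_length;	/* 28-byte header + TransportIDs */
}
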
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index ed32e1efe42..8b15e56b038 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -963,6 +963,7 @@ static inline struct bio *pscsi_get_bio(int sg_num)
static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
struct bio **hbio)
{
+ struct se_cmd *cmd = task->task_se_cmd;
struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
u32 task_sg_num = task->task_sg_nents;
struct bio *bio = NULL, *tbio = NULL;
@@ -971,7 +972,7 @@ static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
u32 data_len = task->task_size, i, len, bytes, off;
int nr_pages = (task->task_size + task_sg[0].offset +
PAGE_SIZE - 1) >> PAGE_SHIFT;
- int nr_vecs = 0, rc, ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ int nr_vecs = 0, rc;
int rw = (task->task_data_direction == DMA_TO_DEVICE);
*hbio = NULL;
@@ -1058,11 +1059,13 @@ fail:
bio->bi_next = NULL;
bio_endio(bio, 0); /* XXX: should be error */
}
- return ret;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENOMEM;
}
static int pscsi_do_task(struct se_task *task)
{
+ struct se_cmd *cmd = task->task_se_cmd;
struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
struct pscsi_plugin_task *pt = PSCSI_TASK(task);
struct request *req;
@@ -1078,7 +1081,9 @@ static int pscsi_do_task(struct se_task *task)
if (!req || IS_ERR(req)) {
pr_err("PSCSI: blk_get_request() failed: %ld\n",
req ? IS_ERR(req) : -ENOMEM);
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENODEV;
}
} else {
BUG_ON(!task->task_size);
@@ -1087,8 +1092,11 @@ static int pscsi_do_task(struct se_task *task)
* Setup the main struct request for the task->task_sg[] payload
*/
ret = pscsi_map_sg(task, task->task_sg, &hbio);
- if (ret < 0)
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ if (ret < 0) {
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return ret;
+ }
req = blk_make_request(pdv->pdv_sd->request_queue, hbio,
GFP_KERNEL);
@@ -1115,7 +1123,7 @@ static int pscsi_do_task(struct se_task *task)
(task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG),
pscsi_req_done);
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
fail:
while (hbio) {
@@ -1124,7 +1132,8 @@ fail:
bio->bi_next = NULL;
bio_endio(bio, 0); /* XXX: should be error */
}
- return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENOMEM;
}
/* pscsi_get_sense_buffer():
@@ -1198,9 +1207,8 @@ static inline void pscsi_process_SAM_status(
" 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
pt->pscsi_result);
task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
- task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
- task->task_se_cmd->transport_error_status =
- PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ task->task_se_cmd->scsi_sense_reason =
+ TCM_UNSUPPORTED_SCSI_OPCODE;
transport_complete_task(task, 0);
break;
}
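
Note: a small arithmetic check for the bio vector sizing visible in pscsi_map_sg() above: the page count for a transfer is its byte length plus the offset of the first scatterlist entry, rounded up to whole pages. Worked example assuming 4 KiB pages:

#include <stdio.h>

int main(void)
{
	unsigned int page_size = 4096;
	unsigned int task_size = 8192, first_sg_offset = 512;
	unsigned int nr_pages =
		(task_size + first_sg_offset + page_size - 1) / page_size;

	printf("nr_pages=%u\n", nr_pages);	/* 3: 8704 bytes span three pages */
	return 0;
}
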
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 5158d3846f1..02e51faa2f4 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -343,235 +343,74 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
return NULL;
}
-/* rd_MEMCPY_read():
- *
- *
- */
-static int rd_MEMCPY_read(struct rd_request *req)
+static int rd_MEMCPY(struct rd_request *req, u32 read_rd)
{
struct se_task *task = &req->rd_task;
struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
struct rd_dev_sg_table *table;
- struct scatterlist *sg_d, *sg_s;
- void *dst, *src;
- u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
- u32 length, page_end = 0, table_sg_end;
+ struct scatterlist *rd_sg;
+ struct sg_mapping_iter m;
u32 rd_offset = req->rd_offset;
+ u32 src_len;
table = rd_get_sg_table(dev, req->rd_page);
if (!table)
return -EINVAL;
- table_sg_end = (table->page_end_offset - req->rd_page);
- sg_d = task->task_sg;
- sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
+ rd_sg = &table->sg_table[req->rd_page - table->page_start_offset];
- pr_debug("RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:"
- " %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
- req->rd_page, req->rd_offset);
-
- src_offset = rd_offset;
+ pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
+ dev->rd_dev_id, read_rd ? "Read" : "Write",
+ task->task_lba, req->rd_size, req->rd_page,
+ rd_offset);
+ src_len = PAGE_SIZE - rd_offset;
+ sg_miter_start(&m, task->task_sg, task->task_sg_nents,
+ read_rd ? SG_MITER_TO_SG : SG_MITER_FROM_SG);
while (req->rd_size) {
- if ((sg_d[i].length - dst_offset) <
- (sg_s[j].length - src_offset)) {
- length = (sg_d[i].length - dst_offset);
-
- pr_debug("Step 1 - sg_d[%d]: %p length: %d"
- " offset: %u sg_s[%d].length: %u\n", i,
- &sg_d[i], sg_d[i].length, sg_d[i].offset, j,
- sg_s[j].length);
- pr_debug("Step 1 - length: %u dst_offset: %u"
- " src_offset: %u\n", length, dst_offset,
- src_offset);
-
- if (length > req->rd_size)
- length = req->rd_size;
-
- dst = sg_virt(&sg_d[i++]) + dst_offset;
- BUG_ON(!dst);
-
- src = sg_virt(&sg_s[j]) + src_offset;
- BUG_ON(!src);
-
- dst_offset = 0;
- src_offset = length;
- page_end = 0;
- } else {
- length = (sg_s[j].length - src_offset);
-
- pr_debug("Step 2 - sg_d[%d]: %p length: %d"
- " offset: %u sg_s[%d].length: %u\n", i,
- &sg_d[i], sg_d[i].length, sg_d[i].offset,
- j, sg_s[j].length);
- pr_debug("Step 2 - length: %u dst_offset: %u"
- " src_offset: %u\n", length, dst_offset,
- src_offset);
-
- if (length > req->rd_size)
- length = req->rd_size;
-
- dst = sg_virt(&sg_d[i]) + dst_offset;
- BUG_ON(!dst);
-
- if (sg_d[i].length == length) {
- i++;
- dst_offset = 0;
- } else
- dst_offset = length;
-
- src = sg_virt(&sg_s[j++]) + src_offset;
- BUG_ON(!src);
-
- src_offset = 0;
- page_end = 1;
- }
+ u32 len;
+ void *rd_addr;
- memcpy(dst, src, length);
+ sg_miter_next(&m);
+ len = min((u32)m.length, src_len);
+ m.consumed = len;
- pr_debug("page: %u, remaining size: %u, length: %u,"
- " i: %u, j: %u\n", req->rd_page,
- (req->rd_size - length), length, i, j);
+ rd_addr = sg_virt(rd_sg) + rd_offset;
- req->rd_size -= length;
- if (!req->rd_size)
- return 0;
+ if (read_rd)
+ memcpy(m.addr, rd_addr, len);
+ else
+ memcpy(rd_addr, m.addr, len);
- if (!page_end)
+ req->rd_size -= len;
+ if (!req->rd_size)
continue;
- if (++req->rd_page <= table->page_end_offset) {
- pr_debug("page: %u in same page table\n",
- req->rd_page);
+ src_len -= len;
+ if (src_len) {
+ rd_offset += len;
continue;
}
- pr_debug("getting new page table for page: %u\n",
- req->rd_page);
-
- table = rd_get_sg_table(dev, req->rd_page);
- if (!table)
- return -EINVAL;
-
- sg_s = &table->sg_table[j = 0];
- }
-
- return 0;
-}
-
-/* rd_MEMCPY_write():
- *
- *
- */
-static int rd_MEMCPY_write(struct rd_request *req)
-{
- struct se_task *task = &req->rd_task;
- struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
- struct rd_dev_sg_table *table;
- struct scatterlist *sg_d, *sg_s;
- void *dst, *src;
- u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
- u32 length, page_end = 0, table_sg_end;
- u32 rd_offset = req->rd_offset;
-
- table = rd_get_sg_table(dev, req->rd_page);
- if (!table)
- return -EINVAL;
-
- table_sg_end = (table->page_end_offset - req->rd_page);
- sg_d = &table->sg_table[req->rd_page - table->page_start_offset];
- sg_s = task->task_sg;
-
- pr_debug("RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
- " Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
- req->rd_page, req->rd_offset);
-
- dst_offset = rd_offset;
-
- while (req->rd_size) {
- if ((sg_s[i].length - src_offset) <
- (sg_d[j].length - dst_offset)) {
- length = (sg_s[i].length - src_offset);
-
- pr_debug("Step 1 - sg_s[%d]: %p length: %d"
- " offset: %d sg_d[%d].length: %u\n", i,
- &sg_s[i], sg_s[i].length, sg_s[i].offset,
- j, sg_d[j].length);
- pr_debug("Step 1 - length: %u src_offset: %u"
- " dst_offset: %u\n", length, src_offset,
- dst_offset);
-
- if (length > req->rd_size)
- length = req->rd_size;
-
- src = sg_virt(&sg_s[i++]) + src_offset;
- BUG_ON(!src);
-
- dst = sg_virt(&sg_d[j]) + dst_offset;
- BUG_ON(!dst);
-
- src_offset = 0;
- dst_offset = length;
- page_end = 0;
- } else {
- length = (sg_d[j].length - dst_offset);
-
- pr_debug("Step 2 - sg_s[%d]: %p length: %d"
- " offset: %d sg_d[%d].length: %u\n", i,
- &sg_s[i], sg_s[i].length, sg_s[i].offset,
- j, sg_d[j].length);
- pr_debug("Step 2 - length: %u src_offset: %u"
- " dst_offset: %u\n", length, src_offset,
- dst_offset);
-
- if (length > req->rd_size)
- length = req->rd_size;
-
- src = sg_virt(&sg_s[i]) + src_offset;
- BUG_ON(!src);
-
- if (sg_s[i].length == length) {
- i++;
- src_offset = 0;
- } else
- src_offset = length;
-
- dst = sg_virt(&sg_d[j++]) + dst_offset;
- BUG_ON(!dst);
-
- dst_offset = 0;
- page_end = 1;
- }
-
- memcpy(dst, src, length);
-
- pr_debug("page: %u, remaining size: %u, length: %u,"
- " i: %u, j: %u\n", req->rd_page,
- (req->rd_size - length), length, i, j);
-
- req->rd_size -= length;
- if (!req->rd_size)
- return 0;
-
- if (!page_end)
- continue;
-
- if (++req->rd_page <= table->page_end_offset) {
- pr_debug("page: %u in same page table\n",
- req->rd_page);
+ /* rd page completed, next one please */
+ req->rd_page++;
+ rd_offset = 0;
+ src_len = PAGE_SIZE;
+ if (req->rd_page <= table->page_end_offset) {
+ rd_sg++;
continue;
}
- pr_debug("getting new page table for page: %u\n",
- req->rd_page);
-
table = rd_get_sg_table(dev, req->rd_page);
- if (!table)
+ if (!table) {
+ sg_miter_stop(&m);
return -EINVAL;
+ }
- sg_d = &table->sg_table[j = 0];
+ /* since we increment, the first sg entry is correct */
+ rd_sg = table->sg_table;
}
-
+ sg_miter_stop(&m);
return 0;
}
@@ -583,28 +422,21 @@ static int rd_MEMCPY_do_task(struct se_task *task)
{
struct se_device *dev = task->task_se_cmd->se_dev;
struct rd_request *req = RD_REQ(task);
- unsigned long long lba;
+ u64 tmp;
int ret;
- req->rd_page = (task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size) / PAGE_SIZE;
- lba = task->task_lba;
- req->rd_offset = (do_div(lba,
- (PAGE_SIZE / dev->se_sub_dev->se_dev_attrib.block_size))) *
- dev->se_sub_dev->se_dev_attrib.block_size;
+ tmp = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
+ req->rd_offset = do_div(tmp, PAGE_SIZE);
+ req->rd_page = tmp;
req->rd_size = task->task_size;
- if (task->task_data_direction == DMA_FROM_DEVICE)
- ret = rd_MEMCPY_read(req);
- else
- ret = rd_MEMCPY_write(req);
-
+ ret = rd_MEMCPY(req, task->task_data_direction == DMA_FROM_DEVICE);
if (ret != 0)
return ret;
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
-
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
}
/* rd_free_task(): (Part of se_subsystem_api_t template)
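
Note: the rd_MEMCPY() rewrite above also simplifies the page/offset math in rd_MEMCPY_do_task(): the LBA's byte offset is split into a ramdisk page index and an offset within that page with a single divide. Worked example assuming 4 KiB pages and 512-byte blocks:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t lba = 9, block_size = 512, page_size = 4096;
	uint64_t byte_off = lba * block_size;		/* 4608 */
	unsigned int rd_page = byte_off / page_size;	/* 1   */
	unsigned int rd_offset = byte_off % page_size;	/* 512 */

	printf("rd_page=%u rd_offset=%u\n", rd_page, rd_offset);
	return 0;
}
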
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 217e29df629..684522805a1 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -345,10 +345,6 @@ static void core_tmr_drain_cmd_list(
" %d t_fe_count: %d\n", (preempt_and_abort_list) ?
"Preempt" : "", cmd, cmd->t_state,
atomic_read(&cmd->t_fe_count));
- /*
- * Signal that the command has failed via cmd->se_cmd_flags,
- */
- transport_new_cmd_failure(cmd);
core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
atomic_read(&cmd->t_fe_count));
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 3400ae6e93f..0257658e2e3 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -61,7 +61,6 @@
static int sub_api_initialized;
static struct workqueue_struct *target_completion_wq;
-static struct kmem_cache *se_cmd_cache;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_tmr_req_cache;
struct kmem_cache *se_ua_cache;
@@ -82,24 +81,18 @@ static int transport_generic_get_mem(struct se_cmd *cmd);
static void transport_put_cmd(struct se_cmd *cmd);
static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
-static void transport_generic_request_failure(struct se_cmd *, int, int);
+static void transport_generic_request_failure(struct se_cmd *);
static void target_complete_ok_work(struct work_struct *work);
int init_se_kmem_caches(void)
{
- se_cmd_cache = kmem_cache_create("se_cmd_cache",
- sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL);
- if (!se_cmd_cache) {
- pr_err("kmem_cache_create for struct se_cmd failed\n");
- goto out;
- }
se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
0, NULL);
if (!se_tmr_req_cache) {
pr_err("kmem_cache_create() for struct se_tmr_req"
" failed\n");
- goto out_free_cmd_cache;
+ goto out;
}
se_sess_cache = kmem_cache_create("se_sess_cache",
sizeof(struct se_session), __alignof__(struct se_session),
@@ -182,8 +175,6 @@ out_free_sess_cache:
kmem_cache_destroy(se_sess_cache);
out_free_tmr_req_cache:
kmem_cache_destroy(se_tmr_req_cache);
-out_free_cmd_cache:
- kmem_cache_destroy(se_cmd_cache);
out:
return -ENOMEM;
}
@@ -191,7 +182,6 @@ out:
void release_se_kmem_caches(void)
{
destroy_workqueue(target_completion_wq);
- kmem_cache_destroy(se_cmd_cache);
kmem_cache_destroy(se_tmr_req_cache);
kmem_cache_destroy(se_sess_cache);
kmem_cache_destroy(se_ua_cache);
@@ -680,9 +670,9 @@ void transport_complete_sync_cache(struct se_cmd *cmd, int good)
task->task_scsi_status = GOOD;
} else {
task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
- task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST;
- task->task_se_cmd->transport_error_status =
- PYX_TRANSPORT_ILLEGAL_REQUEST;
+ task->task_se_cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
}
transport_complete_task(task, good);
@@ -693,7 +683,7 @@ static void target_complete_failure_work(struct work_struct *work)
{
struct se_cmd *cmd = container_of(work, struct se_cmd, work);
- transport_generic_request_failure(cmd, 1, 1);
+ transport_generic_request_failure(cmd);
}
/* transport_complete_task():
@@ -755,10 +745,11 @@ void transport_complete_task(struct se_task *task, int success)
if (cmd->t_tasks_failed) {
if (!task->task_error_status) {
task->task_error_status =
- PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
- cmd->transport_error_status =
- PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
+
INIT_WORK(&cmd->work, target_complete_failure_work);
} else {
atomic_set(&cmd->t_transport_complete, 1);
@@ -1335,23 +1326,17 @@ struct se_device *transport_add_device_to_core_hba(
dev->se_hba = hba;
dev->se_sub_dev = se_dev;
dev->transport = transport;
- atomic_set(&dev->active_cmds, 0);
INIT_LIST_HEAD(&dev->dev_list);
INIT_LIST_HEAD(&dev->dev_sep_list);
INIT_LIST_HEAD(&dev->dev_tmr_list);
INIT_LIST_HEAD(&dev->execute_task_list);
INIT_LIST_HEAD(&dev->delayed_cmd_list);
- INIT_LIST_HEAD(&dev->ordered_cmd_list);
INIT_LIST_HEAD(&dev->state_task_list);
INIT_LIST_HEAD(&dev->qf_cmd_list);
spin_lock_init(&dev->execute_task_lock);
spin_lock_init(&dev->delayed_cmd_lock);
- spin_lock_init(&dev->ordered_cmd_lock);
- spin_lock_init(&dev->state_task_lock);
- spin_lock_init(&dev->dev_alua_lock);
spin_lock_init(&dev->dev_reservation_lock);
spin_lock_init(&dev->dev_status_lock);
- spin_lock_init(&dev->dev_status_thr_lock);
spin_lock_init(&dev->se_port_lock);
spin_lock_init(&dev->se_tmr_lock);
spin_lock_init(&dev->qf_cmd_lock);
@@ -1507,7 +1492,6 @@ void transport_init_se_cmd(
{
INIT_LIST_HEAD(&cmd->se_lun_node);
INIT_LIST_HEAD(&cmd->se_delayed_node);
- INIT_LIST_HEAD(&cmd->se_ordered_node);
INIT_LIST_HEAD(&cmd->se_qf_node);
INIT_LIST_HEAD(&cmd->se_queue_node);
INIT_LIST_HEAD(&cmd->se_cmd_list);
@@ -1573,6 +1557,8 @@ int transport_generic_allocate_tasks(
pr_err("Received SCSI CDB with command_size: %d that"
" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
return -EINVAL;
}
/*
@@ -1588,6 +1574,9 @@ int transport_generic_allocate_tasks(
" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
scsi_command_size(cdb),
(unsigned long)sizeof(cmd->__t_task_cdb));
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -ENOMEM;
}
} else
@@ -1658,11 +1647,9 @@ int transport_handle_cdb_direct(
* and call transport_generic_request_failure() if necessary..
*/
ret = transport_generic_new_cmd(cmd);
- if (ret < 0) {
- cmd->transport_error_status = ret;
- transport_generic_request_failure(cmd, 0,
- (cmd->data_direction != DMA_TO_DEVICE));
- }
+ if (ret < 0)
+ transport_generic_request_failure(cmd);
+
return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);
@@ -1798,20 +1785,16 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
/*
* Handle SAM-esque emulation for generic transport request failures.
*/
-static void transport_generic_request_failure(
- struct se_cmd *cmd,
- int complete,
- int sc)
+static void transport_generic_request_failure(struct se_cmd *cmd)
{
int ret = 0;
pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
cmd->t_task_cdb[0]);
- pr_debug("-----[ i_state: %d t_state: %d transport_error_status: %d\n",
+ pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
cmd->se_tfo->get_cmd_state(cmd),
- cmd->t_state,
- cmd->transport_error_status);
+ cmd->t_state, cmd->scsi_sense_reason);
pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
" t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
" t_transport_active: %d t_transport_stop: %d"
@@ -1829,46 +1812,19 @@ static void transport_generic_request_failure(
if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
transport_complete_task_attr(cmd);
- if (complete) {
- cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
- }
-
- switch (cmd->transport_error_status) {
- case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE:
- cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
- break;
- case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS:
- cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
- break;
- case PYX_TRANSPORT_INVALID_CDB_FIELD:
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- break;
- case PYX_TRANSPORT_INVALID_PARAMETER_LIST:
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- break;
- case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES:
- if (!sc)
- transport_new_cmd_failure(cmd);
- /*
- * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES,
- * we force this session to fall back to session
- * recovery.
- */
- cmd->se_tfo->fall_back_to_erl0(cmd->se_sess);
- cmd->se_tfo->stop_session(cmd->se_sess, 0, 0);
-
- goto check_stop;
- case PYX_TRANSPORT_LU_COMM_FAILURE:
- case PYX_TRANSPORT_ILLEGAL_REQUEST:
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- break;
- case PYX_TRANSPORT_UNKNOWN_MODE_PAGE:
- cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
- break;
- case PYX_TRANSPORT_WRITE_PROTECTED:
- cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+ switch (cmd->scsi_sense_reason) {
+ case TCM_NON_EXISTENT_LUN:
+ case TCM_UNSUPPORTED_SCSI_OPCODE:
+ case TCM_INVALID_CDB_FIELD:
+ case TCM_INVALID_PARAMETER_LIST:
+ case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
+ case TCM_UNKNOWN_MODE_PAGE:
+ case TCM_WRITE_PROTECTED:
+ case TCM_CHECK_CONDITION_ABORT_CMD:
+ case TCM_CHECK_CONDITION_UNIT_ATTENTION:
+ case TCM_CHECK_CONDITION_NOT_READY:
break;
- case PYX_TRANSPORT_RESERVATION_CONFLICT:
+ case TCM_RESERVATION_CONFLICT:
/*
* No SENSE Data payload for this case, set SCSI Status
* and queue the response to $FABRIC_MOD.
@@ -1893,15 +1849,9 @@ static void transport_generic_request_failure(
if (ret == -EAGAIN || ret == -ENOMEM)
goto queue_full;
goto check_stop;
- case PYX_TRANSPORT_USE_SENSE_REASON:
- /*
- * struct se_cmd->scsi_sense_reason already set
- */
- break;
default:
pr_err("Unknown transport error for CDB 0x%02x: %d\n",
- cmd->t_task_cdb[0],
- cmd->transport_error_status);
+ cmd->t_task_cdb[0], cmd->scsi_sense_reason);
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
break;
}
@@ -1912,14 +1862,10 @@ static void transport_generic_request_failure(
* transport_send_check_condition_and_sense() after handling
* possible unsoliticied write data payloads.
*/
- if (!sc && !cmd->se_tfo->new_cmd_map)
- transport_new_cmd_failure(cmd);
- else {
- ret = transport_send_check_condition_and_sense(cmd,
- cmd->scsi_sense_reason, 0);
- if (ret == -EAGAIN || ret == -ENOMEM)
- goto queue_full;
- }
+ ret = transport_send_check_condition_and_sense(cmd,
+ cmd->scsi_sense_reason, 0);
+ if (ret == -EAGAIN || ret == -ENOMEM)
+ goto queue_full;
check_stop:
transport_lun_remove_cmd(cmd);
@@ -2002,19 +1948,12 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
* to allow the passed struct se_cmd list of tasks to the front of the list.
*/
if (cmd->sam_task_attr == MSG_HEAD_TAG) {
- atomic_inc(&cmd->se_dev->dev_hoq_count);
- smp_mb__after_atomic_inc();
pr_debug("Added HEAD_OF_QUEUE for CDB:"
" 0x%02x, se_ordered_id: %u\n",
cmd->t_task_cdb[0],
cmd->se_ordered_id);
return 1;
} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
- spin_lock(&cmd->se_dev->ordered_cmd_lock);
- list_add_tail(&cmd->se_ordered_node,
- &cmd->se_dev->ordered_cmd_list);
- spin_unlock(&cmd->se_dev->ordered_cmd_lock);
-
atomic_inc(&cmd->se_dev->dev_ordered_sync);
smp_mb__after_atomic_inc();
@@ -2076,9 +2015,9 @@ static int transport_execute_tasks(struct se_cmd *cmd)
{
int add_tasks;
- if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) {
- cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
- transport_generic_request_failure(cmd, 0, 1);
+ if (se_dev_check_online(cmd->se_dev) != 0) {
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ transport_generic_request_failure(cmd);
return 0;
}
@@ -2163,14 +2102,13 @@ check_depth:
else
error = dev->transport->do_task(task);
if (error != 0) {
- cmd->transport_error_status = error;
spin_lock_irqsave(&cmd->t_state_lock, flags);
task->task_flags &= ~TF_ACTIVE;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
atomic_set(&cmd->t_transport_sent, 0);
transport_stop_tasks_for_cmd(cmd);
atomic_inc(&dev->depth_left);
- transport_generic_request_failure(cmd, 0, 1);
+ transport_generic_request_failure(cmd);
}
goto check_depth;
@@ -2178,19 +2116,6 @@ check_depth:
return 0;
}
-void transport_new_cmd_failure(struct se_cmd *se_cmd)
-{
- unsigned long flags;
- /*
- * Any unsolicited data will get dumped for failed command inside of
- * the fabric plugin
- */
- spin_lock_irqsave(&se_cmd->t_state_lock, flags);
- se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED;
- se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
-}
-
static inline u32 transport_get_sectors_6(
unsigned char *cdb,
struct se_cmd *cmd,
@@ -2213,10 +2138,15 @@ static inline u32 transport_get_sectors_6(
/*
* Everything else assume TYPE_DISK Sector CDB location.
- * Use 8-bit sector value.
+ * Use 8-bit sector value. SBC-3 says:
+ *
+ * A TRANSFER LENGTH field set to zero specifies that 256
+ * logical blocks shall be written. Any other value
+ * specifies the number of logical blocks that shall be
+ * written.
*/
type_disk:
- return (u32)cdb[4];
+ return cdb[4] ? : 256;
}
static inline u32 transport_get_sectors_10(
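
Note: the new comment and "cdb[4] ? : 256" above encode an SBC-3 rule for 6-byte CDBs: a TRANSFER LENGTH of zero means 256 logical blocks. A portable sketch without the GCC "?:" extension:

#include <stdint.h>

static uint32_t sectors_from_cdb6(uint8_t transfer_length)
{
	return transfer_length ? transfer_length : 256;	/* 0 means 256 blocks */
}
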
@@ -2460,27 +2390,6 @@ static int transport_get_sense_data(struct se_cmd *cmd)
return -1;
}
-static int
-transport_handle_reservation_conflict(struct se_cmd *cmd)
-{
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
- cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
- /*
- * For UA Interlock Code 11b, a RESERVATION CONFLICT will
- * establish a UNIT ATTENTION with PREVIOUS RESERVATION
- * CONFLICT STATUS.
- *
- * See spc4r17, section 7.4.6 Control Mode Page, Table 349
- */
- if (cmd->se_sess &&
- cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
- core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
- cmd->orig_fe_lun, 0x2C,
- ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
- return -EINVAL;
-}
-
static inline long long transport_dev_end_lba(struct se_device *dev)
{
return dev->transport->get_blocks(dev) + 1;
@@ -2595,8 +2504,12 @@ static int transport_generic_cmd_sequencer(
*/
if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
- cmd, cdb, pr_reg_type) != 0)
- return transport_handle_reservation_conflict(cmd);
+ cmd, cdb, pr_reg_type) != 0) {
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EBUSY;
+ }
/*
* This means the CDB is allowed for the SCSI Initiator port
* when said port is *NOT* holding the legacy SPC-2 or
@@ -2658,7 +2571,8 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_32(cdb);
- cmd->t_tasks_fua = (cdb[1] & 0x8);
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case WRITE_12:
@@ -2667,7 +2581,8 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_32(cdb);
- cmd->t_tasks_fua = (cdb[1] & 0x8);
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case WRITE_16:
@@ -2676,12 +2591,13 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_64(cdb);
- cmd->t_tasks_fua = (cdb[1] & 0x8);
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case XDWRITEREAD_10:
if ((cmd->data_direction != DMA_TO_DEVICE) ||
- !(cmd->t_tasks_bidi))
+ !(cmd->se_cmd_flags & SCF_BIDI))
goto out_invalid_cdb_field;
sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
if (sector_ret)
@@ -2700,7 +2616,8 @@ static int transport_generic_cmd_sequencer(
* Setup BIDI XOR callback to be run after I/O completion.
*/
cmd->transport_complete_callback = &transport_xor_callback;
- cmd->t_tasks_fua = (cdb[1] & 0x8);
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
break;
case VARIABLE_LENGTH_CMD:
service_action = get_unaligned_be16(&cdb[8]);
@@ -2728,7 +2645,8 @@ static int transport_generic_cmd_sequencer(
* completion.
*/
cmd->transport_complete_callback = &transport_xor_callback;
- cmd->t_tasks_fua = (cdb[10] & 0x8);
+ if (cdb[10] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
break;
case WRITE_SAME_32:
sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
@@ -3171,18 +3089,13 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
" SIMPLE: %u\n", dev->dev_cur_ordered_id,
cmd->se_ordered_id);
} else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
- atomic_dec(&dev->dev_hoq_count);
- smp_mb__after_atomic_dec();
dev->dev_cur_ordered_id++;
pr_debug("Incremented dev_cur_ordered_id: %u for"
" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
cmd->se_ordered_id);
} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
- spin_lock(&dev->ordered_cmd_lock);
- list_del(&cmd->se_ordered_node);
atomic_dec(&dev->dev_ordered_sync);
smp_mb__after_atomic_dec();
- spin_unlock(&dev->ordered_cmd_lock);
dev->dev_cur_ordered_id++;
pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
@@ -3495,6 +3408,18 @@ int transport_generic_map_mem_to_cmd(
if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
(cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
+ /*
+ * Reject SCSI data overflow with map_mem_to_cmd() as incoming
+ * scatterlists already have been set to follow what the fabric
+ * passes for the original expected data transfer length.
+ */
+ if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+ pr_warn("Rejecting SCSI DATA overflow for fabric using"
+ " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
+ }
cmd->t_data_sg = sgl;
cmd->t_data_nents = sgl_count;
@@ -3813,7 +3738,7 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
cmd->data_length) {
ret = transport_generic_get_mem(cmd);
if (ret < 0)
- return ret;
+ goto out_fail;
}
/*
@@ -3842,8 +3767,15 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
task_cdbs = transport_allocate_control_task(cmd);
}
- if (task_cdbs <= 0)
+ if (task_cdbs < 0)
goto out_fail;
+ else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
+ cmd->t_state = TRANSPORT_COMPLETE;
+ atomic_set(&cmd->t_transport_active, 1);
+ INIT_WORK(&cmd->work, target_complete_ok_work);
+ queue_work(target_completion_wq, &cmd->work);
+ return 0;
+ }
if (set_counts) {
atomic_inc(&cmd->t_fe_count);
@@ -3929,7 +3861,7 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
else if (ret < 0)
return ret;
- return PYX_TRANSPORT_WRITE_PENDING;
+ return 1;
queue_full:
pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
@@ -4602,9 +4534,6 @@ void transport_send_task_abort(struct se_cmd *cmd)
if (cmd->se_tfo->write_pending_status(cmd) != 0) {
atomic_inc(&cmd->t_transport_aborted);
smp_mb__after_atomic_inc();
- cmd->scsi_status = SAM_STAT_TASK_ABORTED;
- transport_new_cmd_failure(cmd);
- return;
}
}
cmd->scsi_status = SAM_STAT_TASK_ABORTED;
@@ -4670,8 +4599,6 @@ static int transport_processing_thread(void *param)
struct se_cmd *cmd;
struct se_device *dev = (struct se_device *) param;
- set_user_nice(current, -20);
-
while (!kthread_should_stop()) {
ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
atomic_read(&dev->dev_queue_obj.queue_cnt) ||
@@ -4698,18 +4625,13 @@ get_cmd:
}
ret = cmd->se_tfo->new_cmd_map(cmd);
if (ret < 0) {
- cmd->transport_error_status = ret;
- transport_generic_request_failure(cmd,
- 0, (cmd->data_direction !=
- DMA_TO_DEVICE));
+ transport_generic_request_failure(cmd);
break;
}
ret = transport_generic_new_cmd(cmd);
if (ret < 0) {
- cmd->transport_error_status = ret;
- transport_generic_request_failure(cmd,
- 0, (cmd->data_direction !=
- DMA_TO_DEVICE));
+ transport_generic_request_failure(cmd);
+ break;
}
break;
case TRANSPORT_PROCESS_WRITE:
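
Note: with the PYX_TRANSPORT_* values gone, transport_generic_request_failure() above dispatches directly on cmd->scsi_sense_reason: most reasons produce a CHECK CONDITION with sense data, while TCM_RESERVATION_CONFLICT is reported as a bare RESERVATION CONFLICT status. Illustrative sketch of that dispatch:

#include <stdio.h>

enum sense_reason { SENSE_INVALID_CDB_FIELD, SENSE_RESERVATION_CONFLICT };

static void report_failure(enum sense_reason reason)
{
	switch (reason) {
	case SENSE_RESERVATION_CONFLICT:
		puts("RESERVATION CONFLICT status, no sense payload");
		break;
	default:
		puts("CHECK CONDITION status + sense data for the reason");
		break;
	}
}
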
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 4fac37c4c61..71fc9cea5dc 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -200,7 +200,7 @@ int ft_write_pending(struct se_cmd *se_cmd)
lport = ep->lp;
fp = fc_frame_alloc(lport, sizeof(*txrdy));
if (!fp)
- return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ return -ENOMEM; /* Signal QUEUE_FULL */
txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
memset(txrdy, 0, sizeof(*txrdy));
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 5f770412ca4..9402b7387ca 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -436,8 +436,7 @@ static void ft_del_lport(struct se_wwn *wwn)
struct ft_lport_acl *lacl = container_of(wwn,
struct ft_lport_acl, fc_lport_wwn);
- pr_debug("del lport %s\n",
- config_item_name(&wwn->wwn_group.cg_item));
+ pr_debug("del lport %s\n", lacl->name);
mutex_lock(&ft_lport_lock);
list_del(&lacl->list);
mutex_unlock(&ft_lport_lock);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index e8c564a5334..a8078d0638f 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1458,6 +1458,16 @@ static const struct usb_device_id acm_ids[] = {
},
{ USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
},
+ /* Motorola H24 HSPA module: */
+ { USB_DEVICE(0x22b8, 0x2d91) }, /* modem */
+ { USB_DEVICE(0x22b8, 0x2d92) }, /* modem + diagnostics */
+ { USB_DEVICE(0x22b8, 0x2d93) }, /* modem + AT port */
+ { USB_DEVICE(0x22b8, 0x2d95) }, /* modem + AT port + diagnostics */
+ { USB_DEVICE(0x22b8, 0x2d96) }, /* modem + NMEA */
+ { USB_DEVICE(0x22b8, 0x2d97) }, /* modem + diagnostics + NMEA */
+ { USB_DEVICE(0x22b8, 0x2d99) }, /* modem + AT port + NMEA */
+ { USB_DEVICE(0x22b8, 0x2d9a) }, /* modem + AT port + diagnostics + NMEA */
+
{ USB_DEVICE(0x0572, 0x1329), /* Hummingbird huc56s (Conexant) */
.driver_info = NO_UNION_NORMAL, /* union descriptor misplaced on
data interface instead of
diff --git a/drivers/usb/gadget/amd5536udc.c b/drivers/usb/gadget/amd5536udc.c
index 4730016d7cd..45f422ac103 100644
--- a/drivers/usb/gadget/amd5536udc.c
+++ b/drivers/usb/gadget/amd5536udc.c
@@ -1959,7 +1959,7 @@ static int amd5536_start(struct usb_gadget_driver *driver,
u32 tmp;
if (!driver || !bind || !driver->setup
- || driver->speed != USB_SPEED_HIGH)
+ || driver->speed < USB_SPEED_HIGH)
return -EINVAL;
if (!dev)
return -ENODEV;
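
The speed checks changed here and in the similar gadget-driver hunks below rely on usb_device_speed being an enum whose values increase with speed, so "at least high speed" can be written as a plain `<` comparison instead of an exact match that would reject faster-rated drivers. A minimal standalone C sketch of the idea; the enum is a stand-in modelled on the kernel's ordering and check_driver_speed() is a hypothetical helper, not driver code:

    #include <stdio.h>

    /* Stand-in for the kernel's enum usb_device_speed; values increase with speed. */
    enum usb_device_speed {
            USB_SPEED_UNKNOWN = 0,
            USB_SPEED_LOW,
            USB_SPEED_FULL,
            USB_SPEED_HIGH,
            USB_SPEED_WIRELESS,
            USB_SPEED_SUPER,
    };

    /* Reject gadget drivers that are not rated for at least 'min' speed. */
    static int check_driver_speed(enum usb_device_speed driver_speed,
                                  enum usb_device_speed min)
    {
            if (driver_speed < min)
                    return -22;             /* -EINVAL */
            return 0;
    }

    int main(void)
    {
            /* A faster-rated driver passes a "high speed or better" controller
             * check that a strict "!= USB_SPEED_HIGH" test would have rejected. */
            printf("%d\n", check_driver_speed(USB_SPEED_SUPER, USB_SPEED_HIGH)); /* 0 */
            printf("%d\n", check_driver_speed(USB_SPEED_FULL, USB_SPEED_HIGH));  /* -22 */
            return 0;
    }

As the comparison suggests, the intent is to accept any driver rated at or above the controller's minimum rather than only an exact speed match.
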
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index c39d58860fa..1a6f415c0d0 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -2975,6 +2975,7 @@ static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
fsg_common_put(common);
usb_free_descriptors(fsg->function.descriptors);
usb_free_descriptors(fsg->function.hs_descriptors);
+ usb_free_descriptors(fsg->function.ss_descriptors);
kfree(fsg);
}
diff --git a/drivers/usb/gadget/f_serial.c b/drivers/usb/gadget/f_serial.c
index 91fdf790ed2..cf33a8d0fd5 100644
--- a/drivers/usb/gadget/f_serial.c
+++ b/drivers/usb/gadget/f_serial.c
@@ -131,8 +131,8 @@ static int gser_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
}
if (!gser->port.in->desc || !gser->port.out->desc) {
DBG(cdev, "activate generic ttyGS%d\n", gser->port_num);
- if (!config_ep_by_speed(cdev->gadget, f, gser->port.in) ||
- !config_ep_by_speed(cdev->gadget, f, gser->port.out)) {
+ if (config_ep_by_speed(cdev->gadget, f, gser->port.in) ||
+ config_ep_by_speed(cdev->gadget, f, gser->port.out)) {
gser->port.in->desc = NULL;
gser->port.out->desc = NULL;
return -EINVAL;
diff --git a/drivers/usb/gadget/fsl_mxc_udc.c b/drivers/usb/gadget/fsl_mxc_udc.c
index 43a49ecc1f3..dcbc0a2e48d 100644
--- a/drivers/usb/gadget/fsl_mxc_udc.c
+++ b/drivers/usb/gadget/fsl_mxc_udc.c
@@ -16,6 +16,7 @@
#include <linux/err.h>
#include <linux/fsl_devices.h>
#include <linux/platform_device.h>
+#include <linux/io.h>
#include <mach/hardware.h>
@@ -88,7 +89,6 @@ eenahb:
void fsl_udc_clk_finalize(struct platform_device *pdev)
{
struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;
-#if defined(CONFIG_SOC_IMX35)
if (cpu_is_mx35()) {
unsigned int v;
@@ -101,7 +101,6 @@ void fsl_udc_clk_finalize(struct platform_device *pdev)
USBPHYCTRL_OTGBASE_OFFSET));
}
}
-#endif
/* ULPI transceivers don't need usbpll */
if (pdata->phy_mode == FSL_USB2_PHY_ULPI) {
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
index 2a03e4de11c..e00cf92409c 100644
--- a/drivers/usb/gadget/fsl_qe_udc.c
+++ b/drivers/usb/gadget/fsl_qe_udc.c
@@ -2336,8 +2336,7 @@ static int fsl_qe_start(struct usb_gadget_driver *driver,
if (!udc_controller)
return -ENODEV;
- if (!driver || (driver->speed != USB_SPEED_FULL
- && driver->speed != USB_SPEED_HIGH)
+ if (!driver || driver->speed < USB_SPEED_FULL
|| !bind || !driver->disconnect || !driver->setup)
return -EINVAL;
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
index b3b3d83b7c3..dd28ef3def7 100644
--- a/drivers/usb/gadget/fsl_udc_core.c
+++ b/drivers/usb/gadget/fsl_udc_core.c
@@ -696,12 +696,31 @@ static void fsl_free_request(struct usb_ep *_ep, struct usb_request *_req)
kfree(req);
}
-/*-------------------------------------------------------------------------*/
+/* Actually add a dTD chain to an empty dQH and let go */
+static void fsl_prime_ep(struct fsl_ep *ep, struct ep_td_struct *td)
+{
+ struct ep_queue_head *qh = get_qh_by_ep(ep);
+
+ /* Write dQH next pointer and terminate bit to 0 */
+ qh->next_dtd_ptr = cpu_to_hc32(td->td_dma
+ & EP_QUEUE_HEAD_NEXT_POINTER_MASK);
+
+ /* Clear active and halt bit */
+ qh->size_ioc_int_sts &= cpu_to_hc32(~(EP_QUEUE_HEAD_STATUS_ACTIVE
+ | EP_QUEUE_HEAD_STATUS_HALT));
+
+ /* Ensure that updates to the QH will occur before priming. */
+ wmb();
+
+ /* Prime endpoint by writing correct bit to ENDPTPRIME */
+ fsl_writel(ep_is_in(ep) ? (1 << (ep_index(ep) + 16))
+ : (1 << (ep_index(ep))), &dr_regs->endpointprime);
+}
+
+/* Add dTD chain to the dQH of an EP */
static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
{
- int i = ep_index(ep) * 2 + ep_is_in(ep);
u32 temp, bitmask, tmp_stat;
- struct ep_queue_head *dQH = &ep->udc->ep_qh[i];
/* VDBG("QH addr Register 0x%8x", dr_regs->endpointlistaddr);
VDBG("ep_qh[%d] addr is 0x%8x", i, (u32)&(ep->udc->ep_qh[i])); */
@@ -719,7 +738,7 @@ static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
cpu_to_hc32(req->head->td_dma & DTD_ADDR_MASK);
/* Read prime bit, if 1 goto done */
if (fsl_readl(&dr_regs->endpointprime) & bitmask)
- goto out;
+ return;
do {
/* Set ATDTW bit in USBCMD */
@@ -736,28 +755,10 @@ static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
fsl_writel(temp & ~USB_CMD_ATDTW, &dr_regs->usbcmd);
if (tmp_stat)
- goto out;
+ return;
}
- /* Write dQH next pointer and terminate bit to 0 */
- temp = req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
- dQH->next_dtd_ptr = cpu_to_hc32(temp);
-
- /* Clear active and halt bit */
- temp = cpu_to_hc32(~(EP_QUEUE_HEAD_STATUS_ACTIVE
- | EP_QUEUE_HEAD_STATUS_HALT));
- dQH->size_ioc_int_sts &= temp;
-
- /* Ensure that updates to the QH will occur before priming. */
- wmb();
-
- /* Prime endpoint by writing 1 to ENDPTPRIME */
- temp = ep_is_in(ep)
- ? (1 << (ep_index(ep) + 16))
- : (1 << (ep_index(ep)));
- fsl_writel(temp, &dr_regs->endpointprime);
-out:
- return;
+ fsl_prime_ep(ep, req->head);
}
/* Fill in the dTD structure
@@ -877,7 +878,7 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
VDBG("%s, bad ep", __func__);
return -EINVAL;
}
- if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+ if (usb_endpoint_xfer_isoc(ep->desc)) {
if (req->req.length > ep->ep.maxpacket)
return -EMSGSIZE;
}
@@ -973,25 +974,20 @@ static int fsl_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
/* The request isn't the last request in this ep queue */
if (req->queue.next != &ep->queue) {
- struct ep_queue_head *qh;
struct fsl_req *next_req;
- qh = ep->qh;
next_req = list_entry(req->queue.next, struct fsl_req,
queue);
- /* Point the QH to the first TD of next request */
- fsl_writel((u32) next_req->head, &qh->curr_dtd_ptr);
+ /* prime with dTD of next request */
+ fsl_prime_ep(ep, next_req->head);
}
-
- /* The request hasn't been processed, patch up the TD chain */
+ /* The request hasn't been processed, patch up the TD chain */
} else {
struct fsl_req *prev_req;
prev_req = list_entry(req->queue.prev, struct fsl_req, queue);
- fsl_writel(fsl_readl(&req->tail->next_td_ptr),
- &prev_req->tail->next_td_ptr);
-
+ prev_req->tail->next_td_ptr = req->tail->next_td_ptr;
}
done(ep, req, -ECONNRESET);
@@ -1032,7 +1028,7 @@ static int fsl_ep_set_halt(struct usb_ep *_ep, int value)
goto out;
}
- if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+ if (usb_endpoint_xfer_isoc(ep->desc)) {
status = -EOPNOTSUPP;
goto out;
}
@@ -1068,7 +1064,7 @@ static int fsl_ep_fifo_status(struct usb_ep *_ep)
struct fsl_udc *udc;
int size = 0;
u32 bitmask;
- struct ep_queue_head *d_qh;
+ struct ep_queue_head *qh;
ep = container_of(_ep, struct fsl_ep, ep);
if (!_ep || (!ep->desc && ep_index(ep) != 0))
@@ -1079,13 +1075,13 @@ static int fsl_ep_fifo_status(struct usb_ep *_ep)
if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
return -ESHUTDOWN;
- d_qh = &ep->udc->ep_qh[ep_index(ep) * 2 + ep_is_in(ep)];
+ qh = get_qh_by_ep(ep);
bitmask = (ep_is_in(ep)) ? (1 << (ep_index(ep) + 16)) :
(1 << (ep_index(ep)));
if (fsl_readl(&dr_regs->endptstatus) & bitmask)
- size = (d_qh->size_ioc_int_sts & DTD_PACKET_SIZE)
+ size = (qh->size_ioc_int_sts & DTD_PACKET_SIZE)
>> DTD_LENGTH_BIT_POS;
pr_debug("%s %u\n", __func__, size);
@@ -1938,8 +1934,7 @@ static int fsl_start(struct usb_gadget_driver *driver,
if (!udc_controller)
return -ENODEV;
- if (!driver || (driver->speed != USB_SPEED_FULL
- && driver->speed != USB_SPEED_HIGH)
+ if (!driver || driver->speed < USB_SPEED_FULL
|| !bind || !driver->disconnect || !driver->setup)
return -EINVAL;
diff --git a/drivers/usb/gadget/fsl_usb2_udc.h b/drivers/usb/gadget/fsl_usb2_udc.h
index 1d51be83fda..f781f5dec41 100644
--- a/drivers/usb/gadget/fsl_usb2_udc.h
+++ b/drivers/usb/gadget/fsl_usb2_udc.h
@@ -569,6 +569,16 @@ static void dump_msg(const char *label, const u8 * buf, unsigned int length)
* 2 + ((windex & USB_DIR_IN) ? 1 : 0))
#define get_pipe_by_ep(EP) (ep_index(EP) * 2 + ep_is_in(EP))
+static inline struct ep_queue_head *get_qh_by_ep(struct fsl_ep *ep)
+{
+ /* we only have one ep0 structure but two queue heads */
+ if (ep_index(ep) != 0)
+ return ep->qh;
+ else
+ return &ep->udc->ep_qh[(ep->udc->ep0_dir ==
+ USB_DIR_IN) ? 1 : 0];
+}
+
struct platform_device;
#ifdef CONFIG_ARCH_MXC
int fsl_udc_clk_init(struct platform_device *pdev);
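
The new get_qh_by_ep() above captures the fact that this controller keeps two queue heads per endpoint (OUT in the even slot, IN in the odd slot) but only a single fsl_ep structure for ep0, whose slot must be picked from the current ep0 direction. A small standalone sketch of that indexing, using hypothetical names and a plain integer slot instead of a queue-head pointer:

    #include <assert.h>
    #include <stdio.h>

    /* Hypothetical queue-head table layout: two slots per endpoint,
     * OUT in the even slot, IN in the odd slot. */
    static int qh_slot(int ep_index, int ep_is_in, int ep0_dir_in)
    {
            if (ep_index != 0)
                    return ep_index * 2 + (ep_is_in ? 1 : 0);
            /* ep0 is bidirectional: pick the slot from the current direction. */
            return ep0_dir_in ? 1 : 0;
    }

    int main(void)
    {
            assert(qh_slot(0, 0, 1) == 1);  /* ep0 during an IN stage */
            assert(qh_slot(0, 0, 0) == 0);  /* ep0 during an OUT stage */
            assert(qh_slot(3, 1, 0) == 7);  /* ep3-IN */
            printf("ok\n");
            return 0;
    }
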
diff --git a/drivers/usb/gadget/m66592-udc.c b/drivers/usb/gadget/m66592-udc.c
index 91d0af2a24a..9aa1cbbee45 100644
--- a/drivers/usb/gadget/m66592-udc.c
+++ b/drivers/usb/gadget/m66592-udc.c
@@ -1472,7 +1472,7 @@ static int m66592_start(struct usb_gadget_driver *driver,
int retval;
if (!driver
- || driver->speed != USB_SPEED_HIGH
+ || driver->speed < USB_SPEED_HIGH
|| !bind
|| !driver->setup)
return -EINVAL;
diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c
index 7f1bc9a73cd..da2b9d0be3c 100644
--- a/drivers/usb/gadget/net2280.c
+++ b/drivers/usb/gadget/net2280.c
@@ -1881,7 +1881,7 @@ static int net2280_start(struct usb_gadget *_gadget,
* (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE)
* "must not be used in normal operation"
*/
- if (!driver || driver->speed != USB_SPEED_HIGH
+ if (!driver || driver->speed < USB_SPEED_HIGH
|| !driver->setup)
return -EINVAL;
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c
index 24f84b210ce..fc719a3f855 100644
--- a/drivers/usb/gadget/r8a66597-udc.c
+++ b/drivers/usb/gadget/r8a66597-udc.c
@@ -1746,7 +1746,7 @@ static int r8a66597_start(struct usb_gadget *gadget,
struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
if (!driver
- || driver->speed != USB_SPEED_HIGH
+ || driver->speed < USB_SPEED_HIGH
|| !driver->setup)
return -EINVAL;
if (!r8a66597)
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index a552453dc94..b31448229f0 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -2586,10 +2586,8 @@ static int s3c_hsotg_start(struct usb_gadget_driver *driver,
return -EINVAL;
}
- if (driver->speed != USB_SPEED_HIGH &&
- driver->speed != USB_SPEED_FULL) {
+ if (driver->speed < USB_SPEED_FULL)
dev_err(hsotg->dev, "%s: bad speed\n", __func__);
- }
if (!bind || !driver->setup) {
dev_err(hsotg->dev, "%s: missing entry points\n", __func__);
diff --git a/drivers/usb/gadget/s3c-hsudc.c b/drivers/usb/gadget/s3c-hsudc.c
index 8d54f893cef..20a553b46ae 100644
--- a/drivers/usb/gadget/s3c-hsudc.c
+++ b/drivers/usb/gadget/s3c-hsudc.c
@@ -1142,8 +1142,7 @@ static int s3c_hsudc_start(struct usb_gadget_driver *driver,
int ret;
if (!driver
- || (driver->speed != USB_SPEED_FULL &&
- driver->speed != USB_SPEED_HIGH)
+ || driver->speed < USB_SPEED_FULL
|| !bind
|| !driver->unbind || !driver->disconnect || !driver->setup)
return -EINVAL;
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 56a32033adb..a60679cbbf8 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -1475,6 +1475,7 @@ iso_stream_schedule (
* jump until after the queue is primed.
*/
else {
+ int done = 0;
start = SCHEDULE_SLOP + (now & ~0x07);
/* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */
@@ -1492,18 +1493,18 @@ iso_stream_schedule (
if (stream->highspeed) {
if (itd_slot_ok(ehci, mod, start,
stream->usecs, period))
- break;
+ done = 1;
} else {
if ((start % 8) >= 6)
continue;
if (sitd_slot_ok(ehci, mod, stream,
start, sched, period))
- break;
+ done = 1;
}
- } while (start > next);
+ } while (start > next && !done);
/* no room in the schedule */
- if (start == next) {
+ if (!done) {
ehci_dbg(ehci, "iso resched full %p (now %d max %d)\n",
urb, now, now + mod);
status = -ENOSPC;
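
The iso_stream_schedule() change replaces the `break`-and-compare pattern with an explicit `done` flag, so "a slot was found" is tracked directly rather than inferred from the final value of the loop counter (with the old `start == next` test, a match found on the last candidate could be indistinguishable from a full schedule). A minimal standalone sketch of the flag-driven search; slot_ok() and the numbers are hypothetical:

    #include <stdio.h>

    static int slot_ok(unsigned start)
    {
            /* Hypothetical predicate: pretend only slots divisible by 5 fit. */
            return (start % 5) == 0;
    }

    /* Search backwards from 'start' down to (but excluding) 'next'. */
    static int find_slot(unsigned start, unsigned next, unsigned *out)
    {
            int done = 0;

            do {
                    start--;
                    if (slot_ok(start))
                            done = 1;
            } while (start > next && !done);

            if (!done)
                    return -28;     /* -ENOSPC: schedule full */
            *out = start;
            return 0;
    }

    int main(void)
    {
            unsigned slot;

            if (find_slot(23, 17, &slot) == 0)
                    printf("slot %u\n", slot);      /* prints "slot 20" */
            if (find_slot(19, 16, &slot) != 0)
                    printf("no room\n");            /* 18, 17, 16 all rejected */
            return 0;
    }
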
diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c
index d6e17542861..a403b53e86b 100644
--- a/drivers/usb/host/whci/qset.c
+++ b/drivers/usb/host/whci/qset.c
@@ -124,7 +124,7 @@ void qset_clear(struct whc *whc, struct whc_qset *qset)
{
qset->td_start = qset->td_end = qset->ntds = 0;
- qset->qh.link = cpu_to_le32(QH_LINK_NTDS(8) | QH_LINK_T);
+ qset->qh.link = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T);
qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK;
qset->qh.err_count = 0;
qset->qh.scratch[0] = 0;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index aa94c019579..a1afb7c39f7 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -711,7 +711,10 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
ring = xhci->cmd_ring;
seg = ring->deq_seg;
do {
- memset(seg->trbs, 0, SEGMENT_SIZE);
+ memset(seg->trbs, 0,
+ sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
+ seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
+ cpu_to_le32(~TRB_CYCLE);
seg = seg->next;
} while (seg != ring->deq_seg);
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index c1fa12ec7a9..b63ab157010 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2301,18 +2301,12 @@ static int musb_suspend(struct device *dev)
*/
}
- musb_save_context(musb);
-
spin_unlock_irqrestore(&musb->lock, flags);
return 0;
}
static int musb_resume_noirq(struct device *dev)
{
- struct musb *musb = dev_to_musb(dev);
-
- musb_restore_context(musb);
-
/* for static cmos like DaVinci, register values were preserved
* unless for some reason the whole soc powered down or the USB
* module got reset through the PSC (vs just being disabled).
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index d51043acfe1..922148ff8d2 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1903,7 +1903,7 @@ static int musb_gadget_start(struct usb_gadget *g,
unsigned long flags;
int retval = -EINVAL;
- if (driver->speed != USB_SPEED_HIGH)
+ if (driver->speed < USB_SPEED_HIGH)
goto err0;
pm_runtime_get_sync(musb->controller);
diff --git a/drivers/usb/renesas_usbhs/mod.c b/drivers/usb/renesas_usbhs/mod.c
index 053f86d7000..ad96a389672 100644
--- a/drivers/usb/renesas_usbhs/mod.c
+++ b/drivers/usb/renesas_usbhs/mod.c
@@ -349,7 +349,7 @@ void usbhs_irq_callback_update(struct usbhs_priv *priv, struct usbhs_mod *mod)
if (mod->irq_attch)
intenb1 |= ATTCHE;
- if (mod->irq_attch)
+ if (mod->irq_dtch)
intenb1 |= DTCHE;
if (mod->irq_sign)
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index d9717e0bc1f..7f4e8033857 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -751,53 +751,32 @@ static int usbhsg_gadget_start(struct usb_gadget *gadget,
struct usb_gadget_driver *driver)
{
struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget);
- struct usbhs_priv *priv;
- struct device *dev;
- int ret;
+ struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
if (!driver ||
!driver->setup ||
- driver->speed != USB_SPEED_HIGH)
+ driver->speed < USB_SPEED_FULL)
return -EINVAL;
- dev = usbhsg_gpriv_to_dev(gpriv);
- priv = usbhsg_gpriv_to_priv(gpriv);
-
/* first hook up the driver ... */
gpriv->driver = driver;
gpriv->gadget.dev.driver = &driver->driver;
- ret = device_add(&gpriv->gadget.dev);
- if (ret) {
- dev_err(dev, "device_add error %d\n", ret);
- goto add_fail;
- }
-
return usbhsg_try_start(priv, USBHSG_STATUS_REGISTERD);
-
-add_fail:
- gpriv->driver = NULL;
- gpriv->gadget.dev.driver = NULL;
-
- return ret;
}
static int usbhsg_gadget_stop(struct usb_gadget *gadget,
struct usb_gadget_driver *driver)
{
struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget);
- struct usbhs_priv *priv;
- struct device *dev;
+ struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
if (!driver ||
!driver->unbind)
return -EINVAL;
- dev = usbhsg_gpriv_to_dev(gpriv);
- priv = usbhsg_gpriv_to_priv(gpriv);
-
usbhsg_try_stop(priv, USBHSG_STATUS_REGISTERD);
- device_del(&gpriv->gadget.dev);
+ gpriv->gadget.dev.driver = NULL;
gpriv->driver = NULL;
return 0;
@@ -827,6 +806,13 @@ static int usbhsg_start(struct usbhs_priv *priv)
static int usbhsg_stop(struct usbhs_priv *priv)
{
+ struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
+
+ /* cable disconnect */
+ if (gpriv->driver &&
+ gpriv->driver->disconnect)
+ gpriv->driver->disconnect(&gpriv->gadget);
+
return usbhsg_try_stop(priv, USBHSG_STATUS_STARTED);
}
@@ -876,12 +862,14 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
/*
* init gadget
*/
- device_initialize(&gpriv->gadget.dev);
dev_set_name(&gpriv->gadget.dev, "gadget");
gpriv->gadget.dev.parent = dev;
gpriv->gadget.name = "renesas_usbhs_udc";
gpriv->gadget.ops = &usbhsg_gadget_ops;
gpriv->gadget.is_dualspeed = 1;
+ ret = device_register(&gpriv->gadget.dev);
+ if (ret < 0)
+ goto err_add_udc;
INIT_LIST_HEAD(&gpriv->gadget.ep_list);
@@ -912,12 +900,15 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
ret = usb_add_gadget_udc(dev, &gpriv->gadget);
if (ret)
- goto err_add_udc;
+ goto err_register;
dev_info(dev, "gadget probed\n");
return 0;
+
+err_register:
+ device_unregister(&gpriv->gadget.dev);
err_add_udc:
kfree(gpriv->uep);
@@ -933,6 +924,8 @@ void usbhs_mod_gadget_remove(struct usbhs_priv *priv)
usb_del_gadget_udc(&gpriv->gadget);
+ device_unregister(&gpriv->gadget.dev);
+
usbhsg_controller_unregister(gpriv);
kfree(gpriv->uep);
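
The mod_gadget.c rework registers the gadget device during probe and tears it down both in the new err_register label and in remove; as usual the error labels undo the setup steps in reverse order, and only the steps that already succeeded. A compact standalone sketch of that goto-based unwind with hypothetical setup/teardown pairs:

    #include <stdio.h>

    static int step_a_ok = 1, step_b_ok = 1;

    static int  register_dev(void)   { puts("register dev");   return step_a_ok ? 0 : -1; }
    static void unregister_dev(void) { puts("unregister dev"); }
    static int  add_udc(void)        { puts("add udc");        return step_b_ok ? 0 : -1; }

    static int probe(void)
    {
            int ret;

            ret = register_dev();
            if (ret)
                    goto err_register;
            ret = add_udc();
            if (ret)
                    goto err_udc;
            return 0;

    err_udc:
            /* undo only what already succeeded, newest first */
            unregister_dev();
    err_register:
            return ret;
    }

    int main(void)
    {
            step_b_ok = 0;          /* simulate the UDC registration failing */
            printf("probe = %d\n", probe());
            return 0;
    }
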
diff --git a/drivers/usb/renesas_usbhs/mod_host.c b/drivers/usb/renesas_usbhs/mod_host.c
index bade761a1e5..7955de58995 100644
--- a/drivers/usb/renesas_usbhs/mod_host.c
+++ b/drivers/usb/renesas_usbhs/mod_host.c
@@ -1267,6 +1267,7 @@ int usbhs_mod_host_probe(struct usbhs_priv *priv)
dev_err(dev, "Failed to create hcd\n");
return -ENOMEM;
}
+ hcd->has_tt = 1; /* for low/full speed */
pipe_info = kzalloc(sizeof(*pipe_info) * pipe_size, GFP_KERNEL);
if (!pipe_info) {
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index bd4298bb675..ff3db5d056a 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -736,6 +736,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) },
{ USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID),
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 571fa96b49c..055b64ef0bb 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -112,6 +112,7 @@
/* Propox devices */
#define FTDI_PROPOX_JTAGCABLEII_PID 0xD738
+#define FTDI_PROPOX_ISPCABLEIII_PID 0xD739
/* Lenz LI-USB Computer Interface. */
#define FTDI_LENZ_LIUSB_PID 0xD780
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index d865878c9f9..6dd64534fad 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -661,6 +661,14 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x31) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x32) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x02) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x03) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x10) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x12) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x13) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x01) }, /* E398 3G Modem */
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x02) }, /* E398 3G PC UI Interface */
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x03) }, /* E398 3G Application Interface */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },
@@ -747,6 +755,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 3041a974faf..24caba79d72 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1854,6 +1854,13 @@ UNUSUAL_DEV( 0x1370, 0x6828, 0x0110, 0x0110,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_RESIDUE ),
+/* Reported by Qinglin Ye <yestyle@gmail.com> */
+UNUSUAL_DEV( 0x13fe, 0x3600, 0x0100, 0x0100,
+ "Kingston",
+ "DT 101 G2",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_BULK_IGNORE_TAG ),
+
/* Reported by Francesco Foresti <frafore@tiscali.it> */
UNUSUAL_DEV( 0x14cd, 0x6600, 0x0201, 0x0201,
"Super Top",
diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c
index 55f91d9ab00..29577bf1f55 100644
--- a/drivers/video/da8xx-fb.c
+++ b/drivers/video/da8xx-fb.c
@@ -116,6 +116,7 @@
/* Clock registers available only on Version 2 */
#define LCD_CLK_ENABLE_REG 0x6c
#define LCD_CLK_RESET_REG 0x70
+#define LCD_CLK_MAIN_RESET BIT(3)
#define LCD_NUM_BUFFERS 2
@@ -244,6 +245,10 @@ static inline void lcd_enable_raster(void)
{
u32 reg;
+ /* Bring LCDC out of reset */
+ if (lcd_revision == LCD_VERSION_2)
+ lcdc_write(0, LCD_CLK_RESET_REG);
+
reg = lcdc_read(LCD_RASTER_CTRL_REG);
if (!(reg & LCD_RASTER_ENABLE))
lcdc_write(reg | LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG);
@@ -257,6 +262,10 @@ static inline void lcd_disable_raster(void)
reg = lcdc_read(LCD_RASTER_CTRL_REG);
if (reg & LCD_RASTER_ENABLE)
lcdc_write(reg & ~LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG);
+
+ if (lcd_revision == LCD_VERSION_2)
+ /* Write 1 to reset LCDC */
+ lcdc_write(LCD_CLK_MAIN_RESET, LCD_CLK_RESET_REG);
}
static void lcd_blit(int load_mode, struct da8xx_fb_par *par)
@@ -584,8 +593,12 @@ static void lcd_reset(struct da8xx_fb_par *par)
lcdc_write(0, LCD_DMA_CTRL_REG);
lcdc_write(0, LCD_RASTER_CTRL_REG);
- if (lcd_revision == LCD_VERSION_2)
+ if (lcd_revision == LCD_VERSION_2) {
lcdc_write(0, LCD_INT_ENABLE_SET_REG);
+ /* Write 1 to reset */
+ lcdc_write(LCD_CLK_MAIN_RESET, LCD_CLK_RESET_REG);
+ lcdc_write(0, LCD_CLK_RESET_REG);
+ }
}
static void lcd_calc_clk_divider(struct da8xx_fb_par *par)
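
On version-2 LCD controllers the raster enable/disable paths are now bracketed by the clock reset register: the block is taken out of main reset before the raster is switched on, put back into reset after it is switched off, and lcd_reset() pulses the same bit. A standalone sketch of that sequencing; lcdc_write() here is a stub that only prints, and the register values mirror the defines added above:

    #include <stdio.h>

    #define LCD_CLK_RESET_REG       0x70
    #define LCD_CLK_MAIN_RESET      (1u << 3)

    #define LCD_VERSION_2           2
    static int lcd_revision = LCD_VERSION_2;

    static void lcdc_write(unsigned val, unsigned reg)
    {
            printf("write 0x%02x -> reg 0x%02x\n", val, reg);
    }

    static void lcd_enable_raster(void)
    {
            /* Bring the LCDC out of reset before turning the raster on. */
            if (lcd_revision == LCD_VERSION_2)
                    lcdc_write(0, LCD_CLK_RESET_REG);
            puts("raster enabled");
    }

    static void lcd_disable_raster(void)
    {
            puts("raster disabled");
            /* Put the LCDC back into reset once the raster is off. */
            if (lcd_revision == LCD_VERSION_2)
                    lcdc_write(LCD_CLK_MAIN_RESET, LCD_CLK_RESET_REG);
    }

    int main(void)
    {
            lcd_enable_raster();
            lcd_disable_raster();
            return 0;
    }
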
diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c
index 0ccd7adf47b..6f61e781f15 100644
--- a/drivers/video/omap/dispc.c
+++ b/drivers/video/omap/dispc.c
@@ -19,6 +19,7 @@
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index 3532782551c..5c81533eaca 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -1720,12 +1720,11 @@ static int dispc_ovl_calc_scaling(enum omap_plane plane,
const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE);
unsigned long fclk = 0;
- if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) {
- if (width != out_width || height != out_height)
- return -EINVAL;
- else
- return 0;
- }
+ if (width == out_width && height == out_height)
+ return 0;
+
+ if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0)
+ return -EINVAL;
if (out_width < width / maxdownscale ||
out_width > width * 8)
diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c
index 3262f0f1fa3..c56378c555b 100644
--- a/drivers/video/omap2/dss/hdmi.c
+++ b/drivers/video/omap2/dss/hdmi.c
@@ -269,7 +269,7 @@ static void update_hdmi_timings(struct hdmi_config *cfg,
unsigned long hdmi_get_pixel_clock(void)
{
/* HDMI Pixel Clock in Mhz */
- return hdmi.ip_data.cfg.timings.timings.pixel_clock * 10000;
+ return hdmi.ip_data.cfg.timings.timings.pixel_clock * 1000;
}
static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy,
diff --git a/drivers/video/via/share.h b/drivers/video/via/share.h
index 69d882cbe70..c01c1c16272 100644
--- a/drivers/video/via/share.h
+++ b/drivers/video/via/share.h
@@ -559,8 +559,8 @@
#define M1200X720_R60_VSP POSITIVE
/* 1200x900@60 Sync Polarity (DCON) */
-#define M1200X900_R60_HSP NEGATIVE
-#define M1200X900_R60_VSP NEGATIVE
+#define M1200X900_R60_HSP POSITIVE
+#define M1200X900_R60_VSP POSITIVE
/* 1280x600@60 Sync Polarity (GTF Mode) */
#define M1280x600_R60_HSP NEGATIVE
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 816ed08e7cf..1a61939b85f 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -37,7 +37,7 @@ config VIRTIO_BALLOON
config VIRTIO_MMIO
tristate "Platform bus driver for memory mapped virtio devices (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ depends on HAS_IOMEM && EXPERIMENTAL
select VIRTIO
select VIRTIO_RING
---help---
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index acc5e43c373..7317dc2ec42 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -118,7 +118,7 @@ static void vm_finalize_features(struct virtio_device *vdev)
vring_transport_features(vdev);
for (i = 0; i < ARRAY_SIZE(vdev->features); i++) {
- writel(i, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES_SET);
+ writel(i, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES_SEL);
writel(vdev->features[i],
vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES);
}
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 3d1bf41e889..03d1984bd36 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -169,11 +169,29 @@ static void vp_set_status(struct virtio_device *vdev, u8 status)
iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
}
+/* wait for pending irq handlers */
+static void vp_synchronize_vectors(struct virtio_device *vdev)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ int i;
+
+ if (vp_dev->intx_enabled)
+ synchronize_irq(vp_dev->pci_dev->irq);
+
+ for (i = 0; i < vp_dev->msix_vectors; ++i)
+ synchronize_irq(vp_dev->msix_entries[i].vector);
+}
+
static void vp_reset(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
/* 0 status means a reset. */
iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
+ /* Flush out the status write, and flush in device writes,
+ * including MSI-X interrupts, if any. */
+ ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
+ /* Flush pending VQ/configuration callbacks. */
+ vp_synchronize_vectors(vdev);
}
/* the notify function used when creating a virt queue */
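
vp_reset() now follows the usual quiesce order for a reset: write the reset, read the status register back so the posted write reaches the device, then wait until any interrupt handlers already in flight have finished, so no virtqueue or configuration callback can run after the reset returns. A much-simplified, single-threaded standalone sketch of that ordering; the register and in-flight counter are stand-ins, and synchronize_vectors() returns immediately here because nothing runs concurrently:

    #include <stdio.h>

    static unsigned char status_reg;
    static int callbacks_in_flight;

    static void iowrite8(unsigned char v) { status_reg = v; }
    static unsigned char ioread8(void)    { return status_reg; }

    /* Stand-in for synchronize_irq(): wait for handlers that were already
     * running to return (trivially true in this single-threaded sketch). */
    static void synchronize_vectors(void)
    {
            while (callbacks_in_flight)
                    ;       /* real code sleeps inside synchronize_irq() */
    }

    static void reset_device(void)
    {
            iowrite8(0);            /* 0 status means reset */
            (void)ioread8();        /* flush the posted write */
            synchronize_vectors();  /* no VQ/config callback after this point */
    }

    int main(void)
    {
            status_reg = 0x0f;
            reset_device();
            printf("status after reset: %u\n", status_reg);
            return 0;
    }
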
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 8e964b91c44..284798aaf8b 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -166,7 +166,7 @@ retry:
/*
* Get IO TLB memory from any location.
*/
- xen_io_tlb_start = alloc_bootmem(bytes);
+ xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
if (!xen_io_tlb_start) {
m = "Cannot allocate Xen-SWIOTLB buffer!\n";
goto error;
@@ -179,7 +179,7 @@ retry:
bytes,
xen_io_tlb_nslabs);
if (rc) {
- free_bootmem(__pa(xen_io_tlb_start), bytes);
+ free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
m = "Failed to get contiguous memory for DMA from Xen!\n"\
"You either: don't have the permissions, do not have"\
" enough free memory under 4GB, or the hypervisor memory"\
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index b3b8f2f3ad1..ede860f921d 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -621,15 +621,6 @@ static struct xenbus_watch *find_watch(const char *token)
return NULL;
}
-static void xs_reset_watches(void)
-{
- int err;
-
- err = xs_error(xs_single(XBT_NIL, XS_RESET_WATCHES, "", NULL));
- if (err && err != -EEXIST)
- printk(KERN_WARNING "xs_reset_watches failed: %d\n", err);
-}
-
/* Register callback to watch this node. */
int register_xenbus_watch(struct xenbus_watch *watch)
{
@@ -906,9 +897,5 @@ int xs_init(void)
if (IS_ERR(task))
return PTR_ERR(task);
- /* shutdown watches for kexec boot */
- if (xen_hvm_domain())
- xs_reset_watches();
-
return 0;
}
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 98ab240072e..704a2ba08ea 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -64,6 +64,8 @@ struct btrfs_worker_thread {
int idle;
};
+static int __btrfs_start_workers(struct btrfs_workers *workers);
+
/*
* btrfs_start_workers uses kthread_run, which can block waiting for memory
* for a very long time. It will actually throttle on page writeback,
@@ -88,27 +90,10 @@ static void start_new_worker_func(struct btrfs_work *work)
{
struct worker_start *start;
start = container_of(work, struct worker_start, work);
- btrfs_start_workers(start->queue, 1);
+ __btrfs_start_workers(start->queue);
kfree(start);
}
-static int start_new_worker(struct btrfs_workers *queue)
-{
- struct worker_start *start;
- int ret;
-
- start = kzalloc(sizeof(*start), GFP_NOFS);
- if (!start)
- return -ENOMEM;
-
- start->work.func = start_new_worker_func;
- start->queue = queue;
- ret = btrfs_queue_worker(queue->atomic_worker_start, &start->work);
- if (ret)
- kfree(start);
- return ret;
-}
-
/*
* helper function to move a thread onto the idle list after it
* has finished some requests.
@@ -153,12 +138,20 @@ static void check_busy_worker(struct btrfs_worker_thread *worker)
static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
{
struct btrfs_workers *workers = worker->workers;
+ struct worker_start *start;
unsigned long flags;
rmb();
if (!workers->atomic_start_pending)
return;
+ start = kzalloc(sizeof(*start), GFP_NOFS);
+ if (!start)
+ return;
+
+ start->work.func = start_new_worker_func;
+ start->queue = workers;
+
spin_lock_irqsave(&workers->lock, flags);
if (!workers->atomic_start_pending)
goto out;
@@ -170,10 +163,11 @@ static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
workers->num_workers_starting += 1;
spin_unlock_irqrestore(&workers->lock, flags);
- start_new_worker(workers);
+ btrfs_queue_worker(workers->atomic_worker_start, &start->work);
return;
out:
+ kfree(start);
spin_unlock_irqrestore(&workers->lock, flags);
}
@@ -331,7 +325,7 @@ again:
run_ordered_completions(worker->workers, work);
check_pending_worker_creates(worker);
-
+ cond_resched();
}
spin_lock_irq(&worker->lock);
@@ -462,56 +456,55 @@ void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
* starts new worker threads. This does not enforce the max worker
* count in case you need to temporarily go past it.
*/
-static int __btrfs_start_workers(struct btrfs_workers *workers,
- int num_workers)
+static int __btrfs_start_workers(struct btrfs_workers *workers)
{
struct btrfs_worker_thread *worker;
int ret = 0;
- int i;
- for (i = 0; i < num_workers; i++) {
- worker = kzalloc(sizeof(*worker), GFP_NOFS);
- if (!worker) {
- ret = -ENOMEM;
- goto fail;
- }
+ worker = kzalloc(sizeof(*worker), GFP_NOFS);
+ if (!worker) {
+ ret = -ENOMEM;
+ goto fail;
+ }
- INIT_LIST_HEAD(&worker->pending);
- INIT_LIST_HEAD(&worker->prio_pending);
- INIT_LIST_HEAD(&worker->worker_list);
- spin_lock_init(&worker->lock);
-
- atomic_set(&worker->num_pending, 0);
- atomic_set(&worker->refs, 1);
- worker->workers = workers;
- worker->task = kthread_run(worker_loop, worker,
- "btrfs-%s-%d", workers->name,
- workers->num_workers + i);
- if (IS_ERR(worker->task)) {
- ret = PTR_ERR(worker->task);
- kfree(worker);
- goto fail;
- }
- spin_lock_irq(&workers->lock);
- list_add_tail(&worker->worker_list, &workers->idle_list);
- worker->idle = 1;
- workers->num_workers++;
- workers->num_workers_starting--;
- WARN_ON(workers->num_workers_starting < 0);
- spin_unlock_irq(&workers->lock);
+ INIT_LIST_HEAD(&worker->pending);
+ INIT_LIST_HEAD(&worker->prio_pending);
+ INIT_LIST_HEAD(&worker->worker_list);
+ spin_lock_init(&worker->lock);
+
+ atomic_set(&worker->num_pending, 0);
+ atomic_set(&worker->refs, 1);
+ worker->workers = workers;
+ worker->task = kthread_run(worker_loop, worker,
+ "btrfs-%s-%d", workers->name,
+ workers->num_workers + 1);
+ if (IS_ERR(worker->task)) {
+ ret = PTR_ERR(worker->task);
+ kfree(worker);
+ goto fail;
}
+ spin_lock_irq(&workers->lock);
+ list_add_tail(&worker->worker_list, &workers->idle_list);
+ worker->idle = 1;
+ workers->num_workers++;
+ workers->num_workers_starting--;
+ WARN_ON(workers->num_workers_starting < 0);
+ spin_unlock_irq(&workers->lock);
+
return 0;
fail:
- btrfs_stop_workers(workers);
+ spin_lock_irq(&workers->lock);
+ workers->num_workers_starting--;
+ spin_unlock_irq(&workers->lock);
return ret;
}
-int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
+int btrfs_start_workers(struct btrfs_workers *workers)
{
spin_lock_irq(&workers->lock);
- workers->num_workers_starting += num_workers;
+ workers->num_workers_starting++;
spin_unlock_irq(&workers->lock);
- return __btrfs_start_workers(workers, num_workers);
+ return __btrfs_start_workers(workers);
}
/*
@@ -568,6 +561,7 @@ static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
struct btrfs_worker_thread *worker;
unsigned long flags;
struct list_head *fallback;
+ int ret;
again:
spin_lock_irqsave(&workers->lock, flags);
@@ -584,7 +578,9 @@ again:
workers->num_workers_starting++;
spin_unlock_irqrestore(&workers->lock, flags);
/* we're below the limit, start another worker */
- __btrfs_start_workers(workers, 1);
+ ret = __btrfs_start_workers(workers);
+ if (ret)
+ goto fallback;
goto again;
}
}
@@ -665,7 +661,7 @@ void btrfs_set_work_high_prio(struct btrfs_work *work)
/*
* places a struct btrfs_work into the pending queue of one of the kthreads
*/
-int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
+void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
struct btrfs_worker_thread *worker;
unsigned long flags;
@@ -673,7 +669,7 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
/* don't requeue something already on a list */
if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
- goto out;
+ return;
worker = find_worker(workers);
if (workers->ordered) {
@@ -712,7 +708,4 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
if (wake)
wake_up_process(worker->task);
spin_unlock_irqrestore(&worker->lock, flags);
-
-out:
- return 0;
}
diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
index 5077746cf85..f34cc31fa3c 100644
--- a/fs/btrfs/async-thread.h
+++ b/fs/btrfs/async-thread.h
@@ -109,8 +109,8 @@ struct btrfs_workers {
char *name;
};
-int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
-int btrfs_start_workers(struct btrfs_workers *workers, int num_workers);
+void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
+int btrfs_start_workers(struct btrfs_workers *workers);
int btrfs_stop_workers(struct btrfs_workers *workers);
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
struct btrfs_workers *async_starter);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 04a5dfcee5a..67385033323 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -2369,6 +2369,9 @@ int btrfs_block_rsv_check(struct btrfs_root *root,
int btrfs_block_rsv_refill(struct btrfs_root *root,
struct btrfs_block_rsv *block_rsv,
u64 min_reserved);
+int btrfs_block_rsv_refill_noflush(struct btrfs_root *root,
+ struct btrfs_block_rsv *block_rsv,
+ u64 min_reserved);
int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
struct btrfs_block_rsv *dst_rsv,
u64 num_bytes);
@@ -2689,7 +2692,8 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
int btrfs_readpage(struct file *file, struct page *page);
void btrfs_evict_inode(struct inode *inode);
int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc);
-void btrfs_dirty_inode(struct inode *inode, int flags);
+int btrfs_dirty_inode(struct inode *inode);
+int btrfs_update_time(struct file *file);
struct inode *btrfs_alloc_inode(struct super_block *sb);
void btrfs_destroy_inode(struct inode *inode);
int btrfs_drop_inode(struct inode *inode);
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 5b163572e0c..9c1eccc2c50 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -640,8 +640,8 @@ static int btrfs_delayed_inode_reserve_metadata(
* Now if src_rsv == delalloc_block_rsv we'll let it just steal since
* we're accounted for.
*/
- if (!trans->bytes_reserved &&
- src_rsv != &root->fs_info->delalloc_block_rsv) {
+ if (!src_rsv || (!trans->bytes_reserved &&
+ src_rsv != &root->fs_info->delalloc_block_rsv)) {
ret = btrfs_block_rsv_add_noflush(root, dst_rsv, num_bytes);
/*
* Since we're under a transaction reserve_metadata_bytes could
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index b0917590152..f99a099a774 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2190,19 +2190,27 @@ struct btrfs_root *open_ctree(struct super_block *sb,
fs_info->endio_meta_write_workers.idle_thresh = 2;
fs_info->readahead_workers.idle_thresh = 2;
- btrfs_start_workers(&fs_info->workers, 1);
- btrfs_start_workers(&fs_info->generic_worker, 1);
- btrfs_start_workers(&fs_info->submit_workers, 1);
- btrfs_start_workers(&fs_info->delalloc_workers, 1);
- btrfs_start_workers(&fs_info->fixup_workers, 1);
- btrfs_start_workers(&fs_info->endio_workers, 1);
- btrfs_start_workers(&fs_info->endio_meta_workers, 1);
- btrfs_start_workers(&fs_info->endio_meta_write_workers, 1);
- btrfs_start_workers(&fs_info->endio_write_workers, 1);
- btrfs_start_workers(&fs_info->endio_freespace_worker, 1);
- btrfs_start_workers(&fs_info->delayed_workers, 1);
- btrfs_start_workers(&fs_info->caching_workers, 1);
- btrfs_start_workers(&fs_info->readahead_workers, 1);
+ /*
+ * btrfs_start_workers can really only fail because of ENOMEM so just
+ * return -ENOMEM if any of these fail.
+ */
+ ret = btrfs_start_workers(&fs_info->workers);
+ ret |= btrfs_start_workers(&fs_info->generic_worker);
+ ret |= btrfs_start_workers(&fs_info->submit_workers);
+ ret |= btrfs_start_workers(&fs_info->delalloc_workers);
+ ret |= btrfs_start_workers(&fs_info->fixup_workers);
+ ret |= btrfs_start_workers(&fs_info->endio_workers);
+ ret |= btrfs_start_workers(&fs_info->endio_meta_workers);
+ ret |= btrfs_start_workers(&fs_info->endio_meta_write_workers);
+ ret |= btrfs_start_workers(&fs_info->endio_write_workers);
+ ret |= btrfs_start_workers(&fs_info->endio_freespace_worker);
+ ret |= btrfs_start_workers(&fs_info->delayed_workers);
+ ret |= btrfs_start_workers(&fs_info->caching_workers);
+ ret |= btrfs_start_workers(&fs_info->readahead_workers);
+ if (ret) {
+ ret = -ENOMEM;
+ goto fail_sb_buffer;
+ }
fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
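
open_ctree() now checks the worker-pool starts by OR-ing their return values and collapsing any failure into a single -ENOMEM, which works here because, as the added comment says, allocation failure is the only realistic error; the individual codes are deliberately discarded. A standalone sketch of the pattern with a hypothetical start function:

    #include <stdio.h>

    #define ENOMEM 12

    static int start_workers(int fail)
    {
            return fail ? -ENOMEM : 0;
    }

    int main(void)
    {
            int ret;

            /* OR the results: any non-zero value means at least one start failed. */
            ret  = start_workers(0);
            ret |= start_workers(0);
            ret |= start_workers(1);        /* simulate one pool failing */
            if (ret)
                    ret = -ENOMEM;          /* collapse to a single, known code */

            printf("ret = %d\n", ret);
            return 0;
    }
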
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 930ae894973..f5fbe576d2b 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2822,7 +2822,7 @@ out_free:
btrfs_release_path(path);
out:
spin_lock(&block_group->lock);
- if (!ret)
+ if (!ret && dcs == BTRFS_DC_SETUP)
block_group->cache_generation = trans->transid;
block_group->disk_cache_state = dcs;
spin_unlock(&block_group->lock);
@@ -3888,9 +3888,9 @@ int btrfs_block_rsv_check(struct btrfs_root *root,
return ret;
}
-int btrfs_block_rsv_refill(struct btrfs_root *root,
- struct btrfs_block_rsv *block_rsv,
- u64 min_reserved)
+static inline int __btrfs_block_rsv_refill(struct btrfs_root *root,
+ struct btrfs_block_rsv *block_rsv,
+ u64 min_reserved, int flush)
{
u64 num_bytes = 0;
int ret = -ENOSPC;
@@ -3909,7 +3909,7 @@ int btrfs_block_rsv_refill(struct btrfs_root *root,
if (!ret)
return 0;
- ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 1);
+ ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
if (!ret) {
block_rsv_add_bytes(block_rsv, num_bytes, 0);
return 0;
@@ -3918,6 +3918,20 @@ int btrfs_block_rsv_refill(struct btrfs_root *root,
return ret;
}
+int btrfs_block_rsv_refill(struct btrfs_root *root,
+ struct btrfs_block_rsv *block_rsv,
+ u64 min_reserved)
+{
+ return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 1);
+}
+
+int btrfs_block_rsv_refill_noflush(struct btrfs_root *root,
+ struct btrfs_block_rsv *block_rsv,
+ u64 min_reserved)
+{
+ return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 0);
+}
+
int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
struct btrfs_block_rsv *dst_rsv,
u64 num_bytes)
@@ -4190,12 +4204,17 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
u64 to_reserve = 0;
+ u64 csum_bytes;
unsigned nr_extents = 0;
+ int extra_reserve = 0;
int flush = 1;
int ret;
+ /* Need to be holding the i_mutex here if we aren't free space cache */
if (btrfs_is_free_space_inode(root, inode))
flush = 0;
+ else
+ WARN_ON(!mutex_is_locked(&inode->i_mutex));
if (flush && btrfs_transaction_in_commit(root->fs_info))
schedule_timeout(1);
@@ -4206,11 +4225,9 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
BTRFS_I(inode)->outstanding_extents++;
if (BTRFS_I(inode)->outstanding_extents >
- BTRFS_I(inode)->reserved_extents) {
+ BTRFS_I(inode)->reserved_extents)
nr_extents = BTRFS_I(inode)->outstanding_extents -
BTRFS_I(inode)->reserved_extents;
- BTRFS_I(inode)->reserved_extents += nr_extents;
- }
/*
* Add an item to reserve for updating the inode when we complete the
@@ -4218,11 +4235,12 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
*/
if (!BTRFS_I(inode)->delalloc_meta_reserved) {
nr_extents++;
- BTRFS_I(inode)->delalloc_meta_reserved = 1;
+ extra_reserve = 1;
}
to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
+ csum_bytes = BTRFS_I(inode)->csum_bytes;
spin_unlock(&BTRFS_I(inode)->lock);
ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
@@ -4232,22 +4250,35 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
spin_lock(&BTRFS_I(inode)->lock);
dropped = drop_outstanding_extent(inode);
- to_free = calc_csum_metadata_size(inode, num_bytes, 0);
- spin_unlock(&BTRFS_I(inode)->lock);
- to_free += btrfs_calc_trans_metadata_size(root, dropped);
-
/*
- * Somebody could have come in and twiddled with the
- * reservation, so if we have to free more than we would have
- * reserved from this reservation go ahead and release those
- * bytes.
+ * If the inode's csum_bytes is the same as the original
+ * csum_bytes then we know we haven't raced with any free()ers
+ * so we can just reduce our inodes csum bytes and carry on.
+ * Otherwise we have to do the normal free thing to account for
+ * the case that the free side didn't free up its reserve
+ * because of this outstanding reservation.
*/
- to_free -= to_reserve;
+ if (BTRFS_I(inode)->csum_bytes == csum_bytes)
+ calc_csum_metadata_size(inode, num_bytes, 0);
+ else
+ to_free = calc_csum_metadata_size(inode, num_bytes, 0);
+ spin_unlock(&BTRFS_I(inode)->lock);
+ if (dropped)
+ to_free += btrfs_calc_trans_metadata_size(root, dropped);
+
if (to_free)
btrfs_block_rsv_release(root, block_rsv, to_free);
return ret;
}
+ spin_lock(&BTRFS_I(inode)->lock);
+ if (extra_reserve) {
+ BTRFS_I(inode)->delalloc_meta_reserved = 1;
+ nr_extents--;
+ }
+ BTRFS_I(inode)->reserved_extents += nr_extents;
+ spin_unlock(&BTRFS_I(inode)->lock);
+
block_rsv_add_bytes(block_rsv, to_reserve, 1);
return 0;
@@ -5093,11 +5124,11 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root = orig_root->fs_info->extent_root;
struct btrfs_free_cluster *last_ptr = NULL;
struct btrfs_block_group_cache *block_group = NULL;
+ struct btrfs_block_group_cache *used_block_group;
int empty_cluster = 2 * 1024 * 1024;
int allowed_chunk_alloc = 0;
int done_chunk_alloc = 0;
struct btrfs_space_info *space_info;
- int last_ptr_loop = 0;
int loop = 0;
int index = 0;
int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
@@ -5159,6 +5190,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
ideal_cache:
block_group = btrfs_lookup_block_group(root->fs_info,
search_start);
+ used_block_group = block_group;
/*
* we don't want to use the block group if it doesn't match our
* allocation bits, or if its not cached.
@@ -5196,6 +5228,7 @@ search:
u64 offset;
int cached;
+ used_block_group = block_group;
btrfs_get_block_group(block_group);
search_start = block_group->key.objectid;
@@ -5265,84 +5298,73 @@ alloc:
spin_lock(&block_group->free_space_ctl->tree_lock);
if (cached &&
block_group->free_space_ctl->free_space <
- num_bytes + empty_size) {
+ num_bytes + empty_cluster + empty_size) {
spin_unlock(&block_group->free_space_ctl->tree_lock);
goto loop;
}
spin_unlock(&block_group->free_space_ctl->tree_lock);
/*
- * Ok we want to try and use the cluster allocator, so lets look
- * there, unless we are on LOOP_NO_EMPTY_SIZE, since we will
- * have tried the cluster allocator plenty of times at this
- * point and not have found anything, so we are likely way too
- * fragmented for the clustering stuff to find anything, so lets
- * just skip it and let the allocator find whatever block it can
- * find
+ * Ok we want to try and use the cluster allocator, so
+ * lets look there
*/
- if (last_ptr && loop < LOOP_NO_EMPTY_SIZE) {
+ if (last_ptr) {
/*
* the refill lock keeps out other
* people trying to start a new cluster
*/
spin_lock(&last_ptr->refill_lock);
- if (last_ptr->block_group &&
- (last_ptr->block_group->ro ||
- !block_group_bits(last_ptr->block_group, data))) {
- offset = 0;
+ used_block_group = last_ptr->block_group;
+ if (used_block_group != block_group &&
+ (!used_block_group ||
+ used_block_group->ro ||
+ !block_group_bits(used_block_group, data))) {
+ used_block_group = block_group;
goto refill_cluster;
}
- offset = btrfs_alloc_from_cluster(block_group, last_ptr,
- num_bytes, search_start);
+ if (used_block_group != block_group)
+ btrfs_get_block_group(used_block_group);
+
+ offset = btrfs_alloc_from_cluster(used_block_group,
+ last_ptr, num_bytes, used_block_group->key.objectid);
if (offset) {
/* we have a block, we're done */
spin_unlock(&last_ptr->refill_lock);
goto checks;
}
- spin_lock(&last_ptr->lock);
- /*
- * whoops, this cluster doesn't actually point to
- * this block group. Get a ref on the block
- * group is does point to and try again
- */
- if (!last_ptr_loop && last_ptr->block_group &&
- last_ptr->block_group != block_group &&
- index <=
- get_block_group_index(last_ptr->block_group)) {
-
- btrfs_put_block_group(block_group);
- block_group = last_ptr->block_group;
- btrfs_get_block_group(block_group);
- spin_unlock(&last_ptr->lock);
- spin_unlock(&last_ptr->refill_lock);
-
- last_ptr_loop = 1;
- search_start = block_group->key.objectid;
- /*
- * we know this block group is properly
- * in the list because
- * btrfs_remove_block_group, drops the
- * cluster before it removes the block
- * group from the list
- */
- goto have_block_group;
+ WARN_ON(last_ptr->block_group != used_block_group);
+ if (used_block_group != block_group) {
+ btrfs_put_block_group(used_block_group);
+ used_block_group = block_group;
}
- spin_unlock(&last_ptr->lock);
refill_cluster:
+ BUG_ON(used_block_group != block_group);
+ /* If we are on LOOP_NO_EMPTY_SIZE, we can't
+ * set up a new cluster, so let's just skip it
+ * and let the allocator find whatever block
+ * it can find. If we reach this point, we
+ * will have tried the cluster allocator
+ * plenty of times and not have found
+ * anything, so we are likely way too
+ * fragmented for the clustering stuff to find
+ * anything. */
+ if (loop >= LOOP_NO_EMPTY_SIZE) {
+ spin_unlock(&last_ptr->refill_lock);
+ goto unclustered_alloc;
+ }
+
/*
* this cluster didn't work out, free it and
* start over
*/
btrfs_return_cluster_to_free_space(NULL, last_ptr);
- last_ptr_loop = 0;
-
/* allocate a cluster in this block group */
ret = btrfs_find_space_cluster(trans, root,
block_group, last_ptr,
- offset, num_bytes,
+ search_start, num_bytes,
empty_cluster + empty_size);
if (ret == 0) {
/*
@@ -5378,6 +5400,7 @@ refill_cluster:
goto loop;
}
+unclustered_alloc:
offset = btrfs_find_space_for_alloc(block_group, search_start,
num_bytes, empty_size);
/*
@@ -5404,14 +5427,14 @@ checks:
search_start = stripe_align(root, offset);
/* move on to the next group */
if (search_start + num_bytes >= search_end) {
- btrfs_add_free_space(block_group, offset, num_bytes);
+ btrfs_add_free_space(used_block_group, offset, num_bytes);
goto loop;
}
/* move on to the next group */
if (search_start + num_bytes >
- block_group->key.objectid + block_group->key.offset) {
- btrfs_add_free_space(block_group, offset, num_bytes);
+ used_block_group->key.objectid + used_block_group->key.offset) {
+ btrfs_add_free_space(used_block_group, offset, num_bytes);
goto loop;
}
@@ -5419,14 +5442,14 @@ checks:
ins->offset = num_bytes;
if (offset < search_start)
- btrfs_add_free_space(block_group, offset,
+ btrfs_add_free_space(used_block_group, offset,
search_start - offset);
BUG_ON(offset > search_start);
- ret = btrfs_update_reserved_bytes(block_group, num_bytes,
+ ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
alloc_type);
if (ret == -EAGAIN) {
- btrfs_add_free_space(block_group, offset, num_bytes);
+ btrfs_add_free_space(used_block_group, offset, num_bytes);
goto loop;
}
@@ -5435,15 +5458,19 @@ checks:
ins->offset = num_bytes;
if (offset < search_start)
- btrfs_add_free_space(block_group, offset,
+ btrfs_add_free_space(used_block_group, offset,
search_start - offset);
BUG_ON(offset > search_start);
+ if (used_block_group != block_group)
+ btrfs_put_block_group(used_block_group);
btrfs_put_block_group(block_group);
break;
loop:
failed_cluster_refill = false;
failed_alloc = false;
BUG_ON(index != get_block_group_index(block_group));
+ if (used_block_group != block_group)
+ btrfs_put_block_group(used_block_group);
btrfs_put_block_group(block_group);
}
up_read(&space_info->groups_sem);
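
btrfs_block_rsv_refill() is split above into a common static helper plus two thin wrappers that differ only in the flush flag, so callers that must not kick off writeback (the new _noflush variant) share the reservation logic with the normal path. A minimal standalone sketch of that wrapper pattern; __refill() stands in for the real reservation call:

    #include <stdio.h>

    /* Common worker: 'flush' decides whether the caller may block to
     * reclaim space (stand-in for the real reservation routine). */
    static int __refill(unsigned long needed, int flush)
    {
            printf("refill %lu bytes, flush=%d\n", needed, flush);
            return 0;
    }

    static int refill(unsigned long needed)
    {
            return __refill(needed, 1);
    }

    static int refill_noflush(unsigned long needed)
    {
            return __refill(needed, 0);
    }

    int main(void)
    {
            refill(4096);           /* normal path: allowed to flush */
            refill_noflush(4096);   /* e.g. under a transaction: must not flush */
            return 0;
    }
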
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 9472d3de5e5..49f3c9dc09f 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -935,8 +935,10 @@ again:
node = tree_search(tree, start);
if (!node) {
prealloc = alloc_extent_state_atomic(prealloc);
- if (!prealloc)
- return -ENOMEM;
+ if (!prealloc) {
+ err = -ENOMEM;
+ goto out;
+ }
err = insert_state(tree, prealloc, start, end, &bits);
prealloc = NULL;
BUG_ON(err == -EEXIST);
@@ -992,8 +994,10 @@ hit_next:
*/
if (state->start < start) {
prealloc = alloc_extent_state_atomic(prealloc);
- if (!prealloc)
- return -ENOMEM;
+ if (!prealloc) {
+ err = -ENOMEM;
+ goto out;
+ }
err = split_state(tree, state, prealloc, start);
BUG_ON(err == -EEXIST);
prealloc = NULL;
@@ -1024,8 +1028,10 @@ hit_next:
this_end = last_start - 1;
prealloc = alloc_extent_state_atomic(prealloc);
- if (!prealloc)
- return -ENOMEM;
+ if (!prealloc) {
+ err = -ENOMEM;
+ goto out;
+ }
/*
* Avoid to free 'prealloc' if it can be merged with
@@ -1051,8 +1057,10 @@ hit_next:
*/
if (state->start <= end && state->end > end) {
prealloc = alloc_extent_state_atomic(prealloc);
- if (!prealloc)
- return -ENOMEM;
+ if (!prealloc) {
+ err = -ENOMEM;
+ goto out;
+ }
err = split_state(tree, state, prealloc, end + 1);
BUG_ON(err == -EEXIST);
@@ -2287,14 +2295,20 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
if (!uptodate) {
int failed_mirror;
failed_mirror = (int)(unsigned long)bio->bi_bdev;
- if (tree->ops && tree->ops->readpage_io_failed_hook)
- ret = tree->ops->readpage_io_failed_hook(
- bio, page, start, end,
- failed_mirror, state);
- else
- ret = bio_readpage_error(bio, page, start, end,
- failed_mirror, NULL);
+ /*
+ * The generic bio_readpage_error handles errors the
+ * following way: If possible, new read requests are
+ * created and submitted and will end up in
+ * end_bio_extent_readpage as well (if we're lucky, not
+ * in the !uptodate case). In that case it returns 0 and
+ * we just go on with the next page in our bio. If it
+ * can't handle the error it will return -EIO and we
+ * remain responsible for that page.
+ */
+ ret = bio_readpage_error(bio, page, start, end,
+ failed_mirror, NULL);
if (ret == 0) {
+error_handled:
uptodate =
test_bit(BIO_UPTODATE, &bio->bi_flags);
if (err)
@@ -2302,6 +2316,13 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
uncache_state(&cached);
continue;
}
+ if (tree->ops && tree->ops->readpage_io_failed_hook) {
+ ret = tree->ops->readpage_io_failed_hook(
+ bio, page, start, end,
+ failed_mirror, state);
+ if (ret == 0)
+ goto error_handled;
+ }
}
if (uptodate) {
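
The extent_io.c hunks turn bare `return -ENOMEM` statements into `err = -ENOMEM; goto out;` so the function always leaves through its single exit path, which in the surrounding code drops the tree lock and releases any preallocated state. A minimal standalone sketch of that single-exit style with hypothetical lock and preallocation helpers:

    #include <stdio.h>
    #include <stdlib.h>

    static int locked;

    static void lock_tree(void)   { locked = 1; }
    static void unlock_tree(void) { locked = 0; }

    static int convert_range(int simulate_oom)
    {
            int err = 0;
            char *prealloc = NULL;

            lock_tree();

            prealloc = simulate_oom ? NULL : malloc(64);
            if (!prealloc) {
                    err = -12;      /* -ENOMEM */
                    goto out;       /* never return with the lock held */
            }

            /* ... would use 'prealloc' to split/insert state here ... */

    out:
            unlock_tree();
            free(prealloc);
            return err;
    }

    int main(void)
    {
            printf("ok path:  %d (locked=%d)\n", convert_range(0), locked);
            printf("oom path: %d (locked=%d)\n", convert_range(1), locked);
            return 0;
    }
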
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index dafdfa059bf..97fbe939c05 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1167,6 +1167,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
(sizeof(struct page *)));
+ nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
+ nrptrs = max(nrptrs, 8);
pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
if (!pages)
return -ENOMEM;
@@ -1387,7 +1389,11 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
goto out;
}
- file_update_time(file);
+ err = btrfs_update_time(file);
+ if (err) {
+ mutex_unlock(&inode->i_mutex);
+ goto out;
+ }
BTRFS_I(inode)->sequence++;
start_pos = round_down(pos, root->sectorsize);
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 6e5b7e46369..ec23d43d0c3 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -1470,6 +1470,7 @@ static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
{
info->offset = offset_to_bitmap(ctl, offset);
info->bytes = 0;
+ INIT_LIST_HEAD(&info->list);
link_free_space(ctl, info);
ctl->total_bitmaps++;
@@ -2319,6 +2320,7 @@ again:
if (!found) {
start = i;
+ cluster->max_size = 0;
found = true;
}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 526dd51a196..0a6b928813a 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -38,6 +38,7 @@
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
+#include <linux/mount.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
@@ -2031,7 +2032,7 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
/* insert an orphan item to track this unlinked/truncated file */
if (insert >= 1) {
ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
- BUG_ON(ret);
+ BUG_ON(ret && ret != -EEXIST);
}
/* insert an orphan item to track subvolume contains orphan files */
@@ -2158,6 +2159,38 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
if (ret && ret != -ESTALE)
goto out;
+ if (ret == -ESTALE && root == root->fs_info->tree_root) {
+ struct btrfs_root *dead_root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ int is_dead_root = 0;
+
+ /*
+ * this is an orphan in the tree root. Currently these
+ * could come from 2 sources:
+ * a) a snapshot deletion in progress
+ * b) a free space cache inode
+ * We need to distinguish those two, as the snapshot
+ * orphan must not get deleted.
+ * find_dead_roots already ran before us, so if this
+ * is a snapshot deletion, we should find the root
+ * in the dead_roots list
+ */
+ spin_lock(&fs_info->trans_lock);
+ list_for_each_entry(dead_root, &fs_info->dead_roots,
+ root_list) {
+ if (dead_root->root_key.objectid ==
+ found_key.objectid) {
+ is_dead_root = 1;
+ break;
+ }
+ }
+ spin_unlock(&fs_info->trans_lock);
+ if (is_dead_root) {
+ /* prevent this orphan from being found again */
+ key.offset = found_key.objectid - 1;
+ continue;
+ }
+ }
/*
* Inode is already gone but the orphan item is still there,
* kill the orphan item.
@@ -2191,7 +2224,14 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
continue;
}
nr_truncate++;
+ /*
+ * Need to hold the i_mutex for reservation purposes, not
+ * a huge deal here but I have a WARN_ON in
+ * btrfs_delalloc_reserve_space to catch offenders.
+ */
+ mutex_lock(&inode->i_mutex);
ret = btrfs_truncate(inode);
+ mutex_unlock(&inode->i_mutex);
} else {
nr_unlink++;
}
@@ -3327,7 +3367,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
u64 hint_byte = 0;
hole_size = last_byte - cur_offset;
- trans = btrfs_start_transaction(root, 2);
+ trans = btrfs_start_transaction(root, 3);
if (IS_ERR(trans)) {
err = PTR_ERR(trans);
break;
@@ -3337,6 +3377,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
cur_offset + hole_size,
&hint_byte, 1);
if (err) {
+ btrfs_update_inode(trans, root, inode);
btrfs_end_transaction(trans, root);
break;
}
@@ -3346,6 +3387,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
0, hole_size, 0, hole_size,
0, 0, 0);
if (err) {
+ btrfs_update_inode(trans, root, inode);
btrfs_end_transaction(trans, root);
break;
}
@@ -3353,6 +3395,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
btrfs_drop_extent_cache(inode, hole_start,
last_byte - 1, 0);
+ btrfs_update_inode(trans, root, inode);
btrfs_end_transaction(trans, root);
}
free_extent_map(em);
@@ -3370,6 +3413,8 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
static int btrfs_setsize(struct inode *inode, loff_t newsize)
{
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_trans_handle *trans;
loff_t oldsize = i_size_read(inode);
int ret;
@@ -3377,16 +3422,19 @@ static int btrfs_setsize(struct inode *inode, loff_t newsize)
return 0;
if (newsize > oldsize) {
- i_size_write(inode, newsize);
- btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
truncate_pagecache(inode, oldsize, newsize);
ret = btrfs_cont_expand(inode, oldsize, newsize);
- if (ret) {
- btrfs_setsize(inode, oldsize);
+ if (ret)
return ret;
- }
- mark_inode_dirty(inode);
+ trans = btrfs_start_transaction(root, 1);
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+
+ i_size_write(inode, newsize);
+ btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
+ ret = btrfs_update_inode(trans, root, inode);
+ btrfs_end_transaction_throttle(trans, root);
} else {
/*
@@ -3426,9 +3474,9 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
if (attr->ia_valid) {
setattr_copy(inode, attr);
- mark_inode_dirty(inode);
+ err = btrfs_dirty_inode(inode);
- if (attr->ia_valid & ATTR_MODE)
+ if (!err && attr->ia_valid & ATTR_MODE)
err = btrfs_acl_chmod(inode);
}
@@ -3490,7 +3538,7 @@ void btrfs_evict_inode(struct inode *inode)
* doing the truncate.
*/
while (1) {
- ret = btrfs_block_rsv_refill(root, rsv, min_size);
+ ret = btrfs_block_rsv_refill_noflush(root, rsv, min_size);
/*
* Try and steal from the global reserve since we will
@@ -4204,42 +4252,80 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
* FIXME, needs more benchmarking...there are no reasons other than performance
* to keep or drop this code.
*/
-void btrfs_dirty_inode(struct inode *inode, int flags)
+int btrfs_dirty_inode(struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
int ret;
if (BTRFS_I(inode)->dummy_inode)
- return;
+ return 0;
trans = btrfs_join_transaction(root);
- BUG_ON(IS_ERR(trans));
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
ret = btrfs_update_inode(trans, root, inode);
if (ret && ret == -ENOSPC) {
/* whoops, lets try again with the full transaction */
btrfs_end_transaction(trans, root);
trans = btrfs_start_transaction(root, 1);
- if (IS_ERR(trans)) {
- printk_ratelimited(KERN_ERR "btrfs: fail to "
- "dirty inode %llu error %ld\n",
- (unsigned long long)btrfs_ino(inode),
- PTR_ERR(trans));
- return;
- }
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
ret = btrfs_update_inode(trans, root, inode);
- if (ret) {
- printk_ratelimited(KERN_ERR "btrfs: fail to "
- "dirty inode %llu error %d\n",
- (unsigned long long)btrfs_ino(inode),
- ret);
- }
}
btrfs_end_transaction(trans, root);
if (BTRFS_I(inode)->delayed_node)
btrfs_balance_delayed_items(root);
+
+ return ret;
+}
+
+/*
+ * This is a copy of file_update_time. We need it so we can return an error on
+ * ENOSPC when updating the inode during file writes and mmap writes.
+ */
+int btrfs_update_time(struct file *file)
+{
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct timespec now;
+ int ret;
+ enum { S_MTIME = 1, S_CTIME = 2, S_VERSION = 4 } sync_it = 0;
+
+ /* First try to exhaust all avenues to not sync */
+ if (IS_NOCMTIME(inode))
+ return 0;
+
+ now = current_fs_time(inode->i_sb);
+ if (!timespec_equal(&inode->i_mtime, &now))
+ sync_it = S_MTIME;
+
+ if (!timespec_equal(&inode->i_ctime, &now))
+ sync_it |= S_CTIME;
+
+ if (IS_I_VERSION(inode))
+ sync_it |= S_VERSION;
+
+ if (!sync_it)
+ return 0;
+
+ /* Finally allowed to write? Takes lock. */
+ if (mnt_want_write_file(file))
+ return 0;
+
+ /* Only change inode inside the lock region */
+ if (sync_it & S_VERSION)
+ inode_inc_iversion(inode);
+ if (sync_it & S_CTIME)
+ inode->i_ctime = now;
+ if (sync_it & S_MTIME)
+ inode->i_mtime = now;
+ ret = btrfs_dirty_inode(inode);
+ if (!ret)
+ mark_inode_dirty_sync(inode);
+ mnt_drop_write(file->f_path.mnt);
+ return ret;
}
/*
@@ -4555,11 +4641,18 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
goto out_unlock;
}
+ /*
+ * If the active LSM wants to access the inode during
+ * d_instantiate it needs these. Smack checks to see
+ * if the filesystem supports xattrs by looking at the
+ * ops vector.
+ */
+
+ inode->i_op = &btrfs_special_inode_operations;
err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
if (err)
drop_inode = 1;
else {
- inode->i_op = &btrfs_special_inode_operations;
init_special_inode(inode, inode->i_mode, rdev);
btrfs_update_inode(trans, root, inode);
}
@@ -4613,14 +4706,21 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
goto out_unlock;
}
+ /*
+ * If the active LSM wants to access the inode during
+ * d_instantiate it needs these. Smack checks to see
+ * if the filesystem supports xattrs by looking at the
+ * ops vector.
+ */
+ inode->i_fop = &btrfs_file_operations;
+ inode->i_op = &btrfs_file_inode_operations;
+
err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
if (err)
drop_inode = 1;
else {
inode->i_mapping->a_ops = &btrfs_aops;
inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
- inode->i_fop = &btrfs_file_operations;
- inode->i_op = &btrfs_file_inode_operations;
BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
}
out_unlock:
@@ -6303,7 +6403,12 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
u64 page_start;
u64 page_end;
+ /* Need this to keep space reservations serialized */
+ mutex_lock(&inode->i_mutex);
ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
+ mutex_unlock(&inode->i_mutex);
+ if (!ret)
+ ret = btrfs_update_time(vma->vm_file);
if (ret) {
if (ret == -ENOMEM)
ret = VM_FAULT_OOM;
@@ -6515,8 +6620,9 @@ static int btrfs_truncate(struct inode *inode)
/* Just need the 1 for updating the inode */
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
- err = PTR_ERR(trans);
- goto out;
+ ret = err = PTR_ERR(trans);
+ trans = NULL;
+ break;
}
}
@@ -7076,14 +7182,21 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
goto out_unlock;
}
+ /*
+ * If the active LSM wants to access the inode during
+ * d_instantiate it needs these. Smack checks to see
+ * if the filesystem supports xattrs by looking at the
+ * ops vector.
+ */
+ inode->i_fop = &btrfs_file_operations;
+ inode->i_op = &btrfs_file_inode_operations;
+
err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
if (err)
drop_inode = 1;
else {
inode->i_mapping->a_ops = &btrfs_aops;
inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
- inode->i_fop = &btrfs_file_operations;
- inode->i_op = &btrfs_file_inode_operations;
BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
}
if (drop_inode)
@@ -7353,6 +7466,7 @@ static const struct inode_operations btrfs_symlink_inode_operations = {
.follow_link = page_follow_link_light,
.put_link = page_put_link,
.getattr = btrfs_getattr,
+ .setattr = btrfs_setattr,
.permission = btrfs_permission,
.setxattr = btrfs_setxattr,
.getxattr = btrfs_getxattr,
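
The new btrfs_update_time above follows the same shape as file_update_time: it first builds a bitmask of what actually changed (mtime, ctime, i_version) and bails out before taking the write reference when nothing needs syncing. A compile-only userspace sketch of that decision step; fields_to_sync(), its argument names and the sample timestamps are invented for illustration.

#include <stdio.h>
#include <time.h>

enum { SYNC_MTIME = 1, SYNC_CTIME = 2, SYNC_VERSION = 4 };

static int ts_equal(const struct timespec *a, const struct timespec *b)
{
	return a->tv_sec == b->tv_sec && a->tv_nsec == b->tv_nsec;
}

/* Work out which inode fields differ from "now"; 0 means nothing to do. */
static int fields_to_sync(const struct timespec *mtime,
			  const struct timespec *ctime_val,
			  const struct timespec *now, int versioned)
{
	int sync_it = 0;

	if (!ts_equal(mtime, now))
		sync_it |= SYNC_MTIME;
	if (!ts_equal(ctime_val, now))
		sync_it |= SYNC_CTIME;
	if (versioned)
		sync_it |= SYNC_VERSION;
	return sync_it;
}

int main(void)
{
	struct timespec now = { 100, 0 }, mtime = { 100, 0 }, ctime_val = { 90, 0 };

	/* Only ctime differs, so only SYNC_CTIME is reported. */
	printf("sync mask: %d\n", fields_to_sync(&mtime, &ctime_val, &now, 0));
	return 0;
}
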
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index a90e749ed6d..c04f02c7d5b 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -252,11 +252,11 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
trans = btrfs_join_transaction(root);
BUG_ON(IS_ERR(trans));
+ btrfs_update_iflags(inode);
+ inode->i_ctime = CURRENT_TIME;
ret = btrfs_update_inode(trans, root, inode);
BUG_ON(ret);
- btrfs_update_iflags(inode);
- inode->i_ctime = CURRENT_TIME;
btrfs_end_transaction(trans, root);
mnt_drop_write(file->f_path.mnt);
@@ -858,8 +858,10 @@ static int cluster_pages_for_defrag(struct inode *inode,
return 0;
file_end = (isize - 1) >> PAGE_CACHE_SHIFT;
+ mutex_lock(&inode->i_mutex);
ret = btrfs_delalloc_reserve_space(inode,
num_pages << PAGE_CACHE_SHIFT);
+ mutex_unlock(&inode->i_mutex);
if (ret)
return ret;
again:
@@ -1278,7 +1280,7 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
}
ret = btrfs_grow_device(trans, device, new_size);
btrfs_commit_transaction(trans, root);
- } else {
+ } else if (new_size < old_size) {
ret = btrfs_shrink_device(device, new_size);
}
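
The btrfs_ioctl_resize hunk above stops routing a resize to the same size through the shrink path; only a strictly smaller new size shrinks the device now. Roughly, the decision looks like the sketch below, where grow_device() and shrink_device() are placeholders rather than the real helpers.

#include <stdio.h>

static int grow_device(unsigned long long n)
{
	printf("grow to %llu\n", n);
	return 0;
}

static int shrink_device(unsigned long long n)
{
	printf("shrink to %llu\n", n);
	return 0;
}

static int resize(unsigned long long old_size, unsigned long long new_size)
{
	if (new_size > old_size)
		return grow_device(new_size);
	else if (new_size < old_size)
		return shrink_device(new_size);
	return 0;	/* same size: nothing to do */
}

int main(void)
{
	resize(100, 100);	/* no output: no-op */
	resize(100, 200);
	resize(200, 50);
	return 0;
}
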
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index dff29d5e151..cfb55434a46 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -2947,7 +2947,9 @@ static int relocate_file_extent_cluster(struct inode *inode,
index = (cluster->start - offset) >> PAGE_CACHE_SHIFT;
last_index = (cluster->end - offset) >> PAGE_CACHE_SHIFT;
while (index <= last_index) {
+ mutex_lock(&inode->i_mutex);
ret = btrfs_delalloc_reserve_metadata(inode, PAGE_CACHE_SIZE);
+ mutex_unlock(&inode->i_mutex);
if (ret)
goto out;
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index fab420db512..ddf2c90d3fc 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -256,6 +256,11 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx)
btrfs_release_path(swarn->path);
ipath = init_ipath(4096, local_root, swarn->path);
+ if (IS_ERR(ipath)) {
+ ret = PTR_ERR(ipath);
+ ipath = NULL;
+ goto err;
+ }
ret = paths_from_inode(inum, ipath);
if (ret < 0)
@@ -1530,18 +1535,22 @@ static noinline_for_stack int scrub_supers(struct scrub_dev *sdev)
static noinline_for_stack int scrub_workers_get(struct btrfs_root *root)
{
struct btrfs_fs_info *fs_info = root->fs_info;
+ int ret = 0;
mutex_lock(&fs_info->scrub_lock);
if (fs_info->scrub_workers_refcnt == 0) {
btrfs_init_workers(&fs_info->scrub_workers, "scrub",
fs_info->thread_pool_size, &fs_info->generic_worker);
fs_info->scrub_workers.idle_thresh = 4;
- btrfs_start_workers(&fs_info->scrub_workers, 1);
+ ret = btrfs_start_workers(&fs_info->scrub_workers);
+ if (ret)
+ goto out;
}
++fs_info->scrub_workers_refcnt;
+out:
mutex_unlock(&fs_info->scrub_lock);
- return 0;
+ return ret;
}
static noinline_for_stack void scrub_workers_put(struct btrfs_root *root)
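
scrub_workers_get above now propagates a btrfs_start_workers failure to its caller and only takes a reference once the worker pool has actually started. The same get-side pattern in a small userspace sketch; start_pool(), workers_get() and the pthread mutex are stand-ins for the kernel pieces.

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static int pool_refcnt;

/* Hypothetical pool starter: 0 on success, negative errno on failure. */
static int start_pool(void)
{
	return 0;
}

static int workers_get(void)
{
	int ret = 0;

	pthread_mutex_lock(&pool_lock);
	if (pool_refcnt == 0) {
		ret = start_pool();
		if (ret)
			goto out;	/* failed: take no reference */
	}
	++pool_refcnt;
out:
	pthread_mutex_unlock(&pool_lock);
	return ret;
}

int main(void)
{
	printf("get -> %d, refcnt %d\n", workers_get(), pool_refcnt);
	printf("get -> %d, refcnt %d\n", workers_get(), pool_refcnt);
	return 0;
}
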
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 17ee7fc5e64..200f63bc667 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -41,6 +41,7 @@
#include <linux/slab.h>
#include <linux/cleancache.h>
#include <linux/mnt_namespace.h>
+#include <linux/ratelimit.h>
#include "compat.h"
#include "delayed-inode.h"
#include "ctree.h"
@@ -1053,11 +1054,11 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
u64 avail_space;
u64 used_space;
u64 min_stripe_size;
- int min_stripes = 1;
+ int min_stripes = 1, num_stripes = 1;
int i = 0, nr_devices;
int ret;
- nr_devices = fs_info->fs_devices->rw_devices;
+ nr_devices = fs_info->fs_devices->open_devices;
BUG_ON(!nr_devices);
devices_info = kmalloc(sizeof(*devices_info) * nr_devices,
@@ -1067,20 +1068,24 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
/* calc min stripe number for data space allocation */
type = btrfs_get_alloc_profile(root, 1);
- if (type & BTRFS_BLOCK_GROUP_RAID0)
+ if (type & BTRFS_BLOCK_GROUP_RAID0) {
min_stripes = 2;
- else if (type & BTRFS_BLOCK_GROUP_RAID1)
+ num_stripes = nr_devices;
+ } else if (type & BTRFS_BLOCK_GROUP_RAID1) {
min_stripes = 2;
- else if (type & BTRFS_BLOCK_GROUP_RAID10)
+ num_stripes = 2;
+ } else if (type & BTRFS_BLOCK_GROUP_RAID10) {
min_stripes = 4;
+ num_stripes = 4;
+ }
if (type & BTRFS_BLOCK_GROUP_DUP)
min_stripe_size = 2 * BTRFS_STRIPE_LEN;
else
min_stripe_size = BTRFS_STRIPE_LEN;
- list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
- if (!device->in_fs_metadata)
+ list_for_each_entry(device, &fs_devices->devices, dev_list) {
+ if (!device->in_fs_metadata || !device->bdev)
continue;
avail_space = device->total_bytes - device->bytes_used;
@@ -1141,13 +1146,16 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
i = nr_devices - 1;
avail_space = 0;
while (nr_devices >= min_stripes) {
+ if (num_stripes > nr_devices)
+ num_stripes = nr_devices;
+
if (devices_info[i].max_avail >= min_stripe_size) {
int j;
u64 alloc_size;
- avail_space += devices_info[i].max_avail * min_stripes;
+ avail_space += devices_info[i].max_avail * num_stripes;
alloc_size = devices_info[i].max_avail;
- for (j = i + 1 - min_stripes; j <= i; j++)
+ for (j = i + 1 - num_stripes; j <= i; j++)
devices_info[j].max_avail -= alloc_size;
}
i--;
@@ -1264,6 +1272,16 @@ static int btrfs_unfreeze(struct super_block *sb)
return 0;
}
+static void btrfs_fs_dirty_inode(struct inode *inode, int flags)
+{
+ int ret;
+
+ ret = btrfs_dirty_inode(inode);
+ if (ret)
+ printk_ratelimited(KERN_ERR "btrfs: failed to dirty inode %Lu "
+ "error %d\n", btrfs_ino(inode), ret);
+}
+
static const struct super_operations btrfs_super_ops = {
.drop_inode = btrfs_drop_inode,
.evict_inode = btrfs_evict_inode,
@@ -1271,7 +1289,7 @@ static const struct super_operations btrfs_super_ops = {
.sync_fs = btrfs_sync_fs,
.show_options = btrfs_show_options,
.write_inode = btrfs_write_inode,
- .dirty_inode = btrfs_dirty_inode,
+ .dirty_inode = btrfs_fs_dirty_inode,
.alloc_inode = btrfs_alloc_inode,
.destroy_inode = btrfs_destroy_inode,
.statfs = btrfs_statfs,
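
Since the VFS ->dirty_inode callback returns void, the super.c hunk above keeps the error-returning btrfs_dirty_inode for internal callers and registers a thin wrapper, btrfs_fs_dirty_inode, that can only log a failure. A userspace sketch of adapting an int-returning helper to a void callback slot; struct ops, dirty_inode() and the inode numbers are invented for illustration.

#include <stdio.h>

struct ops {
	void (*dirty)(int ino);
};

/* Internal helper that can fail and say why. */
static int dirty_inode(int ino)
{
	return ino < 0 ? -28 /* pretend -ENOSPC */ : 0;
}

/* Adapter for the void callback slot: failures can only be logged. */
static void fs_dirty_inode(int ino)
{
	int ret = dirty_inode(ino);

	if (ret)
		fprintf(stderr, "failed to dirty inode %d, error %d\n", ino, ret);
}

static const struct ops super_ops = {
	.dirty = fs_dirty_inode,
};

int main(void)
{
	super_ops.dirty(7);	/* succeeds silently */
	super_ops.dirty(-1);	/* failure is logged, not returned */
	return 0;
}
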
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index c37433d3cd8..f4b839fd3c9 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -295,6 +295,12 @@ loop_lock:
btrfs_requeue_work(&device->work);
goto done;
}
+ /* unplug every 64 requests just for good measure */
+ if (batch_run % 64 == 0) {
+ blk_finish_plug(&plug);
+ blk_start_plug(&plug);
+ sync_pending = 0;
+ }
}
cond_resched();
@@ -1611,7 +1617,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
return -EINVAL;
- bdev = blkdev_get_by_path(device_path, FMODE_EXCL,
+ bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
root->fs_info->bdev_holder);
if (IS_ERR(bdev))
return PTR_ERR(bdev);
@@ -3258,7 +3264,7 @@ static void btrfs_end_bio(struct bio *bio, int err)
*/
if (atomic_read(&bbio->error) > bbio->max_errors) {
err = -EIO;
- } else if (err) {
+ } else {
/*
* this bio is actually up to date, we didn't
* go over the max number of errors
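
The worker-loop hunk in volumes.c above restarts the block plug every 64 requests so batched I/O is flushed periodically instead of being held for the whole run. The same periodic-flush shape, stripped to a userspace loop with an invented flush_batch():

#include <stdio.h>

static void flush_batch(int upto)
{
	printf("flushing up to request %d\n", upto);
}

int main(void)
{
	int batch_run = 0;

	for (int i = 0; i < 200; i++) {
		batch_run++;
		/* pretend request i was queued here */
		if (batch_run % 64 == 0)
			flush_batch(i);	/* the unplug/replug point in the patch */
	}
	flush_batch(199);	/* final flush when the loop ends */
	return 0;
}
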
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 4144caf2f9d..173b1d22e59 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -87,7 +87,7 @@ static int ceph_set_page_dirty(struct page *page)
snapc = ceph_get_snap_context(ci->i_snap_realm->cached_context);
/* dirty the head */
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (ci->i_head_snapc == NULL)
ci->i_head_snapc = ceph_get_snap_context(snapc);
++ci->i_wrbuffer_ref_head;
@@ -100,7 +100,7 @@ static int ceph_set_page_dirty(struct page *page)
ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
snapc, snapc->seq, snapc->num_snaps);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
/* now adjust page */
spin_lock_irq(&mapping->tree_lock);
@@ -391,7 +391,7 @@ static struct ceph_snap_context *get_oldest_context(struct inode *inode,
struct ceph_snap_context *snapc = NULL;
struct ceph_cap_snap *capsnap = NULL;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
capsnap->context, capsnap->dirty_pages);
@@ -407,7 +407,7 @@ static struct ceph_snap_context *get_oldest_context(struct inode *inode,
dout(" head snapc %p has %d dirty pages\n",
snapc, ci->i_wrbuffer_ref_head);
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return snapc;
}
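
Everything below in the ceph portion of this merge is a mechanical conversion from the shared VFS inode->i_lock to a new ceph-private ci->i_ceph_lock (declared in super.h and initialized in ceph_alloc_inode further down), so ceph's cap and snap bookkeeping no longer contends with the VFS on i_lock. A minimal userspace sketch of the pattern, with pthread mutexes standing in for spinlocks and all structure members invented for illustration:

#include <stdio.h>
#include <pthread.h>

struct inode {
	pthread_mutex_t i_lock;		/* generic VFS state only */
};

struct ceph_inode_info {
	pthread_mutex_t i_ceph_lock;	/* protects the fs-private fields below */
	int i_wrbuffer_ref;
	struct inode vfs_inode;
};

static void take_wrbuffer_ref(struct ceph_inode_info *ci)
{
	/* Take the private lock, not ci->vfs_inode.i_lock. */
	pthread_mutex_lock(&ci->i_ceph_lock);
	ci->i_wrbuffer_ref++;
	pthread_mutex_unlock(&ci->i_ceph_lock);
}

int main(void)
{
	struct ceph_inode_info ci = { .i_wrbuffer_ref = 0 };

	pthread_mutex_init(&ci.i_ceph_lock, NULL);
	pthread_mutex_init(&ci.vfs_inode.i_lock, NULL);

	take_wrbuffer_ref(&ci);
	printf("wrbuffer refs: %d\n", ci.i_wrbuffer_ref);
	return 0;
}
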
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 0f327c6c967..8b53193e4f7 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -309,7 +309,7 @@ void ceph_reservation_status(struct ceph_fs_client *fsc,
/*
* Find ceph_cap for given mds, if any.
*
- * Called with i_lock held.
+ * Called with i_ceph_lock held.
*/
static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
{
@@ -332,9 +332,9 @@ struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci, int mds)
{
struct ceph_cap *cap;
- spin_lock(&ci->vfs_inode.i_lock);
+ spin_lock(&ci->i_ceph_lock);
cap = __get_cap_for_mds(ci, mds);
- spin_unlock(&ci->vfs_inode.i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return cap;
}
@@ -361,15 +361,16 @@ static int __ceph_get_cap_mds(struct ceph_inode_info *ci)
int ceph_get_cap_mds(struct inode *inode)
{
+ struct ceph_inode_info *ci = ceph_inode(inode);
int mds;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
mds = __ceph_get_cap_mds(ceph_inode(inode));
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return mds;
}
/*
- * Called under i_lock.
+ * Called under i_ceph_lock.
*/
static void __insert_cap_node(struct ceph_inode_info *ci,
struct ceph_cap *new)
@@ -415,7 +416,7 @@ static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
*
* If I_FLUSH is set, leave the inode at the front of the list.
*
- * Caller holds i_lock
+ * Caller holds i_ceph_lock
* -> we take mdsc->cap_delay_lock
*/
static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
@@ -457,7 +458,7 @@ static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
/*
* Cancel delayed work on cap.
*
- * Caller must hold i_lock.
+ * Caller must hold i_ceph_lock.
*/
static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
struct ceph_inode_info *ci)
@@ -532,14 +533,14 @@ int ceph_add_cap(struct inode *inode,
wanted |= ceph_caps_for_mode(fmode);
retry:
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
cap = __get_cap_for_mds(ci, mds);
if (!cap) {
if (new_cap) {
cap = new_cap;
new_cap = NULL;
} else {
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
new_cap = get_cap(mdsc, caps_reservation);
if (new_cap == NULL)
return -ENOMEM;
@@ -625,7 +626,7 @@ retry:
if (fmode >= 0)
__ceph_get_fmode(ci, fmode);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
wake_up_all(&ci->i_cap_wq);
return 0;
}
@@ -792,7 +793,7 @@ int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
struct rb_node *p;
int ret = 0;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
cap = rb_entry(p, struct ceph_cap, ci_node);
if (__cap_is_valid(cap) &&
@@ -801,7 +802,7 @@ int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
break;
}
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
dout("ceph_caps_revoking %p %s = %d\n", inode,
ceph_cap_string(mask), ret);
return ret;
@@ -855,7 +856,7 @@ int __ceph_caps_mds_wanted(struct ceph_inode_info *ci)
}
/*
- * called under i_lock
+ * called under i_ceph_lock
*/
static int __ceph_is_any_caps(struct ceph_inode_info *ci)
{
@@ -865,7 +866,7 @@ static int __ceph_is_any_caps(struct ceph_inode_info *ci)
/*
* Remove a cap. Take steps to deal with a racing iterate_session_caps.
*
- * caller should hold i_lock.
+ * caller should hold i_ceph_lock.
* caller will not hold session s_mutex if called from destroy_inode.
*/
void __ceph_remove_cap(struct ceph_cap *cap)
@@ -1028,7 +1029,7 @@ static void __queue_cap_release(struct ceph_mds_session *session,
/*
* Queue cap releases when an inode is dropped from our cache. Since
- * inode is about to be destroyed, there is no need for i_lock.
+ * inode is about to be destroyed, there is no need for i_ceph_lock.
*/
void ceph_queue_caps_release(struct inode *inode)
{
@@ -1049,7 +1050,7 @@ void ceph_queue_caps_release(struct inode *inode)
/*
* Send a cap msg on the given inode. Update our caps state, then
- * drop i_lock and send the message.
+ * drop i_ceph_lock and send the message.
*
* Make note of max_size reported/requested from mds, revoked caps
* that have now been implemented.
@@ -1061,13 +1062,13 @@ void ceph_queue_caps_release(struct inode *inode)
* Return non-zero if delayed release, or we experienced an error
* such that the caller should requeue + retry later.
*
- * called with i_lock, then drops it.
+ * called with i_ceph_lock, then drops it.
* caller should hold snap_rwsem (read), s_mutex.
*/
static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
int op, int used, int want, int retain, int flushing,
unsigned *pflush_tid)
- __releases(cap->ci->vfs_inode->i_lock)
+ __releases(cap->ci->i_ceph_lock)
{
struct ceph_inode_info *ci = cap->ci;
struct inode *inode = &ci->vfs_inode;
@@ -1170,7 +1171,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
xattr_version = ci->i_xattrs.version;
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id,
op, keep, want, flushing, seq, flush_tid, issue_seq, mseq,
@@ -1198,13 +1199,13 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
* Unless @again is true, skip cap_snaps that were already sent to
* the MDS (i.e., during this session).
*
- * Called under i_lock. Takes s_mutex as needed.
+ * Called under i_ceph_lock. Takes s_mutex as needed.
*/
void __ceph_flush_snaps(struct ceph_inode_info *ci,
struct ceph_mds_session **psession,
int again)
- __releases(ci->vfs_inode->i_lock)
- __acquires(ci->vfs_inode->i_lock)
+ __releases(ci->i_ceph_lock)
+ __acquires(ci->i_ceph_lock)
{
struct inode *inode = &ci->vfs_inode;
int mds;
@@ -1261,7 +1262,7 @@ retry:
session = NULL;
}
if (!session) {
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
mutex_lock(&mdsc->mutex);
session = __ceph_lookup_mds_session(mdsc, mds);
mutex_unlock(&mdsc->mutex);
@@ -1275,7 +1276,7 @@ retry:
* deletion or migration. retry, and we'll
* get a better @mds value next time.
*/
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
goto retry;
}
@@ -1285,7 +1286,7 @@ retry:
list_del_init(&capsnap->flushing_item);
list_add_tail(&capsnap->flushing_item,
&session->s_cap_snaps_flushing);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
dout("flush_snaps %p cap_snap %p follows %lld tid %llu\n",
inode, capsnap, capsnap->follows, capsnap->flush_tid);
@@ -1302,7 +1303,7 @@ retry:
next_follows = capsnap->follows + 1;
ceph_put_cap_snap(capsnap);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
goto retry;
}
@@ -1322,11 +1323,9 @@ out:
static void ceph_flush_snaps(struct ceph_inode_info *ci)
{
- struct inode *inode = &ci->vfs_inode;
-
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
__ceph_flush_snaps(ci, NULL, 0);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
/*
@@ -1373,7 +1372,7 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
* Add dirty inode to the flushing list. Assigned a seq number so we
* can wait for caps to flush without starving.
*
- * Called under i_lock.
+ * Called under i_ceph_lock.
*/
static int __mark_caps_flushing(struct inode *inode,
struct ceph_mds_session *session)
@@ -1421,9 +1420,9 @@ static int try_nonblocking_invalidate(struct inode *inode)
struct ceph_inode_info *ci = ceph_inode(inode);
u32 invalidating_gen = ci->i_rdcache_gen;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
invalidate_mapping_pages(&inode->i_data, 0, -1);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (inode->i_data.nrpages == 0 &&
invalidating_gen == ci->i_rdcache_gen) {
@@ -1470,7 +1469,7 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
if (mdsc->stopping)
is_delayed = 1;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (ci->i_ceph_flags & CEPH_I_FLUSH)
flags |= CHECK_CAPS_FLUSH;
@@ -1480,7 +1479,7 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
__ceph_flush_snaps(ci, &session, 0);
goto retry_locked;
retry:
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
retry_locked:
file_wanted = __ceph_caps_file_wanted(ci);
used = __ceph_caps_used(ci);
@@ -1634,7 +1633,7 @@ ack:
if (mutex_trylock(&session->s_mutex) == 0) {
dout("inverting session/ino locks on %p\n",
session);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (took_snap_rwsem) {
up_read(&mdsc->snap_rwsem);
took_snap_rwsem = 0;
@@ -1648,7 +1647,7 @@ ack:
if (down_read_trylock(&mdsc->snap_rwsem) == 0) {
dout("inverting snap/in locks on %p\n",
inode);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
down_read(&mdsc->snap_rwsem);
took_snap_rwsem = 1;
goto retry;
@@ -1664,10 +1663,10 @@ ack:
mds = cap->mds; /* remember mds, so we don't repeat */
sent++;
- /* __send_cap drops i_lock */
+ /* __send_cap drops i_ceph_lock */
delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, used, want,
retain, flushing, NULL);
- goto retry; /* retake i_lock and restart our cap scan. */
+ goto retry; /* retake i_ceph_lock and restart our cap scan. */
}
/*
@@ -1681,7 +1680,7 @@ ack:
else if (!is_delayed || force_requeue)
__cap_delay_requeue(mdsc, ci);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (queue_invalidate)
ceph_queue_invalidate(inode);
@@ -1704,7 +1703,7 @@ static int try_flush_caps(struct inode *inode, struct ceph_mds_session *session,
int flushing = 0;
retry:
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
goto out;
@@ -1716,7 +1715,7 @@ retry:
int delayed;
if (!session) {
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
session = cap->session;
mutex_lock(&session->s_mutex);
goto retry;
@@ -1727,18 +1726,18 @@ retry:
flushing = __mark_caps_flushing(inode, session);
- /* __send_cap drops i_lock */
+ /* __send_cap drops i_ceph_lock */
delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, used, want,
cap->issued | cap->implemented, flushing,
flush_tid);
if (!delayed)
goto out_unlocked;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
__cap_delay_requeue(mdsc, ci);
}
out:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
out_unlocked:
if (session && unlock_session)
mutex_unlock(&session->s_mutex);
@@ -1753,7 +1752,7 @@ static int caps_are_flushed(struct inode *inode, unsigned tid)
struct ceph_inode_info *ci = ceph_inode(inode);
int i, ret = 1;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
for (i = 0; i < CEPH_CAP_BITS; i++)
if ((ci->i_flushing_caps & (1 << i)) &&
ci->i_cap_flush_tid[i] <= tid) {
@@ -1761,7 +1760,7 @@ static int caps_are_flushed(struct inode *inode, unsigned tid)
ret = 0;
break;
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return ret;
}
@@ -1868,10 +1867,10 @@ int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
struct ceph_mds_client *mdsc =
ceph_sb_to_client(inode->i_sb)->mdsc;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (__ceph_caps_dirty(ci))
__cap_delay_requeue_front(mdsc, ci);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
return err;
}
@@ -1894,7 +1893,7 @@ static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
struct inode *inode = &ci->vfs_inode;
struct ceph_cap *cap;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
cap = ci->i_auth_cap;
if (cap && cap->session == session) {
dout("kick_flushing_caps %p cap %p capsnap %p\n", inode,
@@ -1904,7 +1903,7 @@ static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
pr_err("%p auth cap %p not mds%d ???\n", inode,
cap, session->s_mds);
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
}
@@ -1921,7 +1920,7 @@ void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
struct ceph_cap *cap;
int delayed = 0;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
cap = ci->i_auth_cap;
if (cap && cap->session == session) {
dout("kick_flushing_caps %p cap %p %s\n", inode,
@@ -1932,14 +1931,14 @@ void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
cap->issued | cap->implemented,
ci->i_flushing_caps, NULL);
if (delayed) {
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
__cap_delay_requeue(mdsc, ci);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
} else {
pr_err("%p auth cap %p not mds%d ???\n", inode,
cap, session->s_mds);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
}
}
@@ -1952,7 +1951,7 @@ static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
struct ceph_cap *cap;
int delayed = 0;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
cap = ci->i_auth_cap;
dout("kick_flushing_inode_caps %p flushing %s flush_seq %lld\n", inode,
ceph_cap_string(ci->i_flushing_caps), ci->i_cap_flush_seq);
@@ -1964,12 +1963,12 @@ static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
cap->issued | cap->implemented,
ci->i_flushing_caps, NULL);
if (delayed) {
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
__cap_delay_requeue(mdsc, ci);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
} else {
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
}
@@ -1978,7 +1977,7 @@ static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
* Take references to capabilities we hold, so that we don't release
* them to the MDS prematurely.
*
- * Protected by i_lock.
+ * Protected by i_ceph_lock.
*/
static void __take_cap_refs(struct ceph_inode_info *ci, int got)
{
@@ -2016,7 +2015,7 @@ static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
dout("get_cap_refs %p need %s want %s\n", inode,
ceph_cap_string(need), ceph_cap_string(want));
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
/* make sure file is actually open */
file_wanted = __ceph_caps_file_wanted(ci);
@@ -2077,7 +2076,7 @@ static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
ceph_cap_string(have), ceph_cap_string(need));
}
out:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
dout("get_cap_refs %p ret %d got %s\n", inode,
ret, ceph_cap_string(*got));
return ret;
@@ -2094,7 +2093,7 @@ static void check_max_size(struct inode *inode, loff_t endoff)
int check = 0;
/* do we need to explicitly request a larger max_size? */
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if ((endoff >= ci->i_max_size ||
endoff > (inode->i_size << 1)) &&
endoff > ci->i_wanted_max_size) {
@@ -2103,7 +2102,7 @@ static void check_max_size(struct inode *inode, loff_t endoff)
ci->i_wanted_max_size = endoff;
check = 1;
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (check)
ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
}
@@ -2140,9 +2139,9 @@ retry:
*/
void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps)
{
- spin_lock(&ci->vfs_inode.i_lock);
+ spin_lock(&ci->i_ceph_lock);
__take_cap_refs(ci, caps);
- spin_unlock(&ci->vfs_inode.i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
/*
@@ -2160,7 +2159,7 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
int last = 0, put = 0, flushsnaps = 0, wake = 0;
struct ceph_cap_snap *capsnap;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (had & CEPH_CAP_PIN)
--ci->i_pin_ref;
if (had & CEPH_CAP_FILE_RD)
@@ -2193,7 +2192,7 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
}
}
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had),
last ? " last" : "", put ? " put" : "");
@@ -2225,7 +2224,7 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
int found = 0;
struct ceph_cap_snap *capsnap = NULL;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
ci->i_wrbuffer_ref -= nr;
last = !ci->i_wrbuffer_ref;
@@ -2274,7 +2273,7 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
}
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (last) {
ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
@@ -2291,7 +2290,7 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
* Handle a cap GRANT message from the MDS. (Note that a GRANT may
* actually be a revocation if it specifies a smaller cap set.)
*
- * caller holds s_mutex and i_lock, we drop both.
+ * caller holds s_mutex and i_ceph_lock, we drop both.
*
* return value:
* 0 - ok
@@ -2302,7 +2301,7 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
struct ceph_mds_session *session,
struct ceph_cap *cap,
struct ceph_buffer *xattr_buf)
- __releases(inode->i_lock)
+ __releases(ci->i_ceph_lock)
{
struct ceph_inode_info *ci = ceph_inode(inode);
int mds = session->s_mds;
@@ -2453,7 +2452,7 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
}
BUG_ON(cap->issued & ~cap->implemented);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (writeback)
/*
* queue inode for writeback: we can't actually call
@@ -2483,7 +2482,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
struct ceph_mds_caps *m,
struct ceph_mds_session *session,
struct ceph_cap *cap)
- __releases(inode->i_lock)
+ __releases(ci->i_ceph_lock)
{
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
@@ -2539,7 +2538,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
wake_up_all(&ci->i_cap_wq);
out:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (drop)
iput(inode);
}
@@ -2562,7 +2561,7 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n",
inode, ci, session->s_mds, follows);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
if (capsnap->follows == follows) {
if (capsnap->flush_tid != flush_tid) {
@@ -2585,7 +2584,7 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
capsnap, capsnap->follows);
}
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (drop)
iput(inode);
}
@@ -2598,7 +2597,7 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
static void handle_cap_trunc(struct inode *inode,
struct ceph_mds_caps *trunc,
struct ceph_mds_session *session)
- __releases(inode->i_lock)
+ __releases(ci->i_ceph_lock)
{
struct ceph_inode_info *ci = ceph_inode(inode);
int mds = session->s_mds;
@@ -2617,7 +2616,7 @@ static void handle_cap_trunc(struct inode *inode,
inode, mds, seq, truncate_size, truncate_seq);
queue_trunc = ceph_fill_file_size(inode, issued,
truncate_seq, truncate_size, size);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (queue_trunc)
ceph_queue_vmtruncate(inode);
@@ -2646,7 +2645,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
dout("handle_cap_export inode %p ci %p mds%d mseq %d\n",
inode, ci, mds, mseq);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
/* make sure we haven't seen a higher mseq */
for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
@@ -2690,7 +2689,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
}
/* else, we already released it */
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
/*
@@ -2745,9 +2744,9 @@ static void handle_cap_import(struct ceph_mds_client *mdsc,
up_read(&mdsc->snap_rwsem);
/* make sure we re-request max_size, if necessary */
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
ci->i_requested_max_size = 0;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
/*
@@ -2762,6 +2761,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
struct ceph_mds_client *mdsc = session->s_mdsc;
struct super_block *sb = mdsc->fsc->sb;
struct inode *inode;
+ struct ceph_inode_info *ci;
struct ceph_cap *cap;
struct ceph_mds_caps *h;
int mds = session->s_mds;
@@ -2815,6 +2815,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
/* lookup ino */
inode = ceph_find_inode(sb, vino);
+ ci = ceph_inode(inode);
dout(" op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op), vino.ino,
vino.snap, inode);
if (!inode) {
@@ -2844,16 +2845,16 @@ void ceph_handle_caps(struct ceph_mds_session *session,
}
/* the rest require a cap */
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
cap = __get_cap_for_mds(ceph_inode(inode), mds);
if (!cap) {
dout(" no cap on %p ino %llx.%llx from mds%d\n",
inode, ceph_ino(inode), ceph_snap(inode), mds);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
goto flush_cap_releases;
}
- /* note that each of these drops i_lock for us */
+ /* note that each of these drops i_ceph_lock for us */
switch (op) {
case CEPH_CAP_OP_REVOKE:
case CEPH_CAP_OP_GRANT:
@@ -2869,7 +2870,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
break;
default:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
pr_err("ceph_handle_caps: unknown cap op %d %s\n", op,
ceph_cap_op_name(op));
}
@@ -2962,13 +2963,13 @@ void ceph_put_fmode(struct ceph_inode_info *ci, int fmode)
struct inode *inode = &ci->vfs_inode;
int last = 0;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
dout("put_fmode %p fmode %d %d -> %d\n", inode, fmode,
ci->i_nr_by_mode[fmode], ci->i_nr_by_mode[fmode]-1);
BUG_ON(ci->i_nr_by_mode[fmode] == 0);
if (--ci->i_nr_by_mode[fmode] == 0)
last++;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (last && ci->i_vino.snap == CEPH_NOSNAP)
ceph_check_caps(ci, 0, NULL);
@@ -2991,7 +2992,7 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
int used, dirty;
int ret = 0;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
used = __ceph_caps_used(ci);
dirty = __ceph_caps_dirty(ci);
@@ -3046,7 +3047,7 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
inode, cap, ceph_cap_string(cap->issued));
}
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return ret;
}
@@ -3061,7 +3062,7 @@ int ceph_encode_dentry_release(void **p, struct dentry *dentry,
/*
* force a record for the directory caps if we have a dentry lease.
- * this is racy (can't take i_lock and d_lock together), but it
+ * this is racy (can't take i_ceph_lock and d_lock together), but it
* doesn't have to be perfect; the mds will revoke anything we don't
* release.
*/
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index bca3948e9db..3eeb9766126 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -281,18 +281,18 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
}
/* can we use the dcache? */
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if ((filp->f_pos == 2 || fi->dentry) &&
!ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
ceph_snap(inode) != CEPH_SNAPDIR &&
ceph_dir_test_complete(inode) &&
__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
err = __dcache_readdir(filp, dirent, filldir);
if (err != -EAGAIN)
return err;
} else {
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
if (fi->dentry) {
err = note_last_dentry(fi, fi->dentry->d_name.name,
@@ -428,12 +428,12 @@ more:
* were released during the whole readdir, and we should have
* the complete dir contents in our cache.
*/
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (ci->i_release_count == fi->dir_release_count) {
ceph_dir_set_complete(inode);
ci->i_max_offset = filp->f_pos;
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
dout("readdir %p filp %p done.\n", inode, filp);
return 0;
@@ -607,7 +607,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
struct ceph_inode_info *ci = ceph_inode(dir);
struct ceph_dentry_info *di = ceph_dentry(dentry);
- spin_lock(&dir->i_lock);
+ spin_lock(&ci->i_ceph_lock);
dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
if (strncmp(dentry->d_name.name,
fsc->mount_options->snapdir_name,
@@ -615,13 +615,13 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
!is_root_ceph_dentry(dir, dentry) &&
ceph_dir_test_complete(dir) &&
(__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
- spin_unlock(&dir->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
dout(" dir %p complete, -ENOENT\n", dir);
d_add(dentry, NULL);
di->lease_shared_gen = ci->i_shared_gen;
return NULL;
}
- spin_unlock(&dir->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
op = ceph_snap(dir) == CEPH_SNAPDIR ?
@@ -841,12 +841,12 @@ static int drop_caps_for_unlink(struct inode *inode)
struct ceph_inode_info *ci = ceph_inode(inode);
int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (inode->i_nlink == 1) {
drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
ci->i_ceph_flags |= CEPH_I_NODELAY;
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return drop;
}
@@ -1015,10 +1015,10 @@ static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
struct ceph_dentry_info *di = ceph_dentry(dentry);
int valid = 0;
- spin_lock(&dir->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (ci->i_shared_gen == di->lease_shared_gen)
valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
- spin_unlock(&dir->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
dir, (unsigned)ci->i_shared_gen, dentry,
(unsigned)di->lease_shared_gen, valid);
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index ce549d31eeb..ed72428d9c7 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -147,9 +147,9 @@ int ceph_open(struct inode *inode, struct file *file)
/* trivially open snapdir */
if (ceph_snap(inode) == CEPH_SNAPDIR) {
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
__ceph_get_fmode(ci, fmode);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return ceph_init_file(inode, file, fmode);
}
@@ -158,7 +158,7 @@ int ceph_open(struct inode *inode, struct file *file)
* write) or any MDS (for read). Update wanted set
* asynchronously.
*/
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (__ceph_is_any_real_caps(ci) &&
(((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
int mds_wanted = __ceph_caps_mds_wanted(ci);
@@ -168,7 +168,7 @@ int ceph_open(struct inode *inode, struct file *file)
inode, fmode, ceph_cap_string(wanted),
ceph_cap_string(issued));
__ceph_get_fmode(ci, fmode);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
/* adjust wanted? */
if ((issued & wanted) != wanted &&
@@ -180,10 +180,10 @@ int ceph_open(struct inode *inode, struct file *file)
} else if (ceph_snap(inode) != CEPH_NOSNAP &&
(ci->i_snap_caps & wanted) == wanted) {
__ceph_get_fmode(ci, fmode);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return ceph_init_file(inode, file, fmode);
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
req = prepare_open_request(inode->i_sb, flags, 0);
@@ -743,9 +743,9 @@ retry_snap:
*/
int dirty;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
ceph_put_cap_refs(ci, got);
ret = generic_file_aio_write(iocb, iov, nr_segs, pos);
@@ -764,9 +764,9 @@ retry_snap:
if (ret >= 0) {
int dirty;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (dirty)
__mark_inode_dirty(inode, dirty);
}
@@ -797,7 +797,8 @@ static loff_t ceph_llseek(struct file *file, loff_t offset, int origin)
mutex_lock(&inode->i_mutex);
__ceph_do_pending_vmtruncate(inode);
- if (origin != SEEK_CUR || origin != SEEK_SET) {
+
+ if (origin == SEEK_END || origin == SEEK_DATA || origin == SEEK_HOLE) {
ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);
if (ret < 0) {
offset = ret;
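
The ceph_llseek fix above replaces a condition that was always true: for two distinct constants, origin != SEEK_CUR || origin != SEEK_SET holds for every origin, so every llseek issued a size getattr. The new test names the cases that actually need a fresh i_size (SEEK_END, SEEK_DATA, SEEK_HOLE). A tiny demonstration of the tautology:

#include <stdio.h>

int main(void)
{
	for (int origin = SEEK_SET; origin <= SEEK_END; origin++) {
		/* Old test: true for every value of origin. */
		int buggy = (origin != SEEK_CUR || origin != SEEK_SET);
		/* New idea: only fetch the size when it is needed
		 * (SEEK_DATA/SEEK_HOLE omitted here, they need _GNU_SOURCE). */
		int fixed = (origin == SEEK_END);

		printf("origin=%d buggy=%d fixed=%d\n", origin, buggy, fixed);
	}
	return 0;
}
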
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 116f36502f1..87fb132fb33 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -297,6 +297,8 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
dout("alloc_inode %p\n", &ci->vfs_inode);
+ spin_lock_init(&ci->i_ceph_lock);
+
ci->i_version = 0;
ci->i_time_warp_seq = 0;
ci->i_ceph_flags = 0;
@@ -583,7 +585,7 @@ static int fill_inode(struct inode *inode,
iinfo->xattr_len);
}
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
/*
* provided version will be odd if inode value is projected,
@@ -680,7 +682,7 @@ static int fill_inode(struct inode *inode,
char *sym;
BUG_ON(symlen != inode->i_size);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
err = -ENOMEM;
sym = kmalloc(symlen+1, GFP_NOFS);
@@ -689,7 +691,7 @@ static int fill_inode(struct inode *inode,
memcpy(sym, iinfo->symlink, symlen);
sym[symlen] = 0;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (!ci->i_symlink)
ci->i_symlink = sym;
else
@@ -715,7 +717,7 @@ static int fill_inode(struct inode *inode,
}
no_change:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
/* queue truncate if we saw i_size decrease */
if (queue_trunc)
@@ -750,13 +752,13 @@ no_change:
info->cap.flags,
caps_reservation);
} else {
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
dout(" %p got snap_caps %s\n", inode,
ceph_cap_string(le32_to_cpu(info->cap.caps)));
ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
if (cap_fmode >= 0)
__ceph_get_fmode(ci, cap_fmode);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
} else if (cap_fmode >= 0) {
pr_warning("mds issued no caps on %llx.%llx\n",
@@ -849,19 +851,20 @@ static void ceph_set_dentry_offset(struct dentry *dn)
{
struct dentry *dir = dn->d_parent;
struct inode *inode = dir->d_inode;
+ struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_dentry_info *di;
BUG_ON(!inode);
di = ceph_dentry(dn);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (!ceph_dir_test_complete(inode)) {
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return;
}
di->offset = ceph_inode(inode)->i_max_offset++;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
spin_lock(&dir->d_lock);
spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
@@ -1308,7 +1311,7 @@ int ceph_inode_set_size(struct inode *inode, loff_t size)
struct ceph_inode_info *ci = ceph_inode(inode);
int ret = 0;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
inode->i_size = size;
inode->i_blocks = (size + (1 << 9) - 1) >> 9;
@@ -1318,7 +1321,7 @@ int ceph_inode_set_size(struct inode *inode, loff_t size)
(ci->i_reported_size << 1) < ci->i_max_size)
ret = 1;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return ret;
}
@@ -1376,20 +1379,20 @@ static void ceph_invalidate_work(struct work_struct *work)
u32 orig_gen;
int check = 0;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
dout("invalidate_pages %p gen %d revoking %d\n", inode,
ci->i_rdcache_gen, ci->i_rdcache_revoking);
if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
/* nevermind! */
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
goto out;
}
orig_gen = ci->i_rdcache_gen;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
truncate_inode_pages(&inode->i_data, 0);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (orig_gen == ci->i_rdcache_gen &&
orig_gen == ci->i_rdcache_revoking) {
dout("invalidate_pages %p gen %d successful\n", inode,
@@ -1401,7 +1404,7 @@ static void ceph_invalidate_work(struct work_struct *work)
inode, orig_gen, ci->i_rdcache_gen,
ci->i_rdcache_revoking);
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (check)
ceph_check_caps(ci, 0, NULL);
@@ -1460,10 +1463,10 @@ void __ceph_do_pending_vmtruncate(struct inode *inode)
int wrbuffer_refs, wake = 0;
retry:
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (ci->i_truncate_pending == 0) {
dout("__do_pending_vmtruncate %p none pending\n", inode);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return;
}
@@ -1474,7 +1477,7 @@ retry:
if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
dout("__do_pending_vmtruncate %p flushing snaps first\n",
inode);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
filemap_write_and_wait_range(&inode->i_data, 0,
inode->i_sb->s_maxbytes);
goto retry;
@@ -1484,15 +1487,15 @@ retry:
wrbuffer_refs = ci->i_wrbuffer_ref;
dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
ci->i_truncate_pending, to);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
truncate_inode_pages(inode->i_mapping, to);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
ci->i_truncate_pending--;
if (ci->i_truncate_pending == 0)
wake = 1;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (wrbuffer_refs == 0)
ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
@@ -1547,7 +1550,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
if (IS_ERR(req))
return PTR_ERR(req);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
issued = __ceph_caps_issued(ci, NULL);
dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));
@@ -1695,7 +1698,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
}
release &= issued;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (inode_dirty_flags)
__mark_inode_dirty(inode, inode_dirty_flags);
@@ -1717,7 +1720,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
__ceph_do_pending_vmtruncate(inode);
return err;
out:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
ceph_mdsc_put_request(req);
return err;
}
diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
index 5a14c29cbba..790914a598d 100644
--- a/fs/ceph/ioctl.c
+++ b/fs/ceph/ioctl.c
@@ -241,11 +241,11 @@ static long ceph_ioctl_lazyio(struct file *file)
struct ceph_inode_info *ci = ceph_inode(inode);
if ((fi->fmode & CEPH_FILE_MODE_LAZY) == 0) {
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
ci->i_nr_by_mode[fi->fmode]--;
fi->fmode |= CEPH_FILE_MODE_LAZY;
ci->i_nr_by_mode[fi->fmode]++;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
dout("ioctl_layzio: file %p marked lazy\n", file);
ceph_check_caps(ci, 0, NULL);
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 264ab701154..6203d805eb4 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -732,21 +732,21 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
}
}
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
cap = NULL;
if (mode == USE_AUTH_MDS)
cap = ci->i_auth_cap;
if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
if (!cap) {
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
goto random;
}
mds = cap->session->s_mds;
dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n",
inode, ceph_vinop(inode), mds,
cap == ci->i_auth_cap ? "auth " : "", cap);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return mds;
random:
@@ -951,7 +951,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
dout("removing cap %p, ci is %p, inode is %p\n",
cap, ci, &ci->vfs_inode);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
__ceph_remove_cap(cap);
if (!__ceph_is_any_real_caps(ci)) {
struct ceph_mds_client *mdsc =
@@ -984,7 +984,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
}
spin_unlock(&mdsc->cap_dirty_lock);
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
while (drop--)
iput(inode);
return 0;
@@ -1015,10 +1015,10 @@ static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
wake_up_all(&ci->i_cap_wq);
if (arg) {
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
ci->i_wanted_max_size = 0;
ci->i_requested_max_size = 0;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
return 0;
}
@@ -1151,7 +1151,7 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
if (session->s_trim_caps <= 0)
return -1;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
mine = cap->issued | cap->implemented;
used = __ceph_caps_used(ci);
oissued = __ceph_caps_issued_other(ci, cap);
@@ -1170,7 +1170,7 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
__ceph_remove_cap(cap);
} else {
/* try to drop referring dentries */
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
d_prune_aliases(inode);
dout("trim_caps_cb %p cap %p pruned, count now %d\n",
inode, cap, atomic_read(&inode->i_count));
@@ -1178,7 +1178,7 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
}
out:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return 0;
}
@@ -1296,7 +1296,7 @@ static int check_cap_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq)
i_flushing_item);
struct inode *inode = &ci->vfs_inode;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (ci->i_cap_flush_seq <= want_flush_seq) {
dout("check_cap_flush still flushing %p "
"seq %lld <= %lld to mds%d\n", inode,
@@ -1304,7 +1304,7 @@ static int check_cap_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq)
session->s_mds);
ret = 0;
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
mutex_unlock(&session->s_mutex);
ceph_put_mds_session(session);
@@ -1495,6 +1495,7 @@ retry:
pos, temp);
} else if (stop_on_nosnap && inode &&
ceph_snap(inode) == CEPH_NOSNAP) {
+ spin_unlock(&temp->d_lock);
break;
} else {
pos -= temp->d_name.len;
@@ -2011,10 +2012,10 @@ void ceph_invalidate_dir_request(struct ceph_mds_request *req)
struct ceph_inode_info *ci = ceph_inode(inode);
dout("invalidate_dir_request %p (D_COMPLETE, lease(s))\n", inode);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
ceph_dir_clear_complete(inode);
ci->i_release_count++;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (req->r_dentry)
ceph_invalidate_dentry_lease(req->r_dentry);
@@ -2422,7 +2423,7 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
if (err)
goto out_free;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
cap->seq = 0; /* reset cap seq */
cap->issue_seq = 0; /* and issue_seq */
@@ -2445,7 +2446,7 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
rec.v1.pathbase = cpu_to_le64(pathbase);
reclen = sizeof(rec.v1);
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (recon_state->flock) {
int num_fcntl_locks, num_flock_locks;
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 4bb239921db..a50ca0e3947 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -20,7 +20,7 @@
*
* mdsc->snap_rwsem
*
- * inode->i_lock
+ * ci->i_ceph_lock
* mdsc->snap_flush_lock
* mdsc->cap_delay_lock
*
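The lock-ordering comment above now names ci->i_ceph_lock in the documented hierarchy (snap_rwsem, then i_ceph_lock, then snap_flush_lock / cap_delay_lock). As a hedged, userspace-only sketch of why that documentation matters — pthread mutexes standing in for the kernel locks, all names invented — every thread acquiring the locks in the same outer-to-inner order is what rules out deadlock:

#include <pthread.h>
#include <stdio.h>

/* Stand-ins for snap_rwsem, i_ceph_lock and cap_delay_lock: the only point
 * is that every path takes them in the same outer-to-inner order. */
static pthread_mutex_t snap_lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t ceph_lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t delay_lock = PTHREAD_MUTEX_INITIALIZER;

static void *worker(void *arg)
{
	pthread_mutex_lock(&snap_lock);    /* outermost first ... */
	pthread_mutex_lock(&ceph_lock);
	pthread_mutex_lock(&delay_lock);   /* ... innermost last */
	printf("%s holds all three locks\n", (const char *)arg);
	pthread_mutex_unlock(&delay_lock);
	pthread_mutex_unlock(&ceph_lock);
	pthread_mutex_unlock(&snap_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, "thread A");
	pthread_create(&b, NULL, worker, "thread B");
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}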
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index e2643719133..a559c80f127 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -446,7 +446,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
return;
}
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
used = __ceph_caps_used(ci);
dirty = __ceph_caps_dirty(ci);
@@ -528,7 +528,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
kfree(capsnap);
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
/*
@@ -537,7 +537,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
*
* If capsnap can now be flushed, add to snap_flush list, and return 1.
*
- * Caller must hold i_lock.
+ * Caller must hold i_ceph_lock.
*/
int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
struct ceph_cap_snap *capsnap)
@@ -739,9 +739,9 @@ static void flush_snaps(struct ceph_mds_client *mdsc)
inode = &ci->vfs_inode;
ihold(inode);
spin_unlock(&mdsc->snap_flush_lock);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
__ceph_flush_snaps(ci, &session, 0);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
iput(inode);
spin_lock(&mdsc->snap_flush_lock);
}
@@ -847,7 +847,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
continue;
ci = ceph_inode(inode);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (!ci->i_snap_realm)
goto skip_inode;
/*
@@ -876,7 +876,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
oldrealm = ci->i_snap_realm;
ci->i_snap_realm = realm;
spin_unlock(&realm->inodes_with_caps_lock);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
ceph_get_snap_realm(mdsc, realm);
ceph_put_snap_realm(mdsc, oldrealm);
@@ -885,7 +885,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
continue;
skip_inode:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
iput(inode);
}
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 8dc73a594a9..b48f15f101a 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -383,7 +383,7 @@ static int ceph_show_options(struct seq_file *m, struct vfsmount *mnt)
if (fsopt->rsize != CEPH_RSIZE_DEFAULT)
seq_printf(m, ",rsize=%d", fsopt->rsize);
if (fsopt->rasize != CEPH_RASIZE_DEFAULT)
- seq_printf(m, ",rasize=%d", fsopt->rsize);
+ seq_printf(m, ",rasize=%d", fsopt->rasize);
if (fsopt->congestion_kb != default_congestion_kb())
seq_printf(m, ",write_congestion_kb=%d", fsopt->congestion_kb);
if (fsopt->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT)
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 01bf189e08a..edcbf3774a5 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -220,7 +220,7 @@ struct ceph_dentry_info {
* The locking for D_COMPLETE is a bit odd:
* - we can clear it at almost any time (see ceph_d_prune)
* - it is only meaningful if:
- * - we hold dir inode i_lock
+ * - we hold dir inode i_ceph_lock
* - we hold dir FILE_SHARED caps
* - the dentry D_COMPLETE is set
*/
@@ -250,6 +250,8 @@ struct ceph_inode_xattrs_info {
struct ceph_inode_info {
struct ceph_vino i_vino; /* ceph ino + snap */
+ spinlock_t i_ceph_lock;
+
u64 i_version;
u32 i_time_warp_seq;
@@ -271,7 +273,7 @@ struct ceph_inode_info {
struct ceph_inode_xattrs_info i_xattrs;
- /* capabilities. protected _both_ by i_lock and cap->session's
+ /* capabilities. protected _both_ by i_ceph_lock and cap->session's
* s_mutex. */
struct rb_root i_caps; /* cap list */
struct ceph_cap *i_auth_cap; /* authoritative cap, if any */
@@ -437,18 +439,18 @@ static inline void ceph_i_clear(struct inode *inode, unsigned mask)
{
struct ceph_inode_info *ci = ceph_inode(inode);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
ci->i_ceph_flags &= ~mask;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
static inline void ceph_i_set(struct inode *inode, unsigned mask)
{
struct ceph_inode_info *ci = ceph_inode(inode);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
ci->i_ceph_flags |= mask;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
static inline bool ceph_i_test(struct inode *inode, unsigned mask)
@@ -456,9 +458,9 @@ static inline bool ceph_i_test(struct inode *inode, unsigned mask)
struct ceph_inode_info *ci = ceph_inode(inode);
bool r;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
r = (ci->i_ceph_flags & mask) == mask;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return r;
}
@@ -508,9 +510,9 @@ extern int __ceph_caps_issued_other(struct ceph_inode_info *ci,
static inline int ceph_caps_issued(struct ceph_inode_info *ci)
{
int issued;
- spin_lock(&ci->vfs_inode.i_lock);
+ spin_lock(&ci->i_ceph_lock);
issued = __ceph_caps_issued(ci, NULL);
- spin_unlock(&ci->vfs_inode.i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return issued;
}
@@ -518,9 +520,9 @@ static inline int ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask,
int touch)
{
int r;
- spin_lock(&ci->vfs_inode.i_lock);
+ spin_lock(&ci->i_ceph_lock);
r = __ceph_caps_issued_mask(ci, mask, touch);
- spin_unlock(&ci->vfs_inode.i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return r;
}
@@ -743,10 +745,9 @@ extern int ceph_add_cap(struct inode *inode,
extern void __ceph_remove_cap(struct ceph_cap *cap);
static inline void ceph_remove_cap(struct ceph_cap *cap)
{
- struct inode *inode = &cap->ci->vfs_inode;
- spin_lock(&inode->i_lock);
+ spin_lock(&cap->ci->i_ceph_lock);
__ceph_remove_cap(cap);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&cap->ci->i_ceph_lock);
}
extern void ceph_put_cap(struct ceph_mds_client *mdsc,
struct ceph_cap *cap);
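The super.h hunks above switch the ceph_i_set()/ceph_i_clear()/ceph_i_test() helpers and ceph_remove_cap() from inode->i_lock to the new per-inode i_ceph_lock. A minimal userspace analogue of that accessor pattern (a pthread mutex instead of a spinlock; structure and names are illustrative, not the Ceph API):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct obj_info {
	pthread_mutex_t lock;   /* plays the role of i_ceph_lock */
	unsigned flags;         /* plays the role of i_ceph_flags */
};

static void obj_set(struct obj_info *o, unsigned mask)
{
	pthread_mutex_lock(&o->lock);
	o->flags |= mask;
	pthread_mutex_unlock(&o->lock);
}

static void obj_clear(struct obj_info *o, unsigned mask)
{
	pthread_mutex_lock(&o->lock);
	o->flags &= ~mask;
	pthread_mutex_unlock(&o->lock);
}

static bool obj_test(struct obj_info *o, unsigned mask)
{
	bool r;

	pthread_mutex_lock(&o->lock);
	r = (o->flags & mask) == mask;
	pthread_mutex_unlock(&o->lock);
	return r;
}

int main(void)
{
	struct obj_info o = { PTHREAD_MUTEX_INITIALIZER, 0 };

	obj_set(&o, 0x3);
	obj_clear(&o, 0x1);
	printf("flag 0x2 set: %d\n", obj_test(&o, 0x2));
	return 0;
}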
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 96c6739a028..a5e36e4488a 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -343,8 +343,8 @@ void __ceph_destroy_xattrs(struct ceph_inode_info *ci)
}
static int __build_xattrs(struct inode *inode)
- __releases(inode->i_lock)
- __acquires(inode->i_lock)
+ __releases(ci->i_ceph_lock)
+ __acquires(ci->i_ceph_lock)
{
u32 namelen;
u32 numattr = 0;
@@ -372,7 +372,7 @@ start:
end = p + ci->i_xattrs.blob->vec.iov_len;
ceph_decode_32_safe(&p, end, numattr, bad);
xattr_version = ci->i_xattrs.version;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
xattrs = kcalloc(numattr, sizeof(struct ceph_xattr *),
GFP_NOFS);
@@ -387,7 +387,7 @@ start:
goto bad_lock;
}
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (ci->i_xattrs.version != xattr_version) {
/* lost a race, retry */
for (i = 0; i < numattr; i++)
@@ -418,7 +418,7 @@ start:
return err;
bad_lock:
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
bad:
if (xattrs) {
for (i = 0; i < numattr; i++)
@@ -512,7 +512,7 @@ ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value,
if (vxattrs)
vxattr = ceph_match_vxattr(vxattrs, name);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
dout("getxattr %p ver=%lld index_ver=%lld\n", inode,
ci->i_xattrs.version, ci->i_xattrs.index_version);
@@ -520,14 +520,14 @@ ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value,
(ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
goto get_xattr;
} else {
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
/* get xattrs from mds (if we don't already have them) */
err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
if (err)
return err;
}
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (vxattr && vxattr->readonly) {
err = vxattr->getxattr_cb(ci, value, size);
@@ -558,7 +558,7 @@ get_xattr:
memcpy(value, xattr->val, xattr->val_len);
out:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return err;
}
@@ -573,7 +573,7 @@ ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
u32 len;
int i;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
dout("listxattr %p ver=%lld index_ver=%lld\n", inode,
ci->i_xattrs.version, ci->i_xattrs.index_version);
@@ -581,13 +581,13 @@ ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
(ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
goto list_xattr;
} else {
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
if (err)
return err;
}
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
err = __build_xattrs(inode);
if (err < 0)
@@ -619,7 +619,7 @@ list_xattr:
}
out:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return err;
}
@@ -739,7 +739,7 @@ int ceph_setxattr(struct dentry *dentry, const char *name,
if (!xattr)
goto out;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
retry:
issued = __ceph_caps_issued(ci, NULL);
if (!(issued & CEPH_CAP_XATTR_EXCL))
@@ -752,12 +752,12 @@ retry:
required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
struct ceph_buffer *blob = NULL;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
dout(" preaallocating new blob size=%d\n", required_blob_size);
blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
if (!blob)
goto out;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (ci->i_xattrs.prealloc_blob)
ceph_buffer_put(ci->i_xattrs.prealloc_blob);
ci->i_xattrs.prealloc_blob = blob;
@@ -770,13 +770,13 @@ retry:
dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
ci->i_xattrs.dirty = true;
inode->i_ctime = CURRENT_TIME;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (dirty)
__mark_inode_dirty(inode, dirty);
return err;
do_sync:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
err = ceph_sync_setxattr(dentry, name, value, size, flags);
out:
kfree(newname);
@@ -833,7 +833,7 @@ int ceph_removexattr(struct dentry *dentry, const char *name)
return -EOPNOTSUPP;
}
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
__build_xattrs(inode);
issued = __ceph_caps_issued(ci, NULL);
dout("removexattr %p issued %s\n", inode, ceph_cap_string(issued));
@@ -846,12 +846,12 @@ int ceph_removexattr(struct dentry *dentry, const char *name)
ci->i_xattrs.dirty = true;
inode->i_ctime = CURRENT_TIME;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (dirty)
__mark_inode_dirty(inode, dirty);
return err;
do_sync:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
err = ceph_send_removexattr(dentry, name);
return err;
}
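__build_xattrs() in the hunks above drops i_ceph_lock around the allocation and re-checks ci->i_xattrs.version after retaking it, retrying if it lost a race. A hedged sketch of that unlock/allocate/relock/recheck idiom, with invented names and a pthread mutex in place of the spinlock:

#include <pthread.h>
#include <stdlib.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long version;   /* bumped whenever the protected data changes */

static int rebuild_cache(void)
{
	void *buf;
	unsigned long seen;

	pthread_mutex_lock(&lock);
retry:
	seen = version;
	pthread_mutex_unlock(&lock);   /* never allocate while holding the lock */

	buf = malloc(4096);
	if (!buf)
		return -1;

	pthread_mutex_lock(&lock);
	if (version != seen) {         /* lost a race: discard the buffer and retry */
		free(buf);
		goto retry;
	}
	/* ... install buf into the cache here ... */
	free(buf);                     /* sketch only: nothing to install */
	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	printf("rebuild_cache -> %d\n", rebuild_cache());
	return 0;
}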
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index d6a972df033..8cd4b52d421 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -441,6 +441,8 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct kvec *iov_orig,
smb_msg.msg_controllen = 0;
for (total_read = 0; to_read; total_read += length, to_read -= length) {
+ try_to_freeze();
+
if (server_unresponsive(server)) {
total_read = -EAGAIN;
break;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index cf0b1539b32..4dd9283885e 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -702,6 +702,13 @@ cifs_find_lock_conflict(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
lock->type, lock->netfid, conf_lock);
}
+/*
+ * Check if there is another lock that prevents us from setting the lock
+ * (mandatory style). If such a lock exists, update the flock structure with
+ * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
+ * brlocks, or leave it unchanged if we can't. Returns 0 if we don't need to
+ * send a request to the server, or 1 otherwise.
+ */
static int
cifs_lock_test(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
__u8 type, __u16 netfid, struct file_lock *flock)
@@ -739,6 +746,12 @@ cifs_lock_add(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock)
mutex_unlock(&cinode->lock_mutex);
}
+/*
+ * Set the byte-range lock (mandatory style). Returns:
+ * 1) 0, if we set the lock and don't need to send a request to the server;
+ * 2) 1, if no locks prevent us but we need to send a request to the server;
+ * 3) -EACCES, if there is a lock that prevents us and wait is false.
+ */
static int
cifs_lock_add_if(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
bool wait)
@@ -778,6 +791,13 @@ try_again:
return rc;
}
+/*
+ * Check if there is another lock that prevents us from setting the lock
+ * (posix style). If such a lock exists, update the flock structure with
+ * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
+ * brlocks, or leave it unchanged if we can't. Returns 0 if we don't need to
+ * send a request to the server, or 1 otherwise.
+ */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
@@ -800,6 +820,12 @@ cifs_posix_lock_test(struct file *file, struct file_lock *flock)
return rc;
}
+/*
+ * Set the byte-range lock (posix style). Returns:
+ * 1) 0, if we set the lock and don't need to send a request to the server;
+ * 2) 1, if we need to send a request to the server;
+ * 3) <0, if an error occurs while setting the lock.
+ */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
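The comments added to fs/cifs/file.c above describe helpers that decide whether an existing byte-range lock conflicts with a new request. A small self-contained sketch of the underlying overlap test (a plain array instead of the cifsInodeInfo lock list; not the CIFS code itself):

#include <stdio.h>
#include <stdbool.h>

struct brlock {
	unsigned long long offset;
	unsigned long long length;
};

/* Return true when [offset, offset+length) overlaps an existing lock. */
static bool range_conflicts(const struct brlock *locks, int n,
			    unsigned long long offset, unsigned long long length)
{
	for (int i = 0; i < n; i++) {
		if (offset < locks[i].offset + locks[i].length &&
		    locks[i].offset < offset + length)
			return true;
	}
	return false;
}

int main(void)
{
	struct brlock held[] = { { 0, 100 }, { 200, 50 } };

	printf("conflict at 150..180: %d\n", range_conflicts(held, 2, 150, 30));
	printf("conflict at 220..230: %d\n", range_conflicts(held, 2, 220, 10));
	return 0;
}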
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 5de03ec2014..a090bbe6ee2 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -554,7 +554,10 @@ static int find_cifs_entry(const int xid, struct cifs_tcon *pTcon,
rc);
return rc;
}
- cifs_save_resume_key(cifsFile->srch_inf.last_entry, cifsFile);
+ /* FindFirst/Next set last_entry to NULL on malformed reply */
+ if (cifsFile->srch_inf.last_entry)
+ cifs_save_resume_key(cifsFile->srch_inf.last_entry,
+ cifsFile);
}
while ((index_to_find >= cifsFile->srch_inf.index_of_last_entry) &&
@@ -562,7 +565,10 @@ static int find_cifs_entry(const int xid, struct cifs_tcon *pTcon,
cFYI(1, "calling findnext2");
rc = CIFSFindNext(xid, pTcon, cifsFile->netfid,
&cifsFile->srch_inf);
- cifs_save_resume_key(cifsFile->srch_inf.last_entry, cifsFile);
+ /* FindFirst/Next set last_entry to NULL on malformed reply */
+ if (cifsFile->srch_inf.last_entry)
+ cifs_save_resume_key(cifsFile->srch_inf.last_entry,
+ cifsFile);
if (rc)
return -ENOENT;
}
diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c
index 7cacba12b8f..80d85088193 100644
--- a/fs/cifs/smbencrypt.c
+++ b/fs/cifs/smbencrypt.c
@@ -209,7 +209,7 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16,
{
int rc;
int len;
- __u16 wpwd[129];
+ __le16 wpwd[129];
/* Password cannot be longer than 128 characters */
if (passwd) /* Password must be converted to NT unicode */
@@ -219,8 +219,8 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16,
*wpwd = 0; /* Ensure string is null terminated */
}
- rc = mdfour(p16, (unsigned char *) wpwd, len * sizeof(__u16));
- memset(wpwd, 0, 129 * sizeof(__u16));
+ rc = mdfour(p16, (unsigned char *) wpwd, len * sizeof(__le16));
+ memset(wpwd, 0, 129 * sizeof(__le16));
return rc;
}
diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c
index ca418aaf635..9d8715c45f2 100644
--- a/fs/configfs/inode.c
+++ b/fs/configfs/inode.c
@@ -292,7 +292,7 @@ int __init configfs_inode_init(void)
return bdi_init(&configfs_backing_dev_info);
}
-void __exit configfs_inode_exit(void)
+void configfs_inode_exit(void)
{
bdi_destroy(&configfs_backing_dev_info);
}
diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c
index ecc62178bed..276e15cafd5 100644
--- a/fs/configfs/mount.c
+++ b/fs/configfs/mount.c
@@ -143,28 +143,26 @@ static int __init configfs_init(void)
goto out;
config_kobj = kobject_create_and_add("config", kernel_kobj);
- if (!config_kobj) {
- kmem_cache_destroy(configfs_dir_cachep);
- configfs_dir_cachep = NULL;
- goto out;
- }
+ if (!config_kobj)
+ goto out2;
+
+ err = configfs_inode_init();
+ if (err)
+ goto out3;
err = register_filesystem(&configfs_fs_type);
- if (err) {
- printk(KERN_ERR "configfs: Unable to register filesystem!\n");
- kobject_put(config_kobj);
- kmem_cache_destroy(configfs_dir_cachep);
- configfs_dir_cachep = NULL;
- goto out;
- }
+ if (err)
+ goto out4;
- err = configfs_inode_init();
- if (err) {
- unregister_filesystem(&configfs_fs_type);
- kobject_put(config_kobj);
- kmem_cache_destroy(configfs_dir_cachep);
- configfs_dir_cachep = NULL;
- }
+ return 0;
+out4:
+ printk(KERN_ERR "configfs: Unable to register filesystem!\n");
+ configfs_inode_exit();
+out3:
+ kobject_put(config_kobj);
+out2:
+ kmem_cache_destroy(configfs_dir_cachep);
+ configfs_dir_cachep = NULL;
out:
return err;
}
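The configfs_init() rewrite above replaces duplicated cleanup with one goto ladder that unwinds, in reverse order, only what has already been set up. A minimal standalone sketch of the idiom, using dummy setup/teardown steps rather than the configfs functions:

#include <stdio.h>

static int step_a(void)  { puts("a up");   return 0; }
static int step_b(void)  { puts("b up");   return 0; }
static int step_c(void)  { puts("c fail"); return -1; }   /* pretend this fails */
static void undo_a(void) { puts("a down"); }
static void undo_b(void) { puts("b down"); }

static int init_all(void)
{
	int err;

	err = step_a();
	if (err)
		goto out;
	err = step_b();
	if (err)
		goto out_a;
	err = step_c();
	if (err)
		goto out_b;
	return 0;

out_b:
	undo_b();
out_a:
	undo_a();
out:
	return err;
}

int main(void)
{
	printf("init_all -> %d\n", init_all());
	return 0;
}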
diff --git a/fs/dcache.c b/fs/dcache.c
index 10ba92def3f..89509b5a090 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -2439,16 +2439,14 @@ static int prepend_name(char **buffer, int *buflen, struct qstr *name)
/**
* prepend_path - Prepend path string to a buffer
* @path: the dentry/vfsmount to report
- * @root: root vfsmnt/dentry (may be modified by this function)
+ * @root: root vfsmnt/dentry
* @buffer: pointer to the end of the buffer
* @buflen: pointer to buffer length
*
* Caller holds the rename_lock.
- *
- * If path is not reachable from the supplied root, then the value of
- * root is changed (without modifying refcounts).
*/
-static int prepend_path(const struct path *path, struct path *root,
+static int prepend_path(const struct path *path,
+ const struct path *root,
char **buffer, int *buflen)
{
struct dentry *dentry = path->dentry;
@@ -2483,10 +2481,10 @@ static int prepend_path(const struct path *path, struct path *root,
dentry = parent;
}
-out:
if (!error && !slash)
error = prepend(buffer, buflen, "/", 1);
+out:
br_read_unlock(vfsmount_lock);
return error;
@@ -2500,15 +2498,17 @@ global_root:
WARN(1, "Root dentry has weird name <%.*s>\n",
(int) dentry->d_name.len, dentry->d_name.name);
}
- root->mnt = vfsmnt;
- root->dentry = dentry;
+ if (!slash)
+ error = prepend(buffer, buflen, "/", 1);
+ if (!error)
+ error = vfsmnt->mnt_ns ? 1 : 2;
goto out;
}
/**
* __d_path - return the path of a dentry
* @path: the dentry/vfsmount to report
- * @root: root vfsmnt/dentry (may be modified by this function)
+ * @root: root vfsmnt/dentry
* @buf: buffer to return value in
* @buflen: buffer length
*
@@ -2519,10 +2519,10 @@ global_root:
*
* "buflen" should be positive.
*
- * If path is not reachable from the supplied root, then the value of
- * root is changed (without modifying refcounts).
+ * If the path is not reachable from the supplied root, return %NULL.
*/
-char *__d_path(const struct path *path, struct path *root,
+char *__d_path(const struct path *path,
+ const struct path *root,
char *buf, int buflen)
{
char *res = buf + buflen;
@@ -2533,7 +2533,28 @@ char *__d_path(const struct path *path, struct path *root,
error = prepend_path(path, root, &res, &buflen);
write_sequnlock(&rename_lock);
- if (error)
+ if (error < 0)
+ return ERR_PTR(error);
+ if (error > 0)
+ return NULL;
+ return res;
+}
+
+char *d_absolute_path(const struct path *path,
+ char *buf, int buflen)
+{
+ struct path root = {};
+ char *res = buf + buflen;
+ int error;
+
+ prepend(&res, &buflen, "\0", 1);
+ write_seqlock(&rename_lock);
+ error = prepend_path(path, &root, &res, &buflen);
+ write_sequnlock(&rename_lock);
+
+ if (error > 1)
+ error = -EINVAL;
+ if (error < 0)
return ERR_PTR(error);
return res;
}
@@ -2541,8 +2562,9 @@ char *__d_path(const struct path *path, struct path *root,
/*
* same as __d_path but appends "(deleted)" for unlinked files.
*/
-static int path_with_deleted(const struct path *path, struct path *root,
- char **buf, int *buflen)
+static int path_with_deleted(const struct path *path,
+ const struct path *root,
+ char **buf, int *buflen)
{
prepend(buf, buflen, "\0", 1);
if (d_unlinked(path->dentry)) {
@@ -2579,7 +2601,6 @@ char *d_path(const struct path *path, char *buf, int buflen)
{
char *res = buf + buflen;
struct path root;
- struct path tmp;
int error;
/*
@@ -2594,9 +2615,8 @@ char *d_path(const struct path *path, char *buf, int buflen)
get_fs_root(current->fs, &root);
write_seqlock(&rename_lock);
- tmp = root;
- error = path_with_deleted(path, &tmp, &res, &buflen);
- if (error)
+ error = path_with_deleted(path, &root, &res, &buflen);
+ if (error < 0)
res = ERR_PTR(error);
write_sequnlock(&rename_lock);
path_put(&root);
@@ -2617,7 +2637,6 @@ char *d_path_with_unreachable(const struct path *path, char *buf, int buflen)
{
char *res = buf + buflen;
struct path root;
- struct path tmp;
int error;
if (path->dentry->d_op && path->dentry->d_op->d_dname)
@@ -2625,9 +2644,8 @@ char *d_path_with_unreachable(const struct path *path, char *buf, int buflen)
get_fs_root(current->fs, &root);
write_seqlock(&rename_lock);
- tmp = root;
- error = path_with_deleted(path, &tmp, &res, &buflen);
- if (!error && !path_equal(&tmp, &root))
+ error = path_with_deleted(path, &root, &res, &buflen);
+ if (error > 0)
error = prepend_unreachable(&res, &buflen);
write_sequnlock(&rename_lock);
path_put(&root);
@@ -2758,19 +2776,18 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
write_seqlock(&rename_lock);
if (!d_unlinked(pwd.dentry)) {
unsigned long len;
- struct path tmp = root;
char *cwd = page + PAGE_SIZE;
int buflen = PAGE_SIZE;
prepend(&cwd, &buflen, "\0", 1);
- error = prepend_path(&pwd, &tmp, &cwd, &buflen);
+ error = prepend_path(&pwd, &root, &cwd, &buflen);
write_sequnlock(&rename_lock);
- if (error)
+ if (error < 0)
goto out;
/* Unreachable from current root */
- if (!path_equal(&tmp, &root)) {
+ if (error > 0) {
error = prepend_unreachable(&cwd, &buflen);
if (error)
goto out;
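prepend_path() above builds the path backwards, writing each component in front of the previous one at the tail of the caller's buffer, and the rework reports "unreachable from root" through the return value instead of mutating @root. A hedged userspace sketch of the prepend-into-the-end-of-a-buffer technique (fixed component list, invented names):

#include <stdio.h>
#include <string.h>

static int prepend(char **buffer, int *buflen, const char *str, int len)
{
	*buflen -= len;
	if (*buflen < 0)
		return -36;            /* -ENAMETOOLONG in the kernel */
	*buffer -= len;
	memcpy(*buffer, str, len);
	return 0;
}

int main(void)
{
	char buf[64];
	char *res = buf + sizeof(buf);
	int buflen = sizeof(buf);
	const char *components[] = { "etc", "ssh", "sshd_config" };   /* leaf last */

	prepend(&res, &buflen, "\0", 1);
	/* Walk from leaf to root, prepending "/<name>" each time. */
	for (int i = 2; i >= 0; i--) {
		prepend(&res, &buflen, components[i], strlen(components[i]));
		prepend(&res, &buflen, "/", 1);
	}
	printf("built path: %s\n", res);    /* /etc/ssh/sshd_config */
	return 0;
}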
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index 58609bde3b9..2a834255c75 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -967,7 +967,7 @@ static void ecryptfs_set_default_crypt_stat_vals(
/**
* ecryptfs_new_file_context
- * @ecryptfs_dentry: The eCryptfs dentry
+ * @ecryptfs_inode: The eCryptfs inode
*
* If the crypto context for the file has not yet been established,
* this is where we do that. Establishing a new crypto context
@@ -984,13 +984,13 @@ static void ecryptfs_set_default_crypt_stat_vals(
*
* Returns zero on success; non-zero otherwise
*/
-int ecryptfs_new_file_context(struct dentry *ecryptfs_dentry)
+int ecryptfs_new_file_context(struct inode *ecryptfs_inode)
{
struct ecryptfs_crypt_stat *crypt_stat =
- &ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->crypt_stat;
+ &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
struct ecryptfs_mount_crypt_stat *mount_crypt_stat =
&ecryptfs_superblock_to_private(
- ecryptfs_dentry->d_sb)->mount_crypt_stat;
+ ecryptfs_inode->i_sb)->mount_crypt_stat;
int cipher_name_len;
int rc = 0;
@@ -1299,12 +1299,12 @@ static int ecryptfs_write_headers_virt(char *page_virt, size_t max,
}
static int
-ecryptfs_write_metadata_to_contents(struct dentry *ecryptfs_dentry,
+ecryptfs_write_metadata_to_contents(struct inode *ecryptfs_inode,
char *virt, size_t virt_len)
{
int rc;
- rc = ecryptfs_write_lower(ecryptfs_dentry->d_inode, virt,
+ rc = ecryptfs_write_lower(ecryptfs_inode, virt,
0, virt_len);
if (rc < 0)
printk(KERN_ERR "%s: Error attempting to write header "
@@ -1338,7 +1338,8 @@ static unsigned long ecryptfs_get_zeroed_pages(gfp_t gfp_mask,
/**
* ecryptfs_write_metadata
- * @ecryptfs_dentry: The eCryptfs dentry
+ * @ecryptfs_dentry: The eCryptfs dentry, which should be negative
+ * @ecryptfs_inode: The newly created eCryptfs inode
*
* Write the file headers out. This will likely involve a userspace
* callout, in which the session key is encrypted with one or more
@@ -1348,10 +1349,11 @@ static unsigned long ecryptfs_get_zeroed_pages(gfp_t gfp_mask,
*
* Returns zero on success; non-zero on error
*/
-int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry)
+int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry,
+ struct inode *ecryptfs_inode)
{
struct ecryptfs_crypt_stat *crypt_stat =
- &ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->crypt_stat;
+ &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
unsigned int order;
char *virt;
size_t virt_len;
@@ -1391,7 +1393,7 @@ int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry)
rc = ecryptfs_write_metadata_to_xattr(ecryptfs_dentry, virt,
size);
else
- rc = ecryptfs_write_metadata_to_contents(ecryptfs_dentry, virt,
+ rc = ecryptfs_write_metadata_to_contents(ecryptfs_inode, virt,
virt_len);
if (rc) {
printk(KERN_ERR "%s: Error writing metadata out to lower file; "
@@ -1943,7 +1945,7 @@ static unsigned char *portable_filename_chars = ("-.0123456789ABCD"
/* We could either offset on every reverse map or just pad some 0x00's
* at the front here */
-static const unsigned char filename_rev_map[] = {
+static const unsigned char filename_rev_map[256] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 7 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 15 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 23 */
@@ -1959,7 +1961,7 @@ static const unsigned char filename_rev_map[] = {
0x00, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, /* 103 */
0x2D, 0x2E, 0x2F, 0x30, 0x31, 0x32, 0x33, 0x34, /* 111 */
0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, /* 119 */
- 0x3D, 0x3E, 0x3F
+ 0x3D, 0x3E, 0x3F /* 123 - 255 initialized to 0x00 */
};
/**
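The filename_rev_map change above sizes the table explicitly at 256 entries so a lookup indexed by any byte value stays inside the array; in C, elements beyond the initializer of a sized array are zero-filled, which is the "pad with 0x00" behaviour the comment relies on. A tiny demonstration:

#include <stdio.h>

/* Only the first three slots are spelled out; the other 253 are implicitly 0. */
static const unsigned char rev_map[256] = { 0x11, 0x22, 0x33 };

int main(void)
{
	unsigned char c = 0xfe;                 /* any byte is a safe index now */

	printf("rev_map[2]    = 0x%02x\n", rev_map[2]);
	printf("rev_map[0x%02x] = 0x%02x\n", c, rev_map[c]);
	return 0;
}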
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index 54481a3b2c7..a9f29b12fbf 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -584,9 +584,10 @@ int ecryptfs_init_crypt_ctx(struct ecryptfs_crypt_stat *crypt_stat);
int ecryptfs_write_inode_size_to_metadata(struct inode *ecryptfs_inode);
int ecryptfs_encrypt_page(struct page *page);
int ecryptfs_decrypt_page(struct page *page);
-int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry);
+int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry,
+ struct inode *ecryptfs_inode);
int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry);
-int ecryptfs_new_file_context(struct dentry *ecryptfs_dentry);
+int ecryptfs_new_file_context(struct inode *ecryptfs_inode);
void ecryptfs_write_crypt_stat_flags(char *page_virt,
struct ecryptfs_crypt_stat *crypt_stat,
size_t *written);
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index c6ac98cf9ba..d3f95f941c4 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -139,6 +139,27 @@ out:
return rc;
}
+static void ecryptfs_vma_close(struct vm_area_struct *vma)
+{
+ filemap_write_and_wait(vma->vm_file->f_mapping);
+}
+
+static const struct vm_operations_struct ecryptfs_file_vm_ops = {
+ .close = ecryptfs_vma_close,
+ .fault = filemap_fault,
+};
+
+static int ecryptfs_file_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ int rc;
+
+ rc = generic_file_mmap(file, vma);
+ if (!rc)
+ vma->vm_ops = &ecryptfs_file_vm_ops;
+
+ return rc;
+}
+
struct kmem_cache *ecryptfs_file_info_cache;
/**
@@ -349,7 +370,7 @@ const struct file_operations ecryptfs_main_fops = {
#ifdef CONFIG_COMPAT
.compat_ioctl = ecryptfs_compat_ioctl,
#endif
- .mmap = generic_file_mmap,
+ .mmap = ecryptfs_file_mmap,
.open = ecryptfs_open,
.flush = ecryptfs_flush,
.release = ecryptfs_release,
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index a36d327f152..32f90a3ae63 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -172,22 +172,23 @@ ecryptfs_create_underlying_file(struct inode *lower_dir_inode,
* it. It will also update the eCryptfs directory inode to mimic the
* stat of the lower directory inode.
*
- * Returns zero on success; non-zero on error condition
+ * Returns the new eCryptfs inode on success; an ERR_PTR on error condition
*/
-static int
+static struct inode *
ecryptfs_do_create(struct inode *directory_inode,
struct dentry *ecryptfs_dentry, int mode)
{
int rc;
struct dentry *lower_dentry;
struct dentry *lower_dir_dentry;
+ struct inode *inode;
lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry);
lower_dir_dentry = lock_parent(lower_dentry);
if (IS_ERR(lower_dir_dentry)) {
ecryptfs_printk(KERN_ERR, "Error locking directory of "
"dentry\n");
- rc = PTR_ERR(lower_dir_dentry);
+ inode = ERR_CAST(lower_dir_dentry);
goto out;
}
rc = ecryptfs_create_underlying_file(lower_dir_dentry->d_inode,
@@ -195,20 +196,19 @@ ecryptfs_do_create(struct inode *directory_inode,
if (rc) {
printk(KERN_ERR "%s: Failure to create dentry in lower fs; "
"rc = [%d]\n", __func__, rc);
+ inode = ERR_PTR(rc);
goto out_lock;
}
- rc = ecryptfs_interpose(lower_dentry, ecryptfs_dentry,
- directory_inode->i_sb);
- if (rc) {
- ecryptfs_printk(KERN_ERR, "Failure in ecryptfs_interpose\n");
+ inode = __ecryptfs_get_inode(lower_dentry->d_inode,
+ directory_inode->i_sb);
+ if (IS_ERR(inode))
goto out_lock;
- }
fsstack_copy_attr_times(directory_inode, lower_dir_dentry->d_inode);
fsstack_copy_inode_size(directory_inode, lower_dir_dentry->d_inode);
out_lock:
unlock_dir(lower_dir_dentry);
out:
- return rc;
+ return inode;
}
/**
@@ -219,26 +219,26 @@ out:
*
* Returns zero on success
*/
-static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry)
+static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry,
+ struct inode *ecryptfs_inode)
{
struct ecryptfs_crypt_stat *crypt_stat =
- &ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->crypt_stat;
+ &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
int rc = 0;
- if (S_ISDIR(ecryptfs_dentry->d_inode->i_mode)) {
+ if (S_ISDIR(ecryptfs_inode->i_mode)) {
ecryptfs_printk(KERN_DEBUG, "This is a directory\n");
crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
goto out;
}
ecryptfs_printk(KERN_DEBUG, "Initializing crypto context\n");
- rc = ecryptfs_new_file_context(ecryptfs_dentry);
+ rc = ecryptfs_new_file_context(ecryptfs_inode);
if (rc) {
ecryptfs_printk(KERN_ERR, "Error creating new file "
"context; rc = [%d]\n", rc);
goto out;
}
- rc = ecryptfs_get_lower_file(ecryptfs_dentry,
- ecryptfs_dentry->d_inode);
+ rc = ecryptfs_get_lower_file(ecryptfs_dentry, ecryptfs_inode);
if (rc) {
printk(KERN_ERR "%s: Error attempting to initialize "
"the lower file for the dentry with name "
@@ -246,10 +246,10 @@ static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry)
ecryptfs_dentry->d_name.name, rc);
goto out;
}
- rc = ecryptfs_write_metadata(ecryptfs_dentry);
+ rc = ecryptfs_write_metadata(ecryptfs_dentry, ecryptfs_inode);
if (rc)
printk(KERN_ERR "Error writing headers; rc = [%d]\n", rc);
- ecryptfs_put_lower_file(ecryptfs_dentry->d_inode);
+ ecryptfs_put_lower_file(ecryptfs_inode);
out:
return rc;
}
@@ -269,18 +269,28 @@ static int
ecryptfs_create(struct inode *directory_inode, struct dentry *ecryptfs_dentry,
int mode, struct nameidata *nd)
{
+ struct inode *ecryptfs_inode;
int rc;
- /* ecryptfs_do_create() calls ecryptfs_interpose() */
- rc = ecryptfs_do_create(directory_inode, ecryptfs_dentry, mode);
- if (unlikely(rc)) {
+ ecryptfs_inode = ecryptfs_do_create(directory_inode, ecryptfs_dentry,
+ mode);
+ if (unlikely(IS_ERR(ecryptfs_inode))) {
ecryptfs_printk(KERN_WARNING, "Failed to create file in"
"lower filesystem\n");
+ rc = PTR_ERR(ecryptfs_inode);
goto out;
}
/* At this point, a file exists on "disk"; we need to make sure
* that this on disk file is prepared to be an ecryptfs file */
- rc = ecryptfs_initialize_file(ecryptfs_dentry);
+ rc = ecryptfs_initialize_file(ecryptfs_dentry, ecryptfs_inode);
+ if (rc) {
+ drop_nlink(ecryptfs_inode);
+ unlock_new_inode(ecryptfs_inode);
+ iput(ecryptfs_inode);
+ goto out;
+ }
+ d_instantiate(ecryptfs_dentry, ecryptfs_inode);
+ unlock_new_inode(ecryptfs_inode);
out:
return rc;
}
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 61fa9e1614a..607b1557d29 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -1095,7 +1095,7 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
ext4_idx_pblock(EXT_FIRST_INDEX(neh)));
- neh->eh_depth = cpu_to_le16(neh->eh_depth + 1);
+ neh->eh_depth = cpu_to_le16(le16_to_cpu(neh->eh_depth) + 1);
ext4_mark_inode_dirty(handle, inode);
out:
brelse(bh);
@@ -2955,7 +2955,6 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
/* Pre-conditions */
BUG_ON(!ext4_ext_is_uninitialized(ex));
BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));
- BUG_ON(map->m_lblk + map->m_len > ee_block + ee_len);
/*
* Attempt to transfer newly initialized blocks from the currently
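The ext4_ext_grow_indepth() fix above converts the on-disk little-endian eh_depth to CPU order before adding one, instead of adding one to the raw __le16 value. A hedged standalone sketch of the store/decode/increment/re-encode pattern, with hand-rolled helpers rather than the kernel's le16 accessors:

#include <stdio.h>
#include <stdint.h>

/* Store/load a 16-bit value in little-endian byte order regardless of host. */
static void le16_store(uint8_t *p, uint16_t v) { p[0] = v & 0xff; p[1] = v >> 8; }
static uint16_t le16_load(const uint8_t *p)    { return (uint16_t)(p[0] | (p[1] << 8)); }

int main(void)
{
	uint8_t eh_depth[2];

	le16_store(eh_depth, 0x0102);                 /* on-disk value 258 */
	/* Correct: decode, increment in CPU order, re-encode. */
	le16_store(eh_depth, le16_load(eh_depth) + 1);
	printf("depth is now %u\n", le16_load(eh_depth));   /* 259 */
	return 0;
}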
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index fffec40d599..92655fd8965 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1339,8 +1339,11 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
clear_buffer_unwritten(bh);
}
- /* skip page if block allocation undone */
- if (buffer_delay(bh) || buffer_unwritten(bh))
+ /*
+ * skip page if block allocation undone and
+ * block is dirty
+ */
+ if (ext4_bh_delay_or_unwritten(NULL, bh))
skip_page = 1;
bh = bh->b_this_page;
block_start += bh->b_size;
@@ -2387,7 +2390,6 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
pgoff_t index;
struct inode *inode = mapping->host;
handle_t *handle;
- loff_t page_len;
index = pos >> PAGE_CACHE_SHIFT;
@@ -2434,13 +2436,6 @@ retry:
*/
if (pos + len > inode->i_size)
ext4_truncate_failed_write(inode);
- } else {
- page_len = pos & (PAGE_CACHE_SIZE - 1);
- if (page_len > 0) {
- ret = ext4_discard_partial_page_buffers_no_lock(handle,
- inode, page, pos - page_len, page_len,
- EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED);
- }
}
if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
@@ -2483,7 +2478,6 @@ static int ext4_da_write_end(struct file *file,
loff_t new_i_size;
unsigned long start, end;
int write_mode = (int)(unsigned long)fsdata;
- loff_t page_len;
if (write_mode == FALL_BACK_TO_NONDELALLOC) {
if (ext4_should_order_data(inode)) {
@@ -2508,7 +2502,7 @@ static int ext4_da_write_end(struct file *file,
*/
new_i_size = pos + copied;
- if (new_i_size > EXT4_I(inode)->i_disksize) {
+ if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
if (ext4_da_should_update_i_disksize(page, end)) {
down_write(&EXT4_I(inode)->i_data_sem);
if (new_i_size > EXT4_I(inode)->i_disksize) {
@@ -2532,16 +2526,6 @@ static int ext4_da_write_end(struct file *file,
}
ret2 = generic_write_end(file, mapping, pos, len, copied,
page, fsdata);
-
- page_len = PAGE_CACHE_SIZE -
- ((pos + copied - 1) & (PAGE_CACHE_SIZE - 1));
-
- if (page_len > 0) {
- ret = ext4_discard_partial_page_buffers_no_lock(handle,
- inode, page, pos + copied - 1, page_len,
- EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED);
- }
-
copied = ret2;
if (ret2 < 0)
ret = ret2;
@@ -2781,10 +2765,11 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
iocb->private, io_end->inode->i_ino, iocb, offset,
size);
+ iocb->private = NULL;
+
/* if not aio dio with unwritten extents, just free io and return */
if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
ext4_free_io_end(io_end);
- iocb->private = NULL;
out:
if (is_async)
aio_complete(iocb, ret, 0);
@@ -2808,7 +2793,6 @@ out:
/* queue the work to convert unwritten extents to written */
queue_work(wq, &io_end->work);
- iocb->private = NULL;
/* XXX: probably should move into the real I/O completion handler */
inode_dio_done(inode);
@@ -3203,26 +3187,8 @@ int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
- if (!page_has_buffers(page)) {
- /*
- * If the range to be discarded covers a partial block
- * we need to get the page buffers. This is because
- * partial blocks cannot be released and the page needs
- * to be updated with the contents of the block before
- * we write the zeros on top of it.
- */
- if ((from & (blocksize - 1)) ||
- ((from + length) & (blocksize - 1))) {
- create_empty_buffers(page, blocksize, 0);
- } else {
- /*
- * If there are no partial blocks,
- * there is nothing to update,
- * so we can return now
- */
- return 0;
- }
- }
+ if (!page_has_buffers(page))
+ create_empty_buffers(page, blocksize, 0);
/* Find the buffer that contains "offset" */
bh = page_buffers(page);
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 7ce1d0b19c9..7e106c810c6 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -385,6 +385,18 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
block_end = block_start + blocksize;
if (block_start >= len) {
+ /*
+ * Comments copied from block_write_full_page_endio:
+ *
+ * The page straddles i_size. It must be zeroed out on
+ * each and every writepage invocation because it may
+ * be mmapped. "A file is mapped in multiples of the
+ * page size. For a file that is not a multiple of
+ * the page size, the remaining memory is zeroed when
+ * mapped, and writes to that region are not written
+ * out to the file."
+ */
+ zero_user_segment(page, block_start, block_end);
clear_buffer_dirty(bh);
set_buffer_uptodate(bh);
continue;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 1c7bbd00e7e..d0666c8d15f 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1155,9 +1155,9 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
seq_puts(seq, ",block_validity");
if (!test_opt(sb, INIT_INODE_TABLE))
- seq_puts(seq, ",noinit_inode_table");
+ seq_puts(seq, ",noinit_itable");
else if (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT)
- seq_printf(seq, ",init_inode_table=%u",
+ seq_printf(seq, ",init_itable=%u",
(unsigned) sbi->s_li_wait_mult);
ext4_show_quota_options(seq, sb);
@@ -1333,8 +1333,7 @@ enum {
Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
Opt_inode_readahead_blks, Opt_journal_ioprio,
Opt_dioread_nolock, Opt_dioread_lock,
- Opt_discard, Opt_nodiscard,
- Opt_init_inode_table, Opt_noinit_inode_table,
+ Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
};
static const match_table_t tokens = {
@@ -1407,9 +1406,9 @@ static const match_table_t tokens = {
{Opt_dioread_lock, "dioread_lock"},
{Opt_discard, "discard"},
{Opt_nodiscard, "nodiscard"},
- {Opt_init_inode_table, "init_itable=%u"},
- {Opt_init_inode_table, "init_itable"},
- {Opt_noinit_inode_table, "noinit_itable"},
+ {Opt_init_itable, "init_itable=%u"},
+ {Opt_init_itable, "init_itable"},
+ {Opt_noinit_itable, "noinit_itable"},
{Opt_err, NULL},
};
@@ -1892,7 +1891,7 @@ set_qf_format:
case Opt_dioread_lock:
clear_opt(sb, DIOREAD_NOLOCK);
break;
- case Opt_init_inode_table:
+ case Opt_init_itable:
set_opt(sb, INIT_INODE_TABLE);
if (args[0].from) {
if (match_int(&args[0], &option))
@@ -1903,7 +1902,7 @@ set_qf_format:
return 0;
sbi->s_li_wait_mult = option;
break;
- case Opt_noinit_inode_table:
+ case Opt_noinit_itable:
clear_opt(sb, INIT_INODE_TABLE);
break;
default:
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 271fde50f0e..e4c7af393c2 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -156,6 +156,7 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
* bdi_start_writeback - start writeback
* @bdi: the backing device to write from
* @nr_pages: the number of pages to write
+ * @reason: reason why some writeback work was initiated
*
* Description:
* This does WB_SYNC_NONE opportunistic writeback. The IO is only
@@ -1221,6 +1222,7 @@ static void wait_sb_inodes(struct super_block *sb)
* writeback_inodes_sb_nr - writeback dirty inodes from given super_block
* @sb: the superblock
* @nr: the number of pages to write
+ * @reason: reason why some writeback work was initiated
*
* Start writeback on some inodes on this super_block. No guarantees are made
* on how many (if any) will be written, and this function does not wait
@@ -1249,6 +1251,7 @@ EXPORT_SYMBOL(writeback_inodes_sb_nr);
/**
* writeback_inodes_sb - writeback dirty inodes from given super_block
* @sb: the superblock
+ * @reason: reason why some writeback work was initiated
*
* Start writeback on some inodes on this super_block. No guarantees are made
* on how many (if any) will be written, and this function does not wait
@@ -1263,6 +1266,7 @@ EXPORT_SYMBOL(writeback_inodes_sb);
/**
* writeback_inodes_sb_if_idle - start writeback if none underway
* @sb: the superblock
+ * @reason: reason why some writeback work was initiated
*
* Invoke writeback_inodes_sb if no writeback is currently underway.
* Returns 1 if writeback was started, 0 if not.
@@ -1283,6 +1287,7 @@ EXPORT_SYMBOL(writeback_inodes_sb_if_idle);
* writeback_inodes_sb_if_idle - start writeback if none underway
* @sb: the superblock
* @nr: the number of pages to write
+ * @reason: reason why some writeback work was initiated
*
* Invoke writeback_inodes_sb if no writeback is currently underway.
* Returns 1 if writeback was started, 0 if not.
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 5cb8614508c..2aaf3eaaf13 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -1512,7 +1512,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
else if (outarg->offset + num > file_size)
num = file_size - outarg->offset;
- while (num) {
+ while (num && req->num_pages < FUSE_MAX_PAGES_PER_REQ) {
struct page *page;
unsigned int this_num;
@@ -1526,6 +1526,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
num -= this_num;
total_len += this_num;
+ index++;
}
req->misc.retrieve_in.offset = outarg->offset;
req->misc.retrieve_in.size = total_len;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 594f07a81c2..0c84100acd4 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1556,7 +1556,7 @@ static loff_t fuse_file_llseek(struct file *file, loff_t offset, int origin)
struct inode *inode = file->f_path.dentry->d_inode;
mutex_lock(&inode->i_mutex);
- if (origin != SEEK_CUR || origin != SEEK_SET) {
+ if (origin != SEEK_CUR && origin != SEEK_SET) {
retval = fuse_update_attributes(inode, NULL, file, NULL);
if (retval)
goto exit;
@@ -1567,6 +1567,10 @@ static loff_t fuse_file_llseek(struct file *file, loff_t offset, int origin)
offset += i_size_read(inode);
break;
case SEEK_CUR:
+ if (offset == 0) {
+ retval = file->f_pos;
+ goto exit;
+ }
offset += file->f_pos;
break;
case SEEK_DATA:
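Both this fuse_file_llseek() hunk and the nfs_file_llseek() hunk later in the series replace `origin != SEEK_CUR || origin != SEEK_SET` with `&&`: the `||` form is true for every value, so the attribute revalidation ran unconditionally. A short demonstration of the always-true predicate (plain C, values illustrative):

#include <stdio.h>

int main(void)
{
	for (int origin = 0; origin < 3; origin++) {
		int buggy = (origin != 1 || origin != 0);   /* always 1: no value equals both */
		int fixed = (origin != 1 && origin != 0);   /* 1 only when origin is neither */

		printf("origin=%d buggy=%d fixed=%d\n", origin, buggy, fixed);
	}
	return 0;
}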
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 3e6d7275647..aa83109b943 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -1138,28 +1138,28 @@ static int __init fuse_fs_init(void)
{
int err;
- err = register_filesystem(&fuse_fs_type);
- if (err)
- goto out;
-
- err = register_fuseblk();
- if (err)
- goto out_unreg;
-
fuse_inode_cachep = kmem_cache_create("fuse_inode",
sizeof(struct fuse_inode),
0, SLAB_HWCACHE_ALIGN,
fuse_inode_init_once);
err = -ENOMEM;
if (!fuse_inode_cachep)
- goto out_unreg2;
+ goto out;
+
+ err = register_fuseblk();
+ if (err)
+ goto out2;
+
+ err = register_filesystem(&fuse_fs_type);
+ if (err)
+ goto out3;
return 0;
- out_unreg2:
+ out3:
unregister_fuseblk();
- out_unreg:
- unregister_filesystem(&fuse_fs_type);
+ out2:
+ kmem_cache_destroy(fuse_inode_cachep);
out:
return err;
}
diff --git a/fs/namespace.c b/fs/namespace.c
index 6d3a1963879..cfc6d4448aa 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1048,15 +1048,12 @@ static int show_mountinfo(struct seq_file *m, void *v)
if (err)
goto out;
seq_putc(m, ' ');
- seq_path_root(m, &mnt_path, &root, " \t\n\\");
- if (root.mnt != p->root.mnt || root.dentry != p->root.dentry) {
- /*
- * Mountpoint is outside root, discard that one. Ugly,
- * but less so than trying to do that in iterator in a
- * race-free way (due to renames).
- */
- return SEQ_SKIP;
- }
+
+ /* mountpoints outside of the chroot jail will give SEQ_SKIP on this */
+ err = seq_path_root(m, &mnt_path, &root, " \t\n\\");
+ if (err)
+ goto out;
+
seq_puts(m, mnt->mnt_flags & MNT_READONLY ? " ro" : " rw");
show_mnt_opts(m, mnt);
@@ -2776,3 +2773,8 @@ void kern_unmount(struct vfsmount *mnt)
}
}
EXPORT_SYMBOL(kern_unmount);
+
+bool our_mnt(struct vfsmount *mnt)
+{
+ return check_mnt(mnt);
+}
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index 5b5fa33b6b9..cbd1a61c110 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -548,7 +548,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
error = bdi_setup_and_register(&server->bdi, "ncpfs", BDI_CAP_MAP_COPY);
if (error)
- goto out_bdi;
+ goto out_fput;
server->ncp_filp = ncp_filp;
server->ncp_sock = sock;
@@ -559,7 +559,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
error = -EBADF;
server->info_filp = fget(data.info_fd);
if (!server->info_filp)
- goto out_fput;
+ goto out_bdi;
error = -ENOTSOCK;
sock_inode = server->info_filp->f_path.dentry->d_inode;
if (!S_ISSOCK(sock_inode->i_mode))
@@ -746,9 +746,9 @@ out_nls:
out_fput2:
if (server->info_filp)
fput(server->info_filp);
-out_fput:
- bdi_destroy(&server->bdi);
out_bdi:
+ bdi_destroy(&server->bdi);
+out_fput:
/* 23/12/1998 Marcin Dalecki <dalecki@cs.net.pl>:
*
* The previously used put_filp(ncp_filp); was bogus, since
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index eca56d4b39c..606ef0f20ae 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -147,7 +147,7 @@ static loff_t nfs_file_llseek(struct file *filp, loff_t offset, int origin)
* origin == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
* the cached file length
*/
- if (origin != SEEK_SET || origin != SEEK_CUR) {
+ if (origin != SEEK_SET && origin != SEEK_CUR) {
struct inode *inode = filp->f_mapping->host;
int retval = nfs_revalidate_file_size(inode, filp);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index b28bb19b04f..dcda0ba7af6 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -39,6 +39,8 @@
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/string.h>
+#include <linux/ratelimit.h>
+#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/gss_api.h>
@@ -895,6 +897,8 @@ out:
static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
{
+ if (delegation == NULL)
+ return 0;
if ((delegation->type & fmode) != fmode)
return 0;
if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
@@ -1037,8 +1041,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
}
rcu_read_lock();
delegation = rcu_dereference(nfsi->delegation);
- if (delegation == NULL ||
- !can_open_delegated(delegation, fmode)) {
+ if (!can_open_delegated(delegation, fmode)) {
rcu_read_unlock();
break;
}
@@ -1092,7 +1095,12 @@ static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data
if (delegation)
delegation_flags = delegation->flags;
rcu_read_unlock();
- if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
+ if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) {
+ pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
+ "returning a delegation for "
+ "OPEN(CLAIM_DELEGATE_CUR)\n",
+ NFS_CLIENT(inode)->cl_server);
+ } else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
nfs_inode_set_delegation(state->inode,
data->owner->so_cred,
&data->o_res);
@@ -1424,11 +1432,9 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
goto out_no_action;
rcu_read_lock();
delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
- if (delegation != NULL &&
- test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) == 0) {
- rcu_read_unlock();
- goto out_no_action;
- }
+ if (data->o_arg.claim != NFS4_OPEN_CLAIM_DELEGATE_CUR &&
+ can_open_delegated(delegation, data->o_arg.fmode))
+ goto unlock_no_action;
rcu_read_unlock();
}
/* Update sequence id. */
@@ -1445,6 +1451,8 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
return;
rpc_call_start(task);
return;
+unlock_no_action:
+ rcu_read_unlock();
out_no_action:
task->tk_action = NULL;
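can_open_delegated() above now returns 0 for a NULL delegation, so callers such as nfs4_try_open_cached() and nfs4_open_prepare() no longer carry their own NULL checks. A minimal hedged sketch of folding the NULL test into the predicate (invented structure, not the NFS data types):

#include <stdio.h>
#include <stddef.h>

struct delegation {
	unsigned int type;
};

/* Safe to call with NULL: a missing delegation simply cannot satisfy fmode. */
static int can_open_delegated(const struct delegation *d, unsigned int fmode)
{
	if (d == NULL)
		return 0;
	return (d->type & fmode) == fmode;
}

int main(void)
{
	struct delegation d = { .type = 0x3 };

	printf("NULL   -> %d\n", can_open_delegated(NULL, 0x1));
	printf("have d -> %d\n", can_open_delegated(&d, 0x1));
	return 0;
}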
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 39914be40b0..6a7107ae6b7 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1156,11 +1156,13 @@ restart:
if (status >= 0) {
status = nfs4_reclaim_locks(state, ops);
if (status >= 0) {
+ spin_lock(&state->state_lock);
list_for_each_entry(lock, &state->lock_states, ls_locks) {
if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
printk("%s: Lock reclaim failed!\n",
__func__);
}
+ spin_unlock(&state->state_lock);
nfs4_put_open_state(state);
goto restart;
}
@@ -1224,10 +1226,12 @@ static void nfs4_clear_open_state(struct nfs4_state *state)
clear_bit(NFS_O_RDONLY_STATE, &state->flags);
clear_bit(NFS_O_WRONLY_STATE, &state->flags);
clear_bit(NFS_O_RDWR_STATE, &state->flags);
+ spin_lock(&state->state_lock);
list_for_each_entry(lock, &state->lock_states, ls_locks) {
lock->ls_seqid.flags = 0;
lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
}
+ spin_unlock(&state->state_lock);
}
static void nfs4_reset_seqids(struct nfs_server *server,
@@ -1350,12 +1354,14 @@ static void nfs4_warn_keyexpired(const char *s)
static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
{
switch (error) {
+ case 0:
+ break;
case -NFS4ERR_CB_PATH_DOWN:
nfs_handle_cb_pathdown(clp);
- return 0;
+ break;
case -NFS4ERR_NO_GRACE:
nfs4_state_end_reclaim_reboot(clp);
- return 0;
+ break;
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_LEASE_MOVED:
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
@@ -1375,13 +1381,15 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
case -NFS4ERR_SEQ_MISORDERED:
set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
/* Zero session reset errors */
- return 0;
+ break;
case -EKEYEXPIRED:
/* Nothing we can do */
nfs4_warn_keyexpired(clp->cl_hostname);
- return 0;
+ break;
+ default:
+ return error;
}
- return error;
+ return 0;
}
static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops)
@@ -1428,7 +1436,7 @@ static int nfs4_check_lease(struct nfs_client *clp)
struct rpc_cred *cred;
const struct nfs4_state_maintenance_ops *ops =
clp->cl_mvops->state_renewal_ops;
- int status = -NFS4ERR_EXPIRED;
+ int status;
/* Is the client already known to have an expired lease? */
if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
@@ -1438,6 +1446,7 @@ static int nfs4_check_lease(struct nfs_client *clp)
spin_unlock(&clp->cl_lock);
if (cred == NULL) {
cred = nfs4_get_setclientid_cred(clp);
+ status = -ENOKEY;
if (cred == NULL)
goto out;
}
@@ -1525,16 +1534,16 @@ void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
{
if (!flags)
return;
- else if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
+ if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
nfs41_handle_server_reboot(clp);
- else if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
+ if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
SEQ4_STATUS_ADMIN_STATE_REVOKED |
SEQ4_STATUS_LEASE_MOVED))
nfs41_handle_state_revoked(clp);
- else if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
+ if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
nfs41_handle_recallable_state_revoked(clp);
- else if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
+ if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
SEQ4_STATUS_BACKCHANNEL_FAULT |
SEQ4_STATUS_CB_PATH_DOWN_SESSION))
nfs41_handle_cb_path_down(clp);
@@ -1662,10 +1671,10 @@ static void nfs4_state_manager(struct nfs_client *clp)
if (test_and_clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state)) {
status = nfs4_check_lease(clp);
+ if (status < 0)
+ goto out_error;
if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
continue;
- if (status < 0 && status != -NFS4ERR_CB_PATH_DOWN)
- goto out_error;
}
/* Initialize or reset the session */
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index 41d6743d303..ac258beeda3 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -625,6 +625,9 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
if (argv[n].v_nmembs > nsegs * nilfs->ns_blocks_per_segment)
goto out_free;
+ if (argv[n].v_nmembs >= UINT_MAX / argv[n].v_size)
+ goto out_free;
+
len = argv[n].v_size * argv[n].v_nmembs;
base = (void __user *)(unsigned long)argv[n].v_base;
if (len == 0) {
@@ -842,6 +845,19 @@ long nilfs_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case FS_IOC32_GETVERSION:
cmd = FS_IOC_GETVERSION;
break;
+ case NILFS_IOCTL_CHANGE_CPMODE:
+ case NILFS_IOCTL_DELETE_CHECKPOINT:
+ case NILFS_IOCTL_GET_CPINFO:
+ case NILFS_IOCTL_GET_CPSTAT:
+ case NILFS_IOCTL_GET_SUINFO:
+ case NILFS_IOCTL_GET_SUSTAT:
+ case NILFS_IOCTL_GET_VINFO:
+ case NILFS_IOCTL_GET_BDESCS:
+ case NILFS_IOCTL_CLEAN_SEGMENTS:
+ case NILFS_IOCTL_SYNC:
+ case NILFS_IOCTL_RESIZE:
+ case NILFS_IOCTL_SET_ALLOC_RANGE:
+ break;
default:
return -ENOIOCTLCMD;
}
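The nilfs_ioctl_clean_segments() hunk above rejects requests whose v_size * v_nmembs product would overflow by comparing v_nmembs against UINT_MAX / v_size before multiplying. A small hedged sketch of that guard in plain unsigned arithmetic, with invented names:

#include <stdio.h>
#include <limits.h>

/* Return nonzero if size * nmembs would overflow an unsigned int. */
static int mul_would_overflow(unsigned int size, unsigned int nmembs)
{
	return size != 0 && nmembs > UINT_MAX / size;
}

int main(void)
{
	printf("%u * %u overflows: %d\n", 8u, 1000u, mul_would_overflow(8, 1000));
	printf("%u * %u overflows: %d\n", 65536u, 65536u, mul_would_overflow(65536, 65536));
	return 0;
}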
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index ed553c60de8..3165aebb43c 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -5699,7 +5699,7 @@ int ocfs2_remove_btree_range(struct inode *inode,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
- goto out;
+ goto out_commit;
}
dquot_free_space_nodirty(inode,
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index c1efe939c77..78b68af3b0e 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -290,7 +290,15 @@ static int ocfs2_readpage(struct file *file, struct page *page)
}
if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
+ /*
+ * Unlock the page and cycle ip_alloc_sem so that we don't
+ * busyloop waiting for ip_alloc_sem to unlock
+ */
ret = AOP_TRUNCATED_PAGE;
+ unlock_page(page);
+ unlock = 0;
+ down_read(&oi->ip_alloc_sem);
+ up_read(&oi->ip_alloc_sem);
goto out_inode_unlock;
}
@@ -563,6 +571,7 @@ static void ocfs2_dio_end_io(struct kiocb *iocb,
{
struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
int level;
+ wait_queue_head_t *wq = ocfs2_ioend_wq(inode);
/* this io's submitter should not have unlocked this before we could */
BUG_ON(!ocfs2_iocb_is_rw_locked(iocb));
@@ -570,6 +579,15 @@ static void ocfs2_dio_end_io(struct kiocb *iocb,
if (ocfs2_iocb_is_sem_locked(iocb))
ocfs2_iocb_clear_sem_locked(iocb);
+ if (ocfs2_iocb_is_unaligned_aio(iocb)) {
+ ocfs2_iocb_clear_unaligned_aio(iocb);
+
+ if (atomic_dec_and_test(&OCFS2_I(inode)->ip_unaligned_aio) &&
+ waitqueue_active(wq)) {
+ wake_up_all(wq);
+ }
+ }
+
ocfs2_iocb_clear_rw_locked(iocb);
level = ocfs2_iocb_rw_locked_level(iocb);
@@ -863,6 +881,12 @@ struct ocfs2_write_ctxt {
struct page *w_target_page;
/*
+ * w_target_locked is used in the page_mkwrite path to indicate that
+ * w_target_page must not be unlocked in ocfs2_write_end_nolock().
+ */
+ unsigned int w_target_locked:1;
+
+ /*
* ocfs2_write_end() uses this to know what the real range to
* write in the target should be.
*/
@@ -895,6 +919,24 @@ void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages)
static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc)
{
+ int i;
+
+ /*
+ * w_target_locked is only set to true in the page_mkwrite() case.
+ * The intent is to allow us to lock the target page from write_begin()
+ * to write_end(). The caller must hold a ref on w_target_page.
+ */
+ if (wc->w_target_locked) {
+ BUG_ON(!wc->w_target_page);
+ for (i = 0; i < wc->w_num_pages; i++) {
+ if (wc->w_target_page == wc->w_pages[i]) {
+ wc->w_pages[i] = NULL;
+ break;
+ }
+ }
+ mark_page_accessed(wc->w_target_page);
+ page_cache_release(wc->w_target_page);
+ }
ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages);
brelse(wc->w_di_bh);
@@ -1132,20 +1174,17 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping,
*/
lock_page(mmap_page);
+ /* Exit and let the caller retry */
if (mmap_page->mapping != mapping) {
+ WARN_ON(mmap_page->mapping);
unlock_page(mmap_page);
- /*
- * Sanity check - the locking in
- * ocfs2_pagemkwrite() should ensure
- * that this code doesn't trigger.
- */
- ret = -EINVAL;
- mlog_errno(ret);
+ ret = -EAGAIN;
goto out;
}
page_cache_get(mmap_page);
wc->w_pages[i] = mmap_page;
+ wc->w_target_locked = true;
} else {
wc->w_pages[i] = find_or_create_page(mapping, index,
GFP_NOFS);
@@ -1160,6 +1199,8 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping,
wc->w_target_page = wc->w_pages[i];
}
out:
+ if (ret)
+ wc->w_target_locked = false;
return ret;
}
@@ -1817,11 +1858,23 @@ try_again:
*/
ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len,
cluster_of_pages, mmap_page);
- if (ret) {
+ if (ret && ret != -EAGAIN) {
mlog_errno(ret);
goto out_quota;
}
+ /*
+ * ocfs2_grab_pages_for_write() returns -EAGAIN if it could not lock
+ * the target page. In this case, we exit with no error and no target
+ * page. This causes the caller, page_mkwrite(), to retry
+ * the operation.
+ */
+ if (ret == -EAGAIN) {
+ BUG_ON(wc->w_target_page);
+ ret = 0;
+ goto out_quota;
+ }
+
ret = ocfs2_write_cluster_by_desc(mapping, data_ac, meta_ac, wc, pos,
len);
if (ret) {
diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h
index 75cf3ad987a..ffb2da370a9 100644
--- a/fs/ocfs2/aops.h
+++ b/fs/ocfs2/aops.h
@@ -78,6 +78,7 @@ enum ocfs2_iocb_lock_bits {
OCFS2_IOCB_RW_LOCK = 0,
OCFS2_IOCB_RW_LOCK_LEVEL,
OCFS2_IOCB_SEM,
+ OCFS2_IOCB_UNALIGNED_IO,
OCFS2_IOCB_NUM_LOCKS
};
@@ -91,4 +92,17 @@ enum ocfs2_iocb_lock_bits {
clear_bit(OCFS2_IOCB_SEM, (unsigned long *)&iocb->private)
#define ocfs2_iocb_is_sem_locked(iocb) \
test_bit(OCFS2_IOCB_SEM, (unsigned long *)&iocb->private)
+
+#define ocfs2_iocb_set_unaligned_aio(iocb) \
+ set_bit(OCFS2_IOCB_UNALIGNED_IO, (unsigned long *)&iocb->private)
+#define ocfs2_iocb_clear_unaligned_aio(iocb) \
+ clear_bit(OCFS2_IOCB_UNALIGNED_IO, (unsigned long *)&iocb->private)
+#define ocfs2_iocb_is_unaligned_aio(iocb) \
+ test_bit(OCFS2_IOCB_UNALIGNED_IO, (unsigned long *)&iocb->private)
+
+#define OCFS2_IOEND_WQ_HASH_SZ 37
+#define ocfs2_ioend_wq(v) (&ocfs2__ioend_wq[((unsigned long)(v)) %\
+ OCFS2_IOEND_WQ_HASH_SZ])
+extern wait_queue_head_t ocfs2__ioend_wq[OCFS2_IOEND_WQ_HASH_SZ];
+
#endif /* OCFS2_FILE_H */
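The new ocfs2_ioend_wq() macro above hashes an inode pointer into a small fixed-size table of wait queues instead of embedding one wait queue per inode. A hedged userspace analogue using condition variables (hash function and sizes chosen only for illustration):

#include <pthread.h>
#include <stdio.h>

#define WQ_HASH_SZ 37

static pthread_cond_t  wq_table[WQ_HASH_SZ];
static pthread_mutex_t wq_locks[WQ_HASH_SZ];

/* Pick one shared wait object for this pointer, like ocfs2_ioend_wq(). */
static unsigned int wq_slot(const void *p)
{
	return (unsigned int)((unsigned long)p % WQ_HASH_SZ);
}

int main(void)
{
	int dummy_a, dummy_b;

	for (int i = 0; i < WQ_HASH_SZ; i++) {
		pthread_cond_init(&wq_table[i], NULL);
		pthread_mutex_init(&wq_locks[i], NULL);
	}
	printf("object A waits on slot %u\n", wq_slot(&dummy_a));
	printf("object B waits on slot %u\n", wq_slot(&dummy_b));
	return 0;
}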
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 9a3e6bbff27..a4e855e3690 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -216,6 +216,7 @@ struct o2hb_region {
struct list_head hr_all_item;
unsigned hr_unclean_stop:1,
+ hr_aborted_start:1,
hr_item_pinned:1,
hr_item_dropped:1;
@@ -254,6 +255,10 @@ struct o2hb_region {
* a more complete api that doesn't lead to this sort of fragility. */
atomic_t hr_steady_iterations;
+ /* terminate o2hb thread if it does not reach steady state
+ * (hr_steady_iterations == 0) within hr_unsteady_iterations */
+ atomic_t hr_unsteady_iterations;
+
char hr_dev_name[BDEVNAME_SIZE];
unsigned int hr_timeout_ms;
@@ -324,6 +329,10 @@ static void o2hb_write_timeout(struct work_struct *work)
static void o2hb_arm_write_timeout(struct o2hb_region *reg)
{
+ /* Arm writeout only after thread reaches steady state */
+ if (atomic_read(&reg->hr_steady_iterations) != 0)
+ return;
+
mlog(ML_HEARTBEAT, "Queue write timeout for %u ms\n",
O2HB_MAX_WRITE_TIMEOUT_MS);
@@ -537,9 +546,14 @@ static int o2hb_verify_crc(struct o2hb_region *reg,
return read == computed;
}
-/* We want to make sure that nobody is heartbeating on top of us --
- * this will help detect an invalid configuration. */
-static void o2hb_check_last_timestamp(struct o2hb_region *reg)
+/*
+ * Compare the slot data with what we wrote in the last iteration.
+ * If the match fails, print an appropriate error message. This is to
+ * detect errors like... another node heartbeating on the same slot,
+ * flaky device that is losing writes, etc.
+ * Returns 1 if check succeeds, 0 otherwise.
+ */
+static int o2hb_check_own_slot(struct o2hb_region *reg)
{
struct o2hb_disk_slot *slot;
struct o2hb_disk_heartbeat_block *hb_block;
@@ -548,13 +562,13 @@ static void o2hb_check_last_timestamp(struct o2hb_region *reg)
slot = &reg->hr_slots[o2nm_this_node()];
/* Don't check on our 1st timestamp */
if (!slot->ds_last_time)
- return;
+ return 0;
hb_block = slot->ds_raw_block;
if (le64_to_cpu(hb_block->hb_seq) == slot->ds_last_time &&
le64_to_cpu(hb_block->hb_generation) == slot->ds_last_generation &&
hb_block->hb_node == slot->ds_node_num)
- return;
+ return 1;
#define ERRSTR1 "Another node is heartbeating on device"
#define ERRSTR2 "Heartbeat generation mismatch on device"
@@ -574,6 +588,8 @@ static void o2hb_check_last_timestamp(struct o2hb_region *reg)
(unsigned long long)slot->ds_last_time, hb_block->hb_node,
(unsigned long long)le64_to_cpu(hb_block->hb_generation),
(unsigned long long)le64_to_cpu(hb_block->hb_seq));
+
+ return 0;
}
static inline void o2hb_prepare_block(struct o2hb_region *reg,
@@ -719,17 +735,24 @@ static void o2hb_shutdown_slot(struct o2hb_disk_slot *slot)
o2nm_node_put(node);
}
-static void o2hb_set_quorum_device(struct o2hb_region *reg,
- struct o2hb_disk_slot *slot)
+static void o2hb_set_quorum_device(struct o2hb_region *reg)
{
- assert_spin_locked(&o2hb_live_lock);
-
if (!o2hb_global_heartbeat_active())
return;
- if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap))
+ /* Prevent race with o2hb_heartbeat_group_drop_item() */
+ if (kthread_should_stop())
+ return;
+
+ /* Tag region as quorum only after thread reaches steady state */
+ if (atomic_read(&reg->hr_steady_iterations) != 0)
return;
+ spin_lock(&o2hb_live_lock);
+
+ if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap))
+ goto unlock;
+
/*
* A region can be added to the quorum only when it sees all
* live nodes heartbeat on it. In other words, the region has been
@@ -737,13 +760,10 @@ static void o2hb_set_quorum_device(struct o2hb_region *reg,
*/
if (memcmp(reg->hr_live_node_bitmap, o2hb_live_node_bitmap,
sizeof(o2hb_live_node_bitmap)))
- return;
-
- if (slot->ds_changed_samples < O2HB_LIVE_THRESHOLD)
- return;
+ goto unlock;
- printk(KERN_NOTICE "o2hb: Region %s is now a quorum device\n",
- config_item_name(&reg->hr_item));
+ printk(KERN_NOTICE "o2hb: Region %s (%s) is now a quorum device\n",
+ config_item_name(&reg->hr_item), reg->hr_dev_name);
set_bit(reg->hr_region_num, o2hb_quorum_region_bitmap);
@@ -754,6 +774,8 @@ static void o2hb_set_quorum_device(struct o2hb_region *reg,
if (o2hb_pop_count(&o2hb_quorum_region_bitmap,
O2NM_MAX_REGIONS) > O2HB_PIN_CUT_OFF)
o2hb_region_unpin(NULL);
+unlock:
+ spin_unlock(&o2hb_live_lock);
}
static int o2hb_check_slot(struct o2hb_region *reg,
@@ -925,8 +947,6 @@ fire_callbacks:
slot->ds_equal_samples = 0;
}
out:
- o2hb_set_quorum_device(reg, slot);
-
spin_unlock(&o2hb_live_lock);
o2hb_run_event_list(&event);
@@ -957,7 +977,8 @@ static int o2hb_highest_node(unsigned long *nodes,
static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
{
- int i, ret, highest_node, change = 0;
+ int i, ret, highest_node;
+ int membership_change = 0, own_slot_ok = 0;
unsigned long configured_nodes[BITS_TO_LONGS(O2NM_MAX_NODES)];
unsigned long live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
struct o2hb_bio_wait_ctxt write_wc;
@@ -966,7 +987,7 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
sizeof(configured_nodes));
if (ret) {
mlog_errno(ret);
- return ret;
+ goto bail;
}
/*
@@ -982,8 +1003,9 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
highest_node = o2hb_highest_node(configured_nodes, O2NM_MAX_NODES);
if (highest_node >= O2NM_MAX_NODES) {
- mlog(ML_NOTICE, "ocfs2_heartbeat: no configured nodes found!\n");
- return -EINVAL;
+ mlog(ML_NOTICE, "o2hb: No configured nodes found!\n");
+ ret = -EINVAL;
+ goto bail;
}
/* No sense in reading the slots of nodes that don't exist
@@ -993,29 +1015,27 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
ret = o2hb_read_slots(reg, highest_node + 1);
if (ret < 0) {
mlog_errno(ret);
- return ret;
+ goto bail;
}
/* With an up to date view of the slots, we can check that no
* other node has been improperly configured to heartbeat in
* our slot. */
- o2hb_check_last_timestamp(reg);
+ own_slot_ok = o2hb_check_own_slot(reg);
/* fill in the proper info for our next heartbeat */
o2hb_prepare_block(reg, reg->hr_generation);
- /* And fire off the write. Note that we don't wait on this I/O
- * until later. */
ret = o2hb_issue_node_write(reg, &write_wc);
if (ret < 0) {
mlog_errno(ret);
- return ret;
+ goto bail;
}
i = -1;
while((i = find_next_bit(configured_nodes,
O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) {
- change |= o2hb_check_slot(reg, &reg->hr_slots[i]);
+ membership_change |= o2hb_check_slot(reg, &reg->hr_slots[i]);
}
/*
@@ -1030,18 +1050,39 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
* disk */
mlog(ML_ERROR, "Write error %d on device \"%s\"\n",
write_wc.wc_error, reg->hr_dev_name);
- return write_wc.wc_error;
+ ret = write_wc.wc_error;
+ goto bail;
}
- o2hb_arm_write_timeout(reg);
+ /* Skip disarming the timeout if own slot has stale/bad data */
+ if (own_slot_ok) {
+ o2hb_set_quorum_device(reg);
+ o2hb_arm_write_timeout(reg);
+ }
+bail:
/* let the person who launched us know when things are steady */
- if (!change && (atomic_read(&reg->hr_steady_iterations) != 0)) {
- if (atomic_dec_and_test(&reg->hr_steady_iterations))
+ if (atomic_read(&reg->hr_steady_iterations) != 0) {
+ if (!ret && own_slot_ok && !membership_change) {
+ if (atomic_dec_and_test(&reg->hr_steady_iterations))
+ wake_up(&o2hb_steady_queue);
+ }
+ }
+
+ if (atomic_read(&reg->hr_steady_iterations) != 0) {
+ if (atomic_dec_and_test(&reg->hr_unsteady_iterations)) {
+ printk(KERN_NOTICE "o2hb: Unable to stabilize "
+ "heartbeart on region %s (%s)\n",
+ config_item_name(&reg->hr_item),
+ reg->hr_dev_name);
+ atomic_set(&reg->hr_steady_iterations, 0);
+ reg->hr_aborted_start = 1;
wake_up(&o2hb_steady_queue);
+ ret = -EIO;
+ }
}
- return 0;
+ return ret;
}
/* Subtract b from a, storing the result in a. a *must* have a larger
@@ -1095,7 +1136,8 @@ static int o2hb_thread(void *data)
/* Pin node */
o2nm_depend_this_node();
- while (!kthread_should_stop() && !reg->hr_unclean_stop) {
+ while (!kthread_should_stop() &&
+ !reg->hr_unclean_stop && !reg->hr_aborted_start) {
/* We track the time spent inside
* o2hb_do_disk_heartbeat so that we avoid more than
* hr_timeout_ms between disk writes. On busy systems
@@ -1103,10 +1145,7 @@ static int o2hb_thread(void *data)
* likely to time itself out. */
do_gettimeofday(&before_hb);
- i = 0;
- do {
- ret = o2hb_do_disk_heartbeat(reg);
- } while (ret && ++i < 2);
+ ret = o2hb_do_disk_heartbeat(reg);
do_gettimeofday(&after_hb);
elapsed_msec = o2hb_elapsed_msecs(&before_hb, &after_hb);
@@ -1117,7 +1156,8 @@ static int o2hb_thread(void *data)
after_hb.tv_sec, (unsigned long) after_hb.tv_usec,
elapsed_msec);
- if (elapsed_msec < reg->hr_timeout_ms) {
+ if (!kthread_should_stop() &&
+ elapsed_msec < reg->hr_timeout_ms) {
/* the kthread api has blocked signals for us so no
* need to record the return value. */
msleep_interruptible(reg->hr_timeout_ms - elapsed_msec);
@@ -1134,20 +1174,20 @@ static int o2hb_thread(void *data)
* to timeout on this region when we could just as easily
* write a clear generation - thus indicating to them that
* this node has left this region.
- *
- * XXX: Should we skip this on unclean_stop? */
- o2hb_prepare_block(reg, 0);
- ret = o2hb_issue_node_write(reg, &write_wc);
- if (ret == 0) {
- o2hb_wait_on_io(reg, &write_wc);
- } else {
- mlog_errno(ret);
+ */
+ if (!reg->hr_unclean_stop && !reg->hr_aborted_start) {
+ o2hb_prepare_block(reg, 0);
+ ret = o2hb_issue_node_write(reg, &write_wc);
+ if (ret == 0)
+ o2hb_wait_on_io(reg, &write_wc);
+ else
+ mlog_errno(ret);
}
/* Unpin node */
o2nm_undepend_this_node();
- mlog(ML_HEARTBEAT|ML_KTHREAD, "hb thread exiting\n");
+ mlog(ML_HEARTBEAT|ML_KTHREAD, "o2hb thread exiting\n");
return 0;
}
@@ -1158,6 +1198,7 @@ static int o2hb_debug_open(struct inode *inode, struct file *file)
struct o2hb_debug_buf *db = inode->i_private;
struct o2hb_region *reg;
unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ unsigned long lts;
char *buf = NULL;
int i = -1;
int out = 0;
@@ -1194,9 +1235,11 @@ static int o2hb_debug_open(struct inode *inode, struct file *file)
case O2HB_DB_TYPE_REGION_ELAPSED_TIME:
reg = (struct o2hb_region *)db->db_data;
- out += snprintf(buf + out, PAGE_SIZE - out, "%u\n",
- jiffies_to_msecs(jiffies -
- reg->hr_last_timeout_start));
+ lts = reg->hr_last_timeout_start;
+ /* If 0, it has never been set before */
+ if (lts)
+ lts = jiffies_to_msecs(jiffies - lts);
+ out += snprintf(buf + out, PAGE_SIZE - out, "%lu\n", lts);
goto done;
case O2HB_DB_TYPE_REGION_PINNED:
@@ -1426,6 +1469,8 @@ static void o2hb_region_release(struct config_item *item)
struct page *page;
struct o2hb_region *reg = to_o2hb_region(item);
+ mlog(ML_HEARTBEAT, "hb region release (%s)\n", reg->hr_dev_name);
+
if (reg->hr_tmp_block)
kfree(reg->hr_tmp_block);
@@ -1792,7 +1837,10 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
live_threshold <<= 1;
spin_unlock(&o2hb_live_lock);
}
- atomic_set(&reg->hr_steady_iterations, live_threshold + 1);
+ ++live_threshold;
+ atomic_set(&reg->hr_steady_iterations, live_threshold);
+ /* unsteady_iterations is double the steady_iterations */
+ atomic_set(&reg->hr_unsteady_iterations, (live_threshold << 1));
hb_task = kthread_run(o2hb_thread, reg, "o2hb-%s",
reg->hr_item.ci_name);
@@ -1809,14 +1857,12 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
ret = wait_event_interruptible(o2hb_steady_queue,
atomic_read(&reg->hr_steady_iterations) == 0);
if (ret) {
- /* We got interrupted (hello ptrace!). Clean up */
- spin_lock(&o2hb_live_lock);
- hb_task = reg->hr_task;
- reg->hr_task = NULL;
- spin_unlock(&o2hb_live_lock);
+ atomic_set(&reg->hr_steady_iterations, 0);
+ reg->hr_aborted_start = 1;
+ }
- if (hb_task)
- kthread_stop(hb_task);
+ if (reg->hr_aborted_start) {
+ ret = -EIO;
goto out;
}
@@ -1833,8 +1879,8 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
ret = -EIO;
if (hb_task && o2hb_global_heartbeat_active())
- printk(KERN_NOTICE "o2hb: Heartbeat started on region %s\n",
- config_item_name(&reg->hr_item));
+ printk(KERN_NOTICE "o2hb: Heartbeat started on region %s (%s)\n",
+ config_item_name(&reg->hr_item), reg->hr_dev_name);
out:
if (filp)
@@ -2092,13 +2138,6 @@ static void o2hb_heartbeat_group_drop_item(struct config_group *group,
/* stop the thread when the user removes the region dir */
spin_lock(&o2hb_live_lock);
- if (o2hb_global_heartbeat_active()) {
- clear_bit(reg->hr_region_num, o2hb_region_bitmap);
- clear_bit(reg->hr_region_num, o2hb_live_region_bitmap);
- if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap))
- quorum_region = 1;
- clear_bit(reg->hr_region_num, o2hb_quorum_region_bitmap);
- }
hb_task = reg->hr_task;
reg->hr_task = NULL;
reg->hr_item_dropped = 1;
@@ -2107,19 +2146,30 @@ static void o2hb_heartbeat_group_drop_item(struct config_group *group,
if (hb_task)
kthread_stop(hb_task);
+ if (o2hb_global_heartbeat_active()) {
+ spin_lock(&o2hb_live_lock);
+ clear_bit(reg->hr_region_num, o2hb_region_bitmap);
+ clear_bit(reg->hr_region_num, o2hb_live_region_bitmap);
+ if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap))
+ quorum_region = 1;
+ clear_bit(reg->hr_region_num, o2hb_quorum_region_bitmap);
+ spin_unlock(&o2hb_live_lock);
+ printk(KERN_NOTICE "o2hb: Heartbeat %s on region %s (%s)\n",
+ ((atomic_read(&reg->hr_steady_iterations) == 0) ?
+ "stopped" : "start aborted"), config_item_name(item),
+ reg->hr_dev_name);
+ }
+
/*
* If we're racing a dev_write(), we need to wake them. They will
* check reg->hr_task
*/
if (atomic_read(&reg->hr_steady_iterations) != 0) {
+ reg->hr_aborted_start = 1;
atomic_set(&reg->hr_steady_iterations, 0);
wake_up(&o2hb_steady_queue);
}
- if (o2hb_global_heartbeat_active())
- printk(KERN_NOTICE "o2hb: Heartbeat stopped on region %s\n",
- config_item_name(&reg->hr_item));
-
config_item_put(item);
if (!o2hb_global_heartbeat_active() || !quorum_region)
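A minimal user-space sketch, under assumed values, of the start-up accounting the heartbeat.c hunks add: the o2hb thread counts hr_steady_iterations down on clean passes and aborts the start if it has not reached steady state within hr_unsteady_iterations (set to twice the steady count). Names and numbers are hypothetical; this is not kernel code.

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	int steady = 3;			/* like hr_steady_iterations */
	int unsteady = steady * 2;	/* like hr_unsteady_iterations */
	bool pass_ok[] = { false, true, false, true, true, true };
	int npass = (int)(sizeof(pass_ok) / sizeof(pass_ok[0]));

	for (int i = 0; i < npass; i++) {
		if (pass_ok[i])
			steady--;	/* clean pass: count down toward steady */
		if (steady == 0) {
			printf("steady after %d passes\n", i + 1);
			return 0;
		}
		if (--unsteady == 0) {
			printf("unable to stabilize, aborting start\n");
			return 1;
		}
	}
	return 1;
}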
diff --git a/fs/ocfs2/cluster/netdebug.c b/fs/ocfs2/cluster/netdebug.c
index 3a5835904b3..dc45deb19e6 100644
--- a/fs/ocfs2/cluster/netdebug.c
+++ b/fs/ocfs2/cluster/netdebug.c
@@ -47,6 +47,7 @@
#define SC_DEBUG_NAME "sock_containers"
#define NST_DEBUG_NAME "send_tracking"
#define STATS_DEBUG_NAME "stats"
+#define NODES_DEBUG_NAME "connected_nodes"
#define SHOW_SOCK_CONTAINERS 0
#define SHOW_SOCK_STATS 1
@@ -55,6 +56,7 @@ static struct dentry *o2net_dentry;
static struct dentry *sc_dentry;
static struct dentry *nst_dentry;
static struct dentry *stats_dentry;
+static struct dentry *nodes_dentry;
static DEFINE_SPINLOCK(o2net_debug_lock);
@@ -491,53 +493,87 @@ static const struct file_operations sc_seq_fops = {
.release = sc_fop_release,
};
-int o2net_debugfs_init(void)
+static int o2net_fill_bitmap(char *buf, int len)
{
- o2net_dentry = debugfs_create_dir(O2NET_DEBUG_DIR, NULL);
- if (!o2net_dentry) {
- mlog_errno(-ENOMEM);
- goto bail;
- }
+ unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ int i = -1, out = 0;
- nst_dentry = debugfs_create_file(NST_DEBUG_NAME, S_IFREG|S_IRUSR,
- o2net_dentry, NULL,
- &nst_seq_fops);
- if (!nst_dentry) {
- mlog_errno(-ENOMEM);
- goto bail;
- }
+ o2net_fill_node_map(map, sizeof(map));
- sc_dentry = debugfs_create_file(SC_DEBUG_NAME, S_IFREG|S_IRUSR,
- o2net_dentry, NULL,
- &sc_seq_fops);
- if (!sc_dentry) {
- mlog_errno(-ENOMEM);
- goto bail;
- }
+ while ((i = find_next_bit(map, O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES)
+ out += snprintf(buf + out, PAGE_SIZE - out, "%d ", i);
+ out += snprintf(buf + out, PAGE_SIZE - out, "\n");
- stats_dentry = debugfs_create_file(STATS_DEBUG_NAME, S_IFREG|S_IRUSR,
- o2net_dentry, NULL,
- &stats_seq_fops);
- if (!stats_dentry) {
- mlog_errno(-ENOMEM);
- goto bail;
- }
+ return out;
+}
+
+static int nodes_fop_open(struct inode *inode, struct file *file)
+{
+ char *buf;
+
+ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ i_size_write(inode, o2net_fill_bitmap(buf, PAGE_SIZE));
+
+ file->private_data = buf;
return 0;
-bail:
- debugfs_remove(stats_dentry);
- debugfs_remove(sc_dentry);
- debugfs_remove(nst_dentry);
- debugfs_remove(o2net_dentry);
- return -ENOMEM;
}
+static int o2net_debug_release(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+ return 0;
+}
+
+static ssize_t o2net_debug_read(struct file *file, char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ return simple_read_from_buffer(buf, nbytes, ppos, file->private_data,
+ i_size_read(file->f_mapping->host));
+}
+
+static const struct file_operations nodes_fops = {
+ .open = nodes_fop_open,
+ .release = o2net_debug_release,
+ .read = o2net_debug_read,
+ .llseek = generic_file_llseek,
+};
+
void o2net_debugfs_exit(void)
{
+ debugfs_remove(nodes_dentry);
debugfs_remove(stats_dentry);
debugfs_remove(sc_dentry);
debugfs_remove(nst_dentry);
debugfs_remove(o2net_dentry);
}
+int o2net_debugfs_init(void)
+{
+ mode_t mode = S_IFREG|S_IRUSR;
+
+ o2net_dentry = debugfs_create_dir(O2NET_DEBUG_DIR, NULL);
+ if (o2net_dentry)
+ nst_dentry = debugfs_create_file(NST_DEBUG_NAME, mode,
+ o2net_dentry, NULL, &nst_seq_fops);
+ if (nst_dentry)
+ sc_dentry = debugfs_create_file(SC_DEBUG_NAME, mode,
+ o2net_dentry, NULL, &sc_seq_fops);
+ if (sc_dentry)
+ stats_dentry = debugfs_create_file(STATS_DEBUG_NAME, mode,
+ o2net_dentry, NULL, &stats_seq_fops);
+ if (stats_dentry)
+ nodes_dentry = debugfs_create_file(NODES_DEBUG_NAME, mode,
+ o2net_dentry, NULL, &nodes_fops);
+ if (nodes_dentry)
+ return 0;
+
+ o2net_debugfs_exit();
+ mlog_errno(-ENOMEM);
+ return -ENOMEM;
+}
+
#endif /* CONFIG_DEBUG_FS */
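A minimal user-space sketch of what the new connected_nodes debugfs file prints, mirroring o2net_fill_bitmap() above: the set bits of a node bitmap emitted as space-separated node numbers followed by a newline. Plain C loops stand in for the kernel's find_next_bit() helper, and MAX_NODES is a hypothetical stand-in for O2NM_MAX_NODES.

#include <stdio.h>

#define MAX_NODES 32	/* hypothetical stand-in for O2NM_MAX_NODES */

int main(void)
{
	unsigned long map = (1UL << 0) | (1UL << 5) | (1UL << 12);
	char buf[128];
	int out = 0;

	for (int node = 0; node < MAX_NODES; node++)
		if (map & (1UL << node))
			out += snprintf(buf + out, sizeof(buf) - out, "%d ", node);
	snprintf(buf + out, sizeof(buf) - out, "\n");

	fputs(buf, stdout);	/* prints "0 5 12 " and a newline */
	return 0;
}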
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index ad7d0c155de..044e7b58d31 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -546,7 +546,7 @@ static void o2net_set_nn_state(struct o2net_node *nn,
}
if (was_valid && !valid) {
- printk(KERN_NOTICE "o2net: no longer connected to "
+ printk(KERN_NOTICE "o2net: No longer connected to "
SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc));
o2net_complete_nodes_nsw(nn);
}
@@ -556,7 +556,7 @@ static void o2net_set_nn_state(struct o2net_node *nn,
cancel_delayed_work(&nn->nn_connect_expired);
printk(KERN_NOTICE "o2net: %s " SC_NODEF_FMT "\n",
o2nm_this_node() > sc->sc_node->nd_num ?
- "connected to" : "accepted connection from",
+ "Connected to" : "Accepted connection from",
SC_NODEF_ARGS(sc));
}
@@ -644,7 +644,7 @@ static void o2net_state_change(struct sock *sk)
o2net_sc_queue_work(sc, &sc->sc_connect_work);
break;
default:
- printk(KERN_INFO "o2net: connection to " SC_NODEF_FMT
+ printk(KERN_INFO "o2net: Connection to " SC_NODEF_FMT
" shutdown, state %d\n",
SC_NODEF_ARGS(sc), sk->sk_state);
o2net_sc_queue_work(sc, &sc->sc_shutdown_work);
@@ -1035,6 +1035,25 @@ static int o2net_tx_can_proceed(struct o2net_node *nn,
return ret;
}
+/* Get a map of all nodes to which this node is currently connected */
+void o2net_fill_node_map(unsigned long *map, unsigned bytes)
+{
+ struct o2net_sock_container *sc;
+ int node, ret;
+
+ BUG_ON(bytes < (BITS_TO_LONGS(O2NM_MAX_NODES) * sizeof(unsigned long)));
+
+ memset(map, 0, bytes);
+ for (node = 0; node < O2NM_MAX_NODES; ++node) {
+ o2net_tx_can_proceed(o2net_nn_from_num(node), &sc, &ret);
+ if (!ret) {
+ set_bit(node, map);
+ sc_put(sc);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(o2net_fill_node_map);
+
int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec,
size_t caller_veclen, u8 target_node, int *status)
{
@@ -1285,11 +1304,11 @@ static int o2net_check_handshake(struct o2net_sock_container *sc)
struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
if (hand->protocol_version != cpu_to_be64(O2NET_PROTOCOL_VERSION)) {
- mlog(ML_NOTICE, SC_NODEF_FMT " advertised net protocol "
- "version %llu but %llu is required, disconnecting\n",
- SC_NODEF_ARGS(sc),
- (unsigned long long)be64_to_cpu(hand->protocol_version),
- O2NET_PROTOCOL_VERSION);
+ printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " Advertised net "
+ "protocol version %llu but %llu is required. "
+ "Disconnecting.\n", SC_NODEF_ARGS(sc),
+ (unsigned long long)be64_to_cpu(hand->protocol_version),
+ O2NET_PROTOCOL_VERSION);
/* don't bother reconnecting if its the wrong version. */
o2net_ensure_shutdown(nn, sc, -ENOTCONN);
@@ -1303,33 +1322,33 @@ static int o2net_check_handshake(struct o2net_sock_container *sc)
*/
if (be32_to_cpu(hand->o2net_idle_timeout_ms) !=
o2net_idle_timeout()) {
- mlog(ML_NOTICE, SC_NODEF_FMT " uses a network idle timeout of "
- "%u ms, but we use %u ms locally. disconnecting\n",
- SC_NODEF_ARGS(sc),
- be32_to_cpu(hand->o2net_idle_timeout_ms),
- o2net_idle_timeout());
+ printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " uses a network "
+ "idle timeout of %u ms, but we use %u ms locally. "
+ "Disconnecting.\n", SC_NODEF_ARGS(sc),
+ be32_to_cpu(hand->o2net_idle_timeout_ms),
+ o2net_idle_timeout());
o2net_ensure_shutdown(nn, sc, -ENOTCONN);
return -1;
}
if (be32_to_cpu(hand->o2net_keepalive_delay_ms) !=
o2net_keepalive_delay()) {
- mlog(ML_NOTICE, SC_NODEF_FMT " uses a keepalive delay of "
- "%u ms, but we use %u ms locally. disconnecting\n",
- SC_NODEF_ARGS(sc),
- be32_to_cpu(hand->o2net_keepalive_delay_ms),
- o2net_keepalive_delay());
+ printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " uses a keepalive "
+ "delay of %u ms, but we use %u ms locally. "
+ "Disconnecting.\n", SC_NODEF_ARGS(sc),
+ be32_to_cpu(hand->o2net_keepalive_delay_ms),
+ o2net_keepalive_delay());
o2net_ensure_shutdown(nn, sc, -ENOTCONN);
return -1;
}
if (be32_to_cpu(hand->o2hb_heartbeat_timeout_ms) !=
O2HB_MAX_WRITE_TIMEOUT_MS) {
- mlog(ML_NOTICE, SC_NODEF_FMT " uses a heartbeat timeout of "
- "%u ms, but we use %u ms locally. disconnecting\n",
- SC_NODEF_ARGS(sc),
- be32_to_cpu(hand->o2hb_heartbeat_timeout_ms),
- O2HB_MAX_WRITE_TIMEOUT_MS);
+ printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " uses a heartbeat "
+ "timeout of %u ms, but we use %u ms locally. "
+ "Disconnecting.\n", SC_NODEF_ARGS(sc),
+ be32_to_cpu(hand->o2hb_heartbeat_timeout_ms),
+ O2HB_MAX_WRITE_TIMEOUT_MS);
o2net_ensure_shutdown(nn, sc, -ENOTCONN);
return -1;
}
@@ -1540,28 +1559,16 @@ static void o2net_idle_timer(unsigned long data)
{
struct o2net_sock_container *sc = (struct o2net_sock_container *)data;
struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
-
#ifdef CONFIG_DEBUG_FS
- ktime_t now = ktime_get();
+ unsigned long msecs = ktime_to_ms(ktime_get()) -
+ ktime_to_ms(sc->sc_tv_timer);
+#else
+ unsigned long msecs = o2net_idle_timeout();
#endif
- printk(KERN_NOTICE "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u "
- "seconds, shutting it down.\n", SC_NODEF_ARGS(sc),
- o2net_idle_timeout() / 1000,
- o2net_idle_timeout() % 1000);
-
-#ifdef CONFIG_DEBUG_FS
- mlog(ML_NOTICE, "Here are some times that might help debug the "
- "situation: (Timer: %lld, Now %lld, DataReady %lld, Advance %lld-%lld, "
- "Key 0x%08x, Func %u, FuncTime %lld-%lld)\n",
- (long long)ktime_to_us(sc->sc_tv_timer), (long long)ktime_to_us(now),
- (long long)ktime_to_us(sc->sc_tv_data_ready),
- (long long)ktime_to_us(sc->sc_tv_advance_start),
- (long long)ktime_to_us(sc->sc_tv_advance_stop),
- sc->sc_msg_key, sc->sc_msg_type,
- (long long)ktime_to_us(sc->sc_tv_func_start),
- (long long)ktime_to_us(sc->sc_tv_func_stop));
-#endif
+ printk(KERN_NOTICE "o2net: Connection to " SC_NODEF_FMT " has been "
+ "idle for %lu.%lu secs, shutting it down.\n", SC_NODEF_ARGS(sc),
+ msecs / 1000, msecs % 1000);
/*
* Initialize the nn_timeout so that the next connection attempt
@@ -1694,8 +1701,8 @@ static void o2net_start_connect(struct work_struct *work)
out:
if (ret) {
- mlog(ML_NOTICE, "connect attempt to " SC_NODEF_FMT " failed "
- "with errno %d\n", SC_NODEF_ARGS(sc), ret);
+ printk(KERN_NOTICE "o2net: Connect attempt to " SC_NODEF_FMT
+ " failed with errno %d\n", SC_NODEF_ARGS(sc), ret);
/* 0 err so that another will be queued and attempted
* from set_nn_state */
if (sc)
@@ -1718,8 +1725,8 @@ static void o2net_connect_expired(struct work_struct *work)
spin_lock(&nn->nn_lock);
if (!nn->nn_sc_valid) {
- mlog(ML_ERROR, "no connection established with node %u after "
- "%u.%u seconds, giving up and returning errors.\n",
+ printk(KERN_NOTICE "o2net: No connection established with "
+ "node %u after %u.%u seconds, giving up.\n",
o2net_num_from_nn(nn),
o2net_idle_timeout() / 1000,
o2net_idle_timeout() % 1000);
@@ -1862,21 +1869,21 @@ static int o2net_accept_one(struct socket *sock)
node = o2nm_get_node_by_ip(sin.sin_addr.s_addr);
if (node == NULL) {
- mlog(ML_NOTICE, "attempt to connect from unknown node at %pI4:%d\n",
- &sin.sin_addr.s_addr, ntohs(sin.sin_port));
+ printk(KERN_NOTICE "o2net: Attempt to connect from unknown "
+ "node at %pI4:%d\n", &sin.sin_addr.s_addr,
+ ntohs(sin.sin_port));
ret = -EINVAL;
goto out;
}
if (o2nm_this_node() >= node->nd_num) {
local_node = o2nm_get_node_by_num(o2nm_this_node());
- mlog(ML_NOTICE, "unexpected connect attempt seen at node '%s' ("
- "%u, %pI4:%d) from node '%s' (%u, %pI4:%d)\n",
- local_node->nd_name, local_node->nd_num,
- &(local_node->nd_ipv4_address),
- ntohs(local_node->nd_ipv4_port),
- node->nd_name, node->nd_num, &sin.sin_addr.s_addr,
- ntohs(sin.sin_port));
+ printk(KERN_NOTICE "o2net: Unexpected connect attempt seen "
+ "at node '%s' (%u, %pI4:%d) from node '%s' (%u, "
+ "%pI4:%d)\n", local_node->nd_name, local_node->nd_num,
+ &(local_node->nd_ipv4_address),
+ ntohs(local_node->nd_ipv4_port), node->nd_name,
+ node->nd_num, &sin.sin_addr.s_addr, ntohs(sin.sin_port));
ret = -EINVAL;
goto out;
}
@@ -1901,10 +1908,10 @@ static int o2net_accept_one(struct socket *sock)
ret = 0;
spin_unlock(&nn->nn_lock);
if (ret) {
- mlog(ML_NOTICE, "attempt to connect from node '%s' at "
- "%pI4:%d but it already has an open connection\n",
- node->nd_name, &sin.sin_addr.s_addr,
- ntohs(sin.sin_port));
+ printk(KERN_NOTICE "o2net: Attempt to connect from node '%s' "
+ "at %pI4:%d but it already has an open connection\n",
+ node->nd_name, &sin.sin_addr.s_addr,
+ ntohs(sin.sin_port));
goto out;
}
@@ -1984,7 +1991,7 @@ static int o2net_open_listening_sock(__be32 addr, __be16 port)
ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
if (ret < 0) {
- mlog(ML_ERROR, "unable to create socket, ret=%d\n", ret);
+ printk(KERN_ERR "o2net: Error %d while creating socket\n", ret);
goto out;
}
@@ -2001,16 +2008,15 @@ static int o2net_open_listening_sock(__be32 addr, __be16 port)
sock->sk->sk_reuse = 1;
ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
if (ret < 0) {
- mlog(ML_ERROR, "unable to bind socket at %pI4:%u, "
- "ret=%d\n", &addr, ntohs(port), ret);
+ printk(KERN_ERR "o2net: Error %d while binding socket at "
+ "%pI4:%u\n", ret, &addr, ntohs(port));
goto out;
}
ret = sock->ops->listen(sock, 64);
- if (ret < 0) {
- mlog(ML_ERROR, "unable to listen on %pI4:%u, ret=%d\n",
- &addr, ntohs(port), ret);
- }
+ if (ret < 0)
+ printk(KERN_ERR "o2net: Error %d while listening on %pI4:%u\n",
+ ret, &addr, ntohs(port));
out:
if (ret) {
diff --git a/fs/ocfs2/cluster/tcp.h b/fs/ocfs2/cluster/tcp.h
index fd6179eb26d..5bada2a69b5 100644
--- a/fs/ocfs2/cluster/tcp.h
+++ b/fs/ocfs2/cluster/tcp.h
@@ -106,6 +106,8 @@ int o2net_register_handler(u32 msg_type, u32 key, u32 max_len,
struct list_head *unreg_list);
void o2net_unregister_handler_list(struct list_head *list);
+void o2net_fill_node_map(unsigned long *map, unsigned bytes);
+
struct o2nm_node;
int o2net_register_hb_callbacks(void);
void o2net_unregister_hb_callbacks(void);
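A minimal user-space sketch of how callers of the newly exported o2net_fill_node_map() size the bitmap they pass in: BITS_TO_LONGS(max_nodes) longs, which the function checks against the byte count. The MAX_NODES value and macro definitions here are stand-ins, not the kernel's.

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG	 (sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define MAX_NODES 255	/* stand-in for O2NM_MAX_NODES */

int main(void)
{
	unsigned long map[BITS_TO_LONGS(MAX_NODES)];

	/* a caller would pass (map, sizeof(map)) to the fill routine */
	printf("map is %zu longs (%zu bytes) for %d node bits\n",
	       sizeof(map) / sizeof(map[0]), sizeof(map), MAX_NODES);
	return 0;
}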
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index e2878b5895f..8fe4e2892ab 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -1184,8 +1184,7 @@ static int __ocfs2_delete_entry(handle_t *handle, struct inode *dir,
if (pde)
le16_add_cpu(&pde->rec_len,
le16_to_cpu(de->rec_len));
- else
- de->inode = 0;
+ de->inode = 0;
dir->i_version++;
ocfs2_journal_dirty(handle, bh);
goto bail;
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index d602abb51b6..a5952ceecba 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -859,8 +859,8 @@ void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
void dlm_wait_for_recovery(struct dlm_ctxt *dlm);
void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);
int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node);
-int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout);
-int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout);
+void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout);
+void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout);
void dlm_put(struct dlm_ctxt *dlm);
struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm);
@@ -877,9 +877,8 @@ static inline void dlm_lockres_get(struct dlm_lock_resource *res)
kref_get(&res->refs);
}
void dlm_lockres_put(struct dlm_lock_resource *res);
-void __dlm_unhash_lockres(struct dlm_lock_resource *res);
-void __dlm_insert_lockres(struct dlm_ctxt *dlm,
- struct dlm_lock_resource *res);
+void __dlm_unhash_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
+void __dlm_insert_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm,
const char *name,
unsigned int len,
@@ -902,46 +901,15 @@ struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
const char *name,
unsigned int namelen);
-#define dlm_lockres_set_refmap_bit(bit,res) \
- __dlm_lockres_set_refmap_bit(bit,res,__FILE__,__LINE__)
-#define dlm_lockres_clear_refmap_bit(bit,res) \
- __dlm_lockres_clear_refmap_bit(bit,res,__FILE__,__LINE__)
+void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res, int bit);
+void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res, int bit);
-static inline void __dlm_lockres_set_refmap_bit(int bit,
- struct dlm_lock_resource *res,
- const char *file,
- int line)
-{
- //printk("%s:%d:%.*s: setting bit %d\n", file, line,
- // res->lockname.len, res->lockname.name, bit);
- set_bit(bit, res->refmap);
-}
-
-static inline void __dlm_lockres_clear_refmap_bit(int bit,
- struct dlm_lock_resource *res,
- const char *file,
- int line)
-{
- //printk("%s:%d:%.*s: clearing bit %d\n", file, line,
- // res->lockname.len, res->lockname.name, bit);
- clear_bit(bit, res->refmap);
-}
-
-void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
- struct dlm_lock_resource *res,
- const char *file,
- int line);
-void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
- struct dlm_lock_resource *res,
- int new_lockres,
- const char *file,
- int line);
-#define dlm_lockres_drop_inflight_ref(d,r) \
- __dlm_lockres_drop_inflight_ref(d,r,__FILE__,__LINE__)
-#define dlm_lockres_grab_inflight_ref(d,r) \
- __dlm_lockres_grab_inflight_ref(d,r,0,__FILE__,__LINE__)
-#define dlm_lockres_grab_inflight_ref_new(d,r) \
- __dlm_lockres_grab_inflight_ref(d,r,1,__FILE__,__LINE__)
+void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res);
+void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res);
void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 6ed6b95dcf9..92f2ead0fab 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -157,16 +157,18 @@ static int dlm_protocol_compare(struct dlm_protocol_version *existing,
static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm);
-void __dlm_unhash_lockres(struct dlm_lock_resource *lockres)
+void __dlm_unhash_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
- if (!hlist_unhashed(&lockres->hash_node)) {
- hlist_del_init(&lockres->hash_node);
- dlm_lockres_put(lockres);
- }
+ if (hlist_unhashed(&res->hash_node))
+ return;
+
+ mlog(0, "%s: Unhash res %.*s\n", dlm->name, res->lockname.len,
+ res->lockname.name);
+ hlist_del_init(&res->hash_node);
+ dlm_lockres_put(res);
}
-void __dlm_insert_lockres(struct dlm_ctxt *dlm,
- struct dlm_lock_resource *res)
+void __dlm_insert_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
struct hlist_head *bucket;
struct qstr *q;
@@ -180,6 +182,9 @@ void __dlm_insert_lockres(struct dlm_ctxt *dlm,
dlm_lockres_get(res);
hlist_add_head(&res->hash_node, bucket);
+
+ mlog(0, "%s: Hash res %.*s\n", dlm->name, res->lockname.len,
+ res->lockname.name);
}
struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm,
@@ -539,17 +544,17 @@ again:
static void __dlm_print_nodes(struct dlm_ctxt *dlm)
{
- int node = -1;
+ int node = -1, num = 0;
assert_spin_locked(&dlm->spinlock);
- printk(KERN_NOTICE "o2dlm: Nodes in domain %s: ", dlm->name);
-
+ printk("( ");
while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES,
node + 1)) < O2NM_MAX_NODES) {
printk("%d ", node);
+ ++num;
}
- printk("\n");
+ printk(") %u nodes\n", num);
}
static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data,
@@ -566,11 +571,10 @@ static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data,
node = exit_msg->node_idx;
- printk(KERN_NOTICE "o2dlm: Node %u leaves domain %s\n", node, dlm->name);
-
spin_lock(&dlm->spinlock);
clear_bit(node, dlm->domain_map);
clear_bit(node, dlm->exit_domain_map);
+ printk(KERN_NOTICE "o2dlm: Node %u leaves domain %s ", node, dlm->name);
__dlm_print_nodes(dlm);
/* notify anything attached to the heartbeat events */
@@ -755,6 +759,7 @@ void dlm_unregister_domain(struct dlm_ctxt *dlm)
dlm_mark_domain_leaving(dlm);
dlm_leave_domain(dlm);
+ printk(KERN_NOTICE "o2dlm: Leaving domain %s\n", dlm->name);
dlm_force_free_mles(dlm);
dlm_complete_dlm_shutdown(dlm);
}
@@ -970,7 +975,7 @@ static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data,
clear_bit(assert->node_idx, dlm->exit_domain_map);
__dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
- printk(KERN_NOTICE "o2dlm: Node %u joins domain %s\n",
+ printk(KERN_NOTICE "o2dlm: Node %u joins domain %s ",
assert->node_idx, dlm->name);
__dlm_print_nodes(dlm);
@@ -1701,8 +1706,10 @@ static int dlm_try_to_join_domain(struct dlm_ctxt *dlm)
bail:
spin_lock(&dlm->spinlock);
__dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
- if (!status)
+ if (!status) {
+ printk(KERN_NOTICE "o2dlm: Joining domain %s ", dlm->name);
__dlm_print_nodes(dlm);
+ }
spin_unlock(&dlm->spinlock);
if (ctxt) {
@@ -2131,13 +2138,6 @@ struct dlm_ctxt * dlm_register_domain(const char *domain,
goto leave;
}
- if (!o2hb_check_local_node_heartbeating()) {
- mlog(ML_ERROR, "the local node has not been configured, or is "
- "not heartbeating\n");
- ret = -EPROTO;
- goto leave;
- }
-
mlog(0, "register called for domain \"%s\"\n", domain);
retry:
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
index 8d39e0fd66f..975810b9849 100644
--- a/fs/ocfs2/dlm/dlmlock.c
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -183,10 +183,6 @@ static enum dlm_status dlmlock_master(struct dlm_ctxt *dlm,
kick_thread = 1;
}
}
- /* reduce the inflight count, this may result in the lockres
- * being purged below during calc_usage */
- if (lock->ml.node == dlm->node_num)
- dlm_lockres_drop_inflight_ref(dlm, res);
spin_unlock(&res->spinlock);
wake_up(&res->wq);
@@ -231,10 +227,16 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
lock->ml.type, res->lockname.len,
res->lockname.name, flags);
+ /*
+ * Wait if resource is getting recovered, remastered, etc.
+ * If the resource was remastered and new owner is self, then exit.
+ */
spin_lock(&res->spinlock);
-
- /* will exit this call with spinlock held */
__dlm_wait_on_lockres(res);
+ if (res->owner == dlm->node_num) {
+ spin_unlock(&res->spinlock);
+ return DLM_RECOVERING;
+ }
res->state |= DLM_LOCK_RES_IN_PROGRESS;
/* add lock to local (secondary) queue */
@@ -319,27 +321,23 @@ static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
tmpret = o2net_send_message(DLM_CREATE_LOCK_MSG, dlm->key, &create,
sizeof(create), res->owner, &status);
if (tmpret >= 0) {
- // successfully sent and received
- ret = status; // this is already a dlm_status
+ ret = status;
if (ret == DLM_REJECTED) {
- mlog(ML_ERROR, "%s:%.*s: BUG. this is a stale lockres "
- "no longer owned by %u. that node is coming back "
- "up currently.\n", dlm->name, create.namelen,
+ mlog(ML_ERROR, "%s: res %.*s, Stale lockres no longer "
+ "owned by node %u. That node is coming back up "
+ "currently.\n", dlm->name, create.namelen,
create.name, res->owner);
dlm_print_one_lock_resource(res);
BUG();
}
} else {
- mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
- "node %u\n", tmpret, DLM_CREATE_LOCK_MSG, dlm->key,
- res->owner);
- if (dlm_is_host_down(tmpret)) {
+ mlog(ML_ERROR, "%s: res %.*s, Error %d send CREATE LOCK to "
+ "node %u\n", dlm->name, create.namelen, create.name,
+ tmpret, res->owner);
+ if (dlm_is_host_down(tmpret))
ret = DLM_RECOVERING;
- mlog(0, "node %u died so returning DLM_RECOVERING "
- "from lock message!\n", res->owner);
- } else {
+ else
ret = dlm_err_to_dlm_status(tmpret);
- }
}
return ret;
@@ -440,7 +438,7 @@ struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie,
/* zero memory only if kernel-allocated */
lksb = kzalloc(sizeof(*lksb), GFP_NOFS);
if (!lksb) {
- kfree(lock);
+ kmem_cache_free(dlm_lock_cache, lock);
return NULL;
}
kernel_allocated = 1;
@@ -718,18 +716,10 @@ retry_lock:
if (status == DLM_RECOVERING || status == DLM_MIGRATING ||
status == DLM_FORWARD) {
- mlog(0, "retrying lock with migration/"
- "recovery/in progress\n");
msleep(100);
- /* no waiting for dlm_reco_thread */
if (recovery) {
if (status != DLM_RECOVERING)
goto retry_lock;
-
- mlog(0, "%s: got RECOVERING "
- "for $RECOVERY lock, master "
- "was %u\n", dlm->name,
- res->owner);
/* wait to see the node go down, then
* drop down and allow the lockres to
* get cleaned up. need to remaster. */
@@ -741,6 +731,14 @@ retry_lock:
}
}
+ /* Inflight taken in dlm_get_lock_resource() is dropped here */
+ spin_lock(&res->spinlock);
+ dlm_lockres_drop_inflight_ref(dlm, res);
+ spin_unlock(&res->spinlock);
+
+ dlm_lockres_calc_usage(dlm, res);
+ dlm_kick_thread(dlm, res);
+
if (status != DLM_NORMAL) {
lock->lksb->flags &= ~DLM_LKSB_GET_LVB;
if (status != DLM_NOTQUEUED)
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 11eefb8c12e..005261c333b 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -631,39 +631,54 @@ error:
return NULL;
}
-void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
- struct dlm_lock_resource *res,
- int new_lockres,
- const char *file,
- int line)
+void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res, int bit)
{
- if (!new_lockres)
- assert_spin_locked(&res->spinlock);
+ assert_spin_locked(&res->spinlock);
+
+ mlog(0, "res %.*s, set node %u, %ps()\n", res->lockname.len,
+ res->lockname.name, bit, __builtin_return_address(0));
+
+ set_bit(bit, res->refmap);
+}
+
+void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res, int bit)
+{
+ assert_spin_locked(&res->spinlock);
+
+ mlog(0, "res %.*s, clr node %u, %ps()\n", res->lockname.len,
+ res->lockname.name, bit, __builtin_return_address(0));
+
+ clear_bit(bit, res->refmap);
+}
+
+
+void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res)
+{
+ assert_spin_locked(&res->spinlock);
- if (!test_bit(dlm->node_num, res->refmap)) {
- BUG_ON(res->inflight_locks != 0);
- dlm_lockres_set_refmap_bit(dlm->node_num, res);
- }
res->inflight_locks++;
- mlog(0, "%s:%.*s: inflight++: now %u\n",
- dlm->name, res->lockname.len, res->lockname.name,
- res->inflight_locks);
+
+ mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name,
+ res->lockname.len, res->lockname.name, res->inflight_locks,
+ __builtin_return_address(0));
}
-void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
- struct dlm_lock_resource *res,
- const char *file,
- int line)
+void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res)
{
assert_spin_locked(&res->spinlock);
BUG_ON(res->inflight_locks == 0);
+
res->inflight_locks--;
- mlog(0, "%s:%.*s: inflight--: now %u\n",
- dlm->name, res->lockname.len, res->lockname.name,
- res->inflight_locks);
- if (res->inflight_locks == 0)
- dlm_lockres_clear_refmap_bit(dlm->node_num, res);
+
+ mlog(0, "%s: res %.*s, inflight--: now %u, %ps()\n", dlm->name,
+ res->lockname.len, res->lockname.name, res->inflight_locks,
+ __builtin_return_address(0));
+
wake_up(&res->wq);
}
@@ -697,7 +712,6 @@ struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
unsigned int hash;
int tries = 0;
int bit, wait_on_recovery = 0;
- int drop_inflight_if_nonlocal = 0;
BUG_ON(!lockid);
@@ -709,36 +723,33 @@ lookup:
spin_lock(&dlm->spinlock);
tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash);
if (tmpres) {
- int dropping_ref = 0;
-
spin_unlock(&dlm->spinlock);
-
spin_lock(&tmpres->spinlock);
- /* We wait for the other thread that is mastering the resource */
+ /* Wait on the thread that is mastering the resource */
if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
__dlm_wait_on_lockres(tmpres);
BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN);
+ spin_unlock(&tmpres->spinlock);
+ dlm_lockres_put(tmpres);
+ tmpres = NULL;
+ goto lookup;
}
- if (tmpres->owner == dlm->node_num) {
- BUG_ON(tmpres->state & DLM_LOCK_RES_DROPPING_REF);
- dlm_lockres_grab_inflight_ref(dlm, tmpres);
- } else if (tmpres->state & DLM_LOCK_RES_DROPPING_REF)
- dropping_ref = 1;
- spin_unlock(&tmpres->spinlock);
-
- /* wait until done messaging the master, drop our ref to allow
- * the lockres to be purged, start over. */
- if (dropping_ref) {
- spin_lock(&tmpres->spinlock);
- __dlm_wait_on_lockres_flags(tmpres, DLM_LOCK_RES_DROPPING_REF);
+ /* Wait on the resource purge to complete before continuing */
+ if (tmpres->state & DLM_LOCK_RES_DROPPING_REF) {
+ BUG_ON(tmpres->owner == dlm->node_num);
+ __dlm_wait_on_lockres_flags(tmpres,
+ DLM_LOCK_RES_DROPPING_REF);
spin_unlock(&tmpres->spinlock);
dlm_lockres_put(tmpres);
tmpres = NULL;
goto lookup;
}
- mlog(0, "found in hash!\n");
+ /* Grab inflight ref to pin the resource */
+ dlm_lockres_grab_inflight_ref(dlm, tmpres);
+
+ spin_unlock(&tmpres->spinlock);
if (res)
dlm_lockres_put(res);
res = tmpres;
@@ -829,8 +840,8 @@ lookup:
* but they might own this lockres. wait on them. */
bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
if (bit < O2NM_MAX_NODES) {
- mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
- "recover before lock mastery can begin\n",
+ mlog(0, "%s: res %.*s, At least one node (%d) "
+ "to recover before lock mastery can begin\n",
dlm->name, namelen, (char *)lockid, bit);
wait_on_recovery = 1;
}
@@ -843,12 +854,11 @@ lookup:
/* finally add the lockres to its hash bucket */
__dlm_insert_lockres(dlm, res);
- /* since this lockres is new it doesn't not require the spinlock */
- dlm_lockres_grab_inflight_ref_new(dlm, res);
- /* if this node does not become the master make sure to drop
- * this inflight reference below */
- drop_inflight_if_nonlocal = 1;
+ /* Grab inflight ref to pin the resource */
+ spin_lock(&res->spinlock);
+ dlm_lockres_grab_inflight_ref(dlm, res);
+ spin_unlock(&res->spinlock);
/* get an extra ref on the mle in case this is a BLOCK
* if so, the creator of the BLOCK may try to put the last
@@ -864,8 +874,8 @@ redo_request:
* dlm spinlock would be detectable be a change on the mle,
* so we only need to clear out the recovery map once. */
if (dlm_is_recovery_lock(lockid, namelen)) {
- mlog(ML_NOTICE, "%s: recovery map is not empty, but "
- "must master $RECOVERY lock now\n", dlm->name);
+ mlog(0, "%s: Recovery map is not empty, but must "
+ "master $RECOVERY lock now\n", dlm->name);
if (!dlm_pre_master_reco_lockres(dlm, res))
wait_on_recovery = 0;
else {
@@ -883,8 +893,8 @@ redo_request:
spin_lock(&dlm->spinlock);
bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
if (bit < O2NM_MAX_NODES) {
- mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
- "recover before lock mastery can begin\n",
+ mlog(0, "%s: res %.*s, At least one node (%d) "
+ "to recover before lock mastery can begin\n",
dlm->name, namelen, (char *)lockid, bit);
wait_on_recovery = 1;
} else
@@ -913,8 +923,8 @@ redo_request:
* yet, keep going until it does. this is how the
* master will know that asserts are needed back to
* the lower nodes. */
- mlog(0, "%s:%.*s: requests only up to %u but master "
- "is %u, keep going\n", dlm->name, namelen,
+ mlog(0, "%s: res %.*s, Requests only up to %u but "
+ "master is %u, keep going\n", dlm->name, namelen,
lockid, nodenum, mle->master);
}
}
@@ -924,13 +934,12 @@ wait:
ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
if (ret < 0) {
wait_on_recovery = 1;
- mlog(0, "%s:%.*s: node map changed, redo the "
- "master request now, blocked=%d\n",
- dlm->name, res->lockname.len,
+ mlog(0, "%s: res %.*s, Node map changed, redo the master "
+ "request now, blocked=%d\n", dlm->name, res->lockname.len,
res->lockname.name, blocked);
if (++tries > 20) {
- mlog(ML_ERROR, "%s:%.*s: spinning on "
- "dlm_wait_for_lock_mastery, blocked=%d\n",
+ mlog(ML_ERROR, "%s: res %.*s, Spinning on "
+ "dlm_wait_for_lock_mastery, blocked = %d\n",
dlm->name, res->lockname.len,
res->lockname.name, blocked);
dlm_print_one_lock_resource(res);
@@ -940,7 +949,8 @@ wait:
goto redo_request;
}
- mlog(0, "lockres mastered by %u\n", res->owner);
+ mlog(0, "%s: res %.*s, Mastered by %u\n", dlm->name, res->lockname.len,
+ res->lockname.name, res->owner);
/* make sure we never continue without this */
BUG_ON(res->owner == O2NM_MAX_NODES);
@@ -952,8 +962,6 @@ wait:
wake_waiters:
spin_lock(&res->spinlock);
- if (res->owner != dlm->node_num && drop_inflight_if_nonlocal)
- dlm_lockres_drop_inflight_ref(dlm, res);
res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
spin_unlock(&res->spinlock);
wake_up(&res->wq);
@@ -1426,9 +1434,7 @@ way_up_top:
}
if (res->owner == dlm->node_num) {
- mlog(0, "%s:%.*s: setting bit %u in refmap\n",
- dlm->name, namelen, name, request->node_idx);
- dlm_lockres_set_refmap_bit(request->node_idx, res);
+ dlm_lockres_set_refmap_bit(dlm, res, request->node_idx);
spin_unlock(&res->spinlock);
response = DLM_MASTER_RESP_YES;
if (mle)
@@ -1493,10 +1499,8 @@ way_up_top:
* go back and clean the mles on any
* other nodes */
dispatch_assert = 1;
- dlm_lockres_set_refmap_bit(request->node_idx, res);
- mlog(0, "%s:%.*s: setting bit %u in refmap\n",
- dlm->name, namelen, name,
- request->node_idx);
+ dlm_lockres_set_refmap_bit(dlm, res,
+ request->node_idx);
} else
response = DLM_MASTER_RESP_NO;
} else {
@@ -1702,7 +1706,7 @@ again:
"lockres, set the bit in the refmap\n",
namelen, lockname, to);
spin_lock(&res->spinlock);
- dlm_lockres_set_refmap_bit(to, res);
+ dlm_lockres_set_refmap_bit(dlm, res, to);
spin_unlock(&res->spinlock);
}
}
@@ -2187,8 +2191,6 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
namelen = res->lockname.len;
BUG_ON(namelen > O2NM_MAX_NAME_LEN);
- mlog(0, "%s:%.*s: sending deref to %d\n",
- dlm->name, namelen, lockname, res->owner);
memset(&deref, 0, sizeof(deref));
deref.node_idx = dlm->node_num;
deref.namelen = namelen;
@@ -2197,14 +2199,12 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key,
&deref, sizeof(deref), res->owner, &r);
if (ret < 0)
- mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
- "node %u\n", ret, DLM_DEREF_LOCKRES_MSG, dlm->key,
- res->owner);
+ mlog(ML_ERROR, "%s: res %.*s, error %d send DEREF to node %u\n",
+ dlm->name, namelen, lockname, ret, res->owner);
else if (r < 0) {
/* BAD. other node says I did not have a ref. */
- mlog(ML_ERROR,"while dropping ref on %s:%.*s "
- "(master=%u) got %d.\n", dlm->name, namelen,
- lockname, res->owner, r);
+ mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n",
+ dlm->name, namelen, lockname, res->owner, r);
dlm_print_one_lock_resource(res);
BUG();
}
@@ -2260,7 +2260,7 @@ int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
else {
BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
if (test_bit(node, res->refmap)) {
- dlm_lockres_clear_refmap_bit(node, res);
+ dlm_lockres_clear_refmap_bit(dlm, res, node);
cleared = 1;
}
}
@@ -2320,7 +2320,7 @@ static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data)
BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
if (test_bit(node, res->refmap)) {
__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
- dlm_lockres_clear_refmap_bit(node, res);
+ dlm_lockres_clear_refmap_bit(dlm, res, node);
cleared = 1;
}
spin_unlock(&res->spinlock);
@@ -2802,7 +2802,8 @@ static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
BUG_ON(!list_empty(&lock->bast_list));
BUG_ON(lock->ast_pending);
BUG_ON(lock->bast_pending);
- dlm_lockres_clear_refmap_bit(lock->ml.node, res);
+ dlm_lockres_clear_refmap_bit(dlm, res,
+ lock->ml.node);
list_del_init(&lock->list);
dlm_lock_put(lock);
/* In a normal unlock, we would have added a
@@ -2823,7 +2824,7 @@ static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
mlog(0, "%s:%.*s: node %u had a ref to this "
"migrating lockres, clearing\n", dlm->name,
res->lockname.len, res->lockname.name, bit);
- dlm_lockres_clear_refmap_bit(bit, res);
+ dlm_lockres_clear_refmap_bit(dlm, res, bit);
}
bit++;
}
@@ -2916,9 +2917,9 @@ static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
&migrate, sizeof(migrate), nodenum,
&status);
if (ret < 0) {
- mlog(ML_ERROR, "Error %d when sending message %u (key "
- "0x%x) to node %u\n", ret, DLM_MIGRATE_REQUEST_MSG,
- dlm->key, nodenum);
+ mlog(ML_ERROR, "%s: res %.*s, Error %d send "
+ "MIGRATE_REQUEST to node %u\n", dlm->name,
+ migrate.namelen, migrate.name, ret, nodenum);
if (!dlm_is_host_down(ret)) {
mlog(ML_ERROR, "unhandled error=%d!\n", ret);
BUG();
@@ -2937,7 +2938,7 @@ static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
dlm->name, res->lockname.len, res->lockname.name,
nodenum);
spin_lock(&res->spinlock);
- dlm_lockres_set_refmap_bit(nodenum, res);
+ dlm_lockres_set_refmap_bit(dlm, res, nodenum);
spin_unlock(&res->spinlock);
}
}
@@ -3271,7 +3272,7 @@ int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
* mastery reference here since old_master will briefly have
* a reference after the migration completes */
spin_lock(&res->spinlock);
- dlm_lockres_set_refmap_bit(old_master, res);
+ dlm_lockres_set_refmap_bit(dlm, res, old_master);
spin_unlock(&res->spinlock);
mlog(0, "now time to do a migrate request to other nodes\n");
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 7efab6d28a2..01ebfd0bdad 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -362,40 +362,38 @@ static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node)
}
-int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
+void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
{
- if (timeout) {
- mlog(ML_NOTICE, "%s: waiting %dms for notification of "
- "death of node %u\n", dlm->name, timeout, node);
+ if (dlm_is_node_dead(dlm, node))
+ return;
+
+ printk(KERN_NOTICE "o2dlm: Waiting on the death of node %u in "
+ "domain %s\n", node, dlm->name);
+
+ if (timeout)
wait_event_timeout(dlm->dlm_reco_thread_wq,
- dlm_is_node_dead(dlm, node),
- msecs_to_jiffies(timeout));
- } else {
- mlog(ML_NOTICE, "%s: waiting indefinitely for notification "
- "of death of node %u\n", dlm->name, node);
+ dlm_is_node_dead(dlm, node),
+ msecs_to_jiffies(timeout));
+ else
wait_event(dlm->dlm_reco_thread_wq,
dlm_is_node_dead(dlm, node));
- }
- /* for now, return 0 */
- return 0;
}
-int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
+void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
{
- if (timeout) {
- mlog(0, "%s: waiting %dms for notification of "
- "recovery of node %u\n", dlm->name, timeout, node);
+ if (dlm_is_node_recovered(dlm, node))
+ return;
+
+ printk(KERN_NOTICE "o2dlm: Waiting on the recovery of node %u in "
+ "domain %s\n", node, dlm->name);
+
+ if (timeout)
wait_event_timeout(dlm->dlm_reco_thread_wq,
- dlm_is_node_recovered(dlm, node),
- msecs_to_jiffies(timeout));
- } else {
- mlog(0, "%s: waiting indefinitely for notification "
- "of recovery of node %u\n", dlm->name, node);
+ dlm_is_node_recovered(dlm, node),
+ msecs_to_jiffies(timeout));
+ else
wait_event(dlm->dlm_reco_thread_wq,
dlm_is_node_recovered(dlm, node));
- }
- /* for now, return 0 */
- return 0;
}
/* callers of the top-level api calls (dlmlock/dlmunlock) should
@@ -430,6 +428,8 @@ static void dlm_begin_recovery(struct dlm_ctxt *dlm)
{
spin_lock(&dlm->spinlock);
BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
+ printk(KERN_NOTICE "o2dlm: Begin recovery on domain %s for node %u\n",
+ dlm->name, dlm->reco.dead_node);
dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
spin_unlock(&dlm->spinlock);
}
@@ -440,9 +440,18 @@ static void dlm_end_recovery(struct dlm_ctxt *dlm)
BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE;
spin_unlock(&dlm->spinlock);
+ printk(KERN_NOTICE "o2dlm: End recovery on domain %s\n", dlm->name);
wake_up(&dlm->reco.event);
}
+static void dlm_print_recovery_master(struct dlm_ctxt *dlm)
+{
+ printk(KERN_NOTICE "o2dlm: Node %u (%s) is the Recovery Master for the "
+ "dead node %u in domain %s\n", dlm->reco.new_master,
+ (dlm->node_num == dlm->reco.new_master ? "me" : "he"),
+ dlm->reco.dead_node, dlm->name);
+}
+
static int dlm_do_recovery(struct dlm_ctxt *dlm)
{
int status = 0;
@@ -505,9 +514,8 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm)
}
mlog(0, "another node will master this recovery session.\n");
}
- mlog(0, "dlm=%s (%d), new_master=%u, this node=%u, dead_node=%u\n",
- dlm->name, task_pid_nr(dlm->dlm_reco_thread_task), dlm->reco.new_master,
- dlm->node_num, dlm->reco.dead_node);
+
+ dlm_print_recovery_master(dlm);
/* it is safe to start everything back up here
* because all of the dead node's lock resources
@@ -518,15 +526,13 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm)
return 0;
master_here:
- mlog(ML_NOTICE, "(%d) Node %u is the Recovery Master for the Dead Node "
- "%u for Domain %s\n", task_pid_nr(dlm->dlm_reco_thread_task),
- dlm->node_num, dlm->reco.dead_node, dlm->name);
+ dlm_print_recovery_master(dlm);
status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
if (status < 0) {
/* we should never hit this anymore */
- mlog(ML_ERROR, "error %d remastering locks for node %u, "
- "retrying.\n", status, dlm->reco.dead_node);
+ mlog(ML_ERROR, "%s: Error %d remastering locks for node %u, "
+ "retrying.\n", dlm->name, status, dlm->reco.dead_node);
/* yield a bit to allow any final network messages
* to get handled on remaining nodes */
msleep(100);
@@ -567,7 +573,7 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
ndata->state = DLM_RECO_NODE_DATA_REQUESTING;
- mlog(0, "requesting lock info from node %u\n",
+ mlog(0, "%s: Requesting lock info from node %u\n", dlm->name,
ndata->node_num);
if (ndata->node_num == dlm->node_num) {
@@ -640,7 +646,7 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
spin_unlock(&dlm_reco_state_lock);
}
- mlog(0, "done requesting all lock info\n");
+ mlog(0, "%s: Done requesting all lock info\n", dlm->name);
/* nodes should be sending reco data now
* just need to wait */
@@ -802,10 +808,9 @@ static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
/* negative status is handled by caller */
if (ret < 0)
- mlog(ML_ERROR, "Error %d when sending message %u (key "
- "0x%x) to node %u\n", ret, DLM_LOCK_REQUEST_MSG,
- dlm->key, request_from);
-
+ mlog(ML_ERROR, "%s: Error %d send LOCK_REQUEST to node %u "
+ "to recover dead node %u\n", dlm->name, ret,
+ request_from, dead_node);
// return from here, then
// sleep until all received or error
return ret;
@@ -956,9 +961,9 @@ static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
sizeof(done_msg), send_to, &tmpret);
if (ret < 0) {
- mlog(ML_ERROR, "Error %d when sending message %u (key "
- "0x%x) to node %u\n", ret, DLM_RECO_DATA_DONE_MSG,
- dlm->key, send_to);
+ mlog(ML_ERROR, "%s: Error %d send RECO_DATA_DONE to node %u "
+ "to recover dead node %u\n", dlm->name, ret, send_to,
+ dead_node);
if (!dlm_is_host_down(ret)) {
BUG();
}
@@ -1127,9 +1132,11 @@ static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
if (ret < 0) {
/* XXX: negative status is not handled.
* this will end up killing this node. */
- mlog(ML_ERROR, "Error %d when sending message %u (key "
- "0x%x) to node %u\n", ret, DLM_MIG_LOCKRES_MSG,
- dlm->key, send_to);
+ mlog(ML_ERROR, "%s: res %.*s, Error %d send MIG_LOCKRES to "
+ "node %u (%s)\n", dlm->name, mres->lockname_len,
+ mres->lockname, ret, send_to,
+ (orig_flags & DLM_MRES_MIGRATION ?
+ "migration" : "recovery"));
} else {
/* might get an -ENOMEM back here */
ret = status;
@@ -1767,7 +1774,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
dlm->name, mres->lockname_len, mres->lockname,
from);
spin_lock(&res->spinlock);
- dlm_lockres_set_refmap_bit(from, res);
+ dlm_lockres_set_refmap_bit(dlm, res, from);
spin_unlock(&res->spinlock);
added++;
break;
@@ -1965,7 +1972,7 @@ skip_lvb:
mlog(0, "%s:%.*s: added lock for node %u, "
"setting refmap bit\n", dlm->name,
res->lockname.len, res->lockname.name, ml->node);
- dlm_lockres_set_refmap_bit(ml->node, res);
+ dlm_lockres_set_refmap_bit(dlm, res, ml->node);
added++;
}
spin_unlock(&res->spinlock);
@@ -2084,6 +2091,9 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
if (res->owner == dead_node) {
+ mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
+ dlm->name, res->lockname.len, res->lockname.name,
+ res->owner, new_master);
list_del_init(&res->recovering);
spin_lock(&res->spinlock);
/* new_master has our reference from
@@ -2105,40 +2115,30 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
for (i = 0; i < DLM_HASH_BUCKETS; i++) {
bucket = dlm_lockres_hash(dlm, i);
hlist_for_each_entry(res, hash_iter, bucket, hash_node) {
- if (res->state & DLM_LOCK_RES_RECOVERING) {
- if (res->owner == dead_node) {
- mlog(0, "(this=%u) res %.*s owner=%u "
- "was not on recovering list, but "
- "clearing state anyway\n",
- dlm->node_num, res->lockname.len,
- res->lockname.name, new_master);
- } else if (res->owner == dlm->node_num) {
- mlog(0, "(this=%u) res %.*s owner=%u "
- "was not on recovering list, "
- "owner is THIS node, clearing\n",
- dlm->node_num, res->lockname.len,
- res->lockname.name, new_master);
- } else
- continue;
+ if (!(res->state & DLM_LOCK_RES_RECOVERING))
+ continue;
- if (!list_empty(&res->recovering)) {
- mlog(0, "%s:%.*s: lockres was "
- "marked RECOVERING, owner=%u\n",
- dlm->name, res->lockname.len,
- res->lockname.name, res->owner);
- list_del_init(&res->recovering);
- dlm_lockres_put(res);
- }
- spin_lock(&res->spinlock);
- /* new_master has our reference from
- * the lock state sent during recovery */
- dlm_change_lockres_owner(dlm, res, new_master);
- res->state &= ~DLM_LOCK_RES_RECOVERING;
- if (__dlm_lockres_has_locks(res))
- __dlm_dirty_lockres(dlm, res);
- spin_unlock(&res->spinlock);
- wake_up(&res->wq);
+ if (res->owner != dead_node &&
+ res->owner != dlm->node_num)
+ continue;
+
+ if (!list_empty(&res->recovering)) {
+ list_del_init(&res->recovering);
+ dlm_lockres_put(res);
}
+
+ /* new_master has our reference from
+ * the lock state sent during recovery */
+ mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
+ dlm->name, res->lockname.len, res->lockname.name,
+ res->owner, new_master);
+ spin_lock(&res->spinlock);
+ dlm_change_lockres_owner(dlm, res, new_master);
+ res->state &= ~DLM_LOCK_RES_RECOVERING;
+ if (__dlm_lockres_has_locks(res))
+ __dlm_dirty_lockres(dlm, res);
+ spin_unlock(&res->spinlock);
+ wake_up(&res->wq);
}
}
}
@@ -2252,12 +2252,12 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
res->lockname.len, res->lockname.name, freed, dead_node);
__dlm_print_one_lock_resource(res);
}
- dlm_lockres_clear_refmap_bit(dead_node, res);
+ dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
} else if (test_bit(dead_node, res->refmap)) {
mlog(0, "%s:%.*s: dead node %u had a ref, but had "
"no locks and had not purged before dying\n", dlm->name,
res->lockname.len, res->lockname.name, dead_node);
- dlm_lockres_clear_refmap_bit(dead_node, res);
+ dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
}
/* do not kick thread yet */
@@ -2324,9 +2324,9 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
dlm_revalidate_lvb(dlm, res, dead_node);
if (res->owner == dead_node) {
if (res->state & DLM_LOCK_RES_DROPPING_REF) {
- mlog(ML_NOTICE, "Ignore %.*s for "
+ mlog(ML_NOTICE, "%s: res %.*s, Skip "
"recovery as it is being freed\n",
- res->lockname.len,
+ dlm->name, res->lockname.len,
res->lockname.name);
} else
dlm_move_lockres_to_recovery_list(dlm,
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
index 1d6d1d22c47..e73c833fc2a 100644
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -94,24 +94,26 @@ int __dlm_lockres_unused(struct dlm_lock_resource *res)
{
int bit;
+ assert_spin_locked(&res->spinlock);
+
if (__dlm_lockres_has_locks(res))
return 0;
+ /* Locks are in the process of being created */
+ if (res->inflight_locks)
+ return 0;
+
if (!list_empty(&res->dirty) || res->state & DLM_LOCK_RES_DIRTY)
return 0;
if (res->state & DLM_LOCK_RES_RECOVERING)
return 0;
+ /* Another node has this resource with this node as the master */
bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
if (bit < O2NM_MAX_NODES)
return 0;
- /*
- * since the bit for dlm->node_num is not set, inflight_locks better
- * be zero
- */
- BUG_ON(res->inflight_locks != 0);
return 1;
}
@@ -185,8 +187,6 @@ static void dlm_purge_lockres(struct dlm_ctxt *dlm,
/* clear our bit from the master's refmap, ignore errors */
ret = dlm_drop_lockres_ref(dlm, res);
if (ret < 0) {
- mlog(ML_ERROR, "%s: deref %.*s failed %d\n", dlm->name,
- res->lockname.len, res->lockname.name, ret);
if (!dlm_is_host_down(ret))
BUG();
}
@@ -209,7 +209,7 @@ static void dlm_purge_lockres(struct dlm_ctxt *dlm,
BUG();
}
- __dlm_unhash_lockres(res);
+ __dlm_unhash_lockres(dlm, res);
/* lockres is not in the hash now. drop the flag and wake up
* any processes waiting in dlm_get_lock_resource. */
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index e1ed5e502ff..81a4cd22f80 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -1692,7 +1692,7 @@ int ocfs2_open_lock(struct inode *inode)
mlog(0, "inode %llu take PRMODE open lock\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno);
- if (ocfs2_mount_local(osb))
+ if (ocfs2_is_hard_readonly(osb) || ocfs2_mount_local(osb))
goto out;
lockres = &OCFS2_I(inode)->ip_open_lockres;
@@ -1718,6 +1718,12 @@ int ocfs2_try_open_lock(struct inode *inode, int write)
(unsigned long long)OCFS2_I(inode)->ip_blkno,
write ? "EXMODE" : "PRMODE");
+ if (ocfs2_is_hard_readonly(osb)) {
+ if (write)
+ status = -EROFS;
+ goto out;
+ }
+
if (ocfs2_mount_local(osb))
goto out;
@@ -2298,7 +2304,7 @@ int ocfs2_inode_lock_full_nested(struct inode *inode,
if (ocfs2_is_hard_readonly(osb)) {
if (ex)
status = -EROFS;
- goto bail;
+ goto getbh;
}
if (ocfs2_mount_local(osb))
@@ -2356,7 +2362,7 @@ local:
mlog_errno(status);
goto bail;
}
-
+getbh:
if (ret_bh) {
status = ocfs2_assign_bh(inode, ret_bh, local_bh);
if (status < 0) {
@@ -2628,8 +2634,11 @@ int ocfs2_dentry_lock(struct dentry *dentry, int ex)
BUG_ON(!dl);
- if (ocfs2_is_hard_readonly(osb))
- return -EROFS;
+ if (ocfs2_is_hard_readonly(osb)) {
+ if (ex)
+ return -EROFS;
+ return 0;
+ }
if (ocfs2_mount_local(osb))
return 0;
@@ -2647,7 +2656,7 @@ void ocfs2_dentry_unlock(struct dentry *dentry, int ex)
struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
- if (!ocfs2_mount_local(osb))
+ if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb))
ocfs2_cluster_unlock(osb, &dl->dl_lockres, level);
}
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index 23457b491e8..2f5b92ef0e5 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -832,6 +832,102 @@ out:
return ret;
}
+int ocfs2_seek_data_hole_offset(struct file *file, loff_t *offset, int origin)
+{
+ struct inode *inode = file->f_mapping->host;
+ int ret;
+ unsigned int is_last = 0, is_data = 0;
+ u16 cs_bits = OCFS2_SB(inode->i_sb)->s_clustersize_bits;
+ u32 cpos, cend, clen, hole_size;
+ u64 extoff, extlen;
+ struct buffer_head *di_bh = NULL;
+ struct ocfs2_extent_rec rec;
+
+ BUG_ON(origin != SEEK_DATA && origin != SEEK_HOLE);
+
+ ret = ocfs2_inode_lock(inode, &di_bh, 0);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ down_read(&OCFS2_I(inode)->ip_alloc_sem);
+
+ if (*offset >= inode->i_size) {
+ ret = -ENXIO;
+ goto out_unlock;
+ }
+
+ if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
+ if (origin == SEEK_HOLE)
+ *offset = inode->i_size;
+ goto out_unlock;
+ }
+
+ clen = 0;
+ cpos = *offset >> cs_bits;
+ cend = ocfs2_clusters_for_bytes(inode->i_sb, inode->i_size);
+
+ while (cpos < cend && !is_last) {
+ ret = ocfs2_get_clusters_nocache(inode, di_bh, cpos, &hole_size,
+ &rec, &is_last);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_unlock;
+ }
+
+ extoff = cpos;
+ extoff <<= cs_bits;
+
+ if (rec.e_blkno == 0ULL) {
+ clen = hole_size;
+ is_data = 0;
+ } else {
+ clen = le16_to_cpu(rec.e_leaf_clusters) -
+ (cpos - le32_to_cpu(rec.e_cpos));
+ is_data = (rec.e_flags & OCFS2_EXT_UNWRITTEN) ? 0 : 1;
+ }
+
+ if ((!is_data && origin == SEEK_HOLE) ||
+ (is_data && origin == SEEK_DATA)) {
+ if (extoff > *offset)
+ *offset = extoff;
+ goto out_unlock;
+ }
+
+ if (!is_last)
+ cpos += clen;
+ }
+
+ if (origin == SEEK_HOLE) {
+ extoff = cpos;
+ extoff <<= cs_bits;
+ extlen = clen;
+ extlen <<= cs_bits;
+
+ if ((extoff + extlen) > inode->i_size)
+ extlen = inode->i_size - extoff;
+ extoff += extlen;
+ if (extoff > *offset)
+ *offset = extoff;
+ goto out_unlock;
+ }
+
+ ret = -ENXIO;
+
+out_unlock:
+
+ brelse(di_bh);
+
+ up_read(&OCFS2_I(inode)->ip_alloc_sem);
+
+ ocfs2_inode_unlock(inode, 0);
+out:
+ if (ret && ret != -ENXIO)
+ ret = -ENXIO;
+ return ret;
+}
+
int ocfs2_read_virt_blocks(struct inode *inode, u64 v_block, int nr,
struct buffer_head *bhs[], int flags,
int (*validate)(struct super_block *sb,
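[Editor's illustration] The ocfs2_seek_data_hole_offset() helper added above backs the SEEK_DATA and SEEK_HOLE whence values by walking extent records and treating unwritten extents as holes. As a rough sketch only, not part of this patch, the user-visible behaviour looks like the following user-space program (the file path is hypothetical):

#define _GNU_SOURCE             /* SEEK_DATA / SEEK_HOLE */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/ocfs2/sparsefile", O_RDONLY);  /* hypothetical path */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Offset of the first data byte at or after 0; fails with ENXIO
	 * if the start offset is past i_size. */
	off_t data = lseek(fd, 0, SEEK_DATA);

	/* Offset of the first hole at or after 0; a fully written file
	 * reports the hole at i_size. */
	off_t hole = lseek(fd, 0, SEEK_HOLE);

	printf("first data at %lld, first hole at %lld\n",
	       (long long)data, (long long)hole);
	close(fd);
	return 0;
}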
diff --git a/fs/ocfs2/extent_map.h b/fs/ocfs2/extent_map.h
index e79d41c2c90..67ea57d2fd5 100644
--- a/fs/ocfs2/extent_map.h
+++ b/fs/ocfs2/extent_map.h
@@ -53,6 +53,8 @@ int ocfs2_extent_map_get_blocks(struct inode *inode, u64 v_blkno, u64 *p_blkno,
int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 map_start, u64 map_len);
+int ocfs2_seek_data_hole_offset(struct file *file, loff_t *offset, int origin);
+
int ocfs2_xattr_get_clusters(struct inode *inode, u32 v_cluster,
u32 *p_cluster, u32 *num_clusters,
struct ocfs2_extent_list *el,
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index de4ea1af041..6e396683c3d 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1950,6 +1950,9 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
if (ret < 0)
mlog_errno(ret);
+ if (file->f_flags & O_SYNC)
+ handle->h_sync = 1;
+
ocfs2_commit_trans(osb, handle);
out_inode_unlock:
@@ -2052,6 +2055,23 @@ out:
return ret;
}
+static void ocfs2_aiodio_wait(struct inode *inode)
+{
+ wait_queue_head_t *wq = ocfs2_ioend_wq(inode);
+
+ wait_event(*wq, (atomic_read(&OCFS2_I(inode)->ip_unaligned_aio) == 0));
+}
+
+static int ocfs2_is_io_unaligned(struct inode *inode, size_t count, loff_t pos)
+{
+ int blockmask = inode->i_sb->s_blocksize - 1;
+ loff_t final_size = pos + count;
+
+ if ((pos & blockmask) || (final_size & blockmask))
+ return 1;
+ return 0;
+}
+
static int ocfs2_prepare_inode_for_refcount(struct inode *inode,
struct file *file,
loff_t pos, size_t count,
@@ -2230,6 +2250,7 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
int full_coherency = !(osb->s_mount_opt &
OCFS2_MOUNT_COHERENCY_BUFFERED);
+ int unaligned_dio = 0;
trace_ocfs2_file_aio_write(inode, file, file->f_path.dentry,
(unsigned long long)OCFS2_I(inode)->ip_blkno,
@@ -2297,6 +2318,10 @@ relock:
goto out;
}
+ if (direct_io && !is_sync_kiocb(iocb))
+ unaligned_dio = ocfs2_is_io_unaligned(inode, iocb->ki_left,
+ *ppos);
+
/*
* We can't complete the direct I/O as requested, fall back to
* buffered I/O.
@@ -2311,6 +2336,18 @@ relock:
goto relock;
}
+ if (unaligned_dio) {
+ /*
+ * Wait on previous unaligned aio to complete before
+ * proceeding.
+ */
+ ocfs2_aiodio_wait(inode);
+
+ /* Mark the iocb as needing a decrement in ocfs2_dio_end_io */
+ atomic_inc(&OCFS2_I(inode)->ip_unaligned_aio);
+ ocfs2_iocb_set_unaligned_aio(iocb);
+ }
+
/*
* To later detect whether a journal commit for sync writes is
* necessary, we sample i_size, and cluster count here.
@@ -2382,8 +2419,12 @@ out_dio:
if ((ret == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) {
rw_level = -1;
have_alloc_sem = 0;
+ unaligned_dio = 0;
}
+ if (unaligned_dio)
+ atomic_dec(&OCFS2_I(inode)->ip_unaligned_aio);
+
out:
if (rw_level != -1)
ocfs2_rw_unlock(inode, rw_level);
@@ -2591,6 +2632,57 @@ bail:
return ret;
}
+/* Refer to generic_file_llseek_unlocked() */
+static loff_t ocfs2_file_llseek(struct file *file, loff_t offset, int origin)
+{
+ struct inode *inode = file->f_mapping->host;
+ int ret = 0;
+
+ mutex_lock(&inode->i_mutex);
+
+ switch (origin) {
+ case SEEK_SET:
+ break;
+ case SEEK_END:
+ offset += inode->i_size;
+ break;
+ case SEEK_CUR:
+ if (offset == 0) {
+ offset = file->f_pos;
+ goto out;
+ }
+ offset += file->f_pos;
+ break;
+ case SEEK_DATA:
+ case SEEK_HOLE:
+ ret = ocfs2_seek_data_hole_offset(file, &offset, origin);
+ if (ret)
+ goto out;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET))
+ ret = -EINVAL;
+ if (!ret && offset > inode->i_sb->s_maxbytes)
+ ret = -EINVAL;
+ if (ret)
+ goto out;
+
+ if (offset != file->f_pos) {
+ file->f_pos = offset;
+ file->f_version = 0;
+ }
+
+out:
+ mutex_unlock(&inode->i_mutex);
+ if (ret)
+ return ret;
+ return offset;
+}
+
const struct inode_operations ocfs2_file_iops = {
.setattr = ocfs2_setattr,
.getattr = ocfs2_getattr,
@@ -2615,7 +2707,7 @@ const struct inode_operations ocfs2_special_file_iops = {
* ocfs2_fops_no_plocks and ocfs2_dops_no_plocks!
*/
const struct file_operations ocfs2_fops = {
- .llseek = generic_file_llseek,
+ .llseek = ocfs2_file_llseek,
.read = do_sync_read,
.write = do_sync_write,
.mmap = ocfs2_mmap,
@@ -2663,7 +2755,7 @@ const struct file_operations ocfs2_dops = {
* the cluster.
*/
const struct file_operations ocfs2_fops_no_plocks = {
- .llseek = generic_file_llseek,
+ .llseek = ocfs2_file_llseek,
.read = do_sync_read,
.write = do_sync_write,
.mmap = ocfs2_mmap,
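[Editor's illustration] The unaligned-AIO handling introduced above hinges on ocfs2_is_io_unaligned(): a write is treated as unaligned when its start or end offset lands inside a filesystem block, and such writes are serialized through ip_unaligned_aio. The check itself is plain mask arithmetic; a stand-alone sketch with an assumed 4 KiB block size:

#include <stdio.h>

/* Same test as ocfs2_is_io_unaligned(): unaligned if either end of the
 * request is not on a block boundary. */
static int is_io_unaligned(long long pos, long long count, long long blocksize)
{
	long long blockmask = blocksize - 1;
	long long final_size = pos + count;

	if ((pos & blockmask) || (final_size & blockmask))
		return 1;
	return 0;
}

int main(void)
{
	printf("%d\n", is_io_unaligned(4096, 8192, 4096)); /* 0: block aligned   */
	printf("%d\n", is_io_unaligned(100, 4096, 4096));  /* 1: straddles block */
	return 0;
}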
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index a22d2c09889..17454a904d7 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -951,7 +951,7 @@ static void ocfs2_cleanup_delete_inode(struct inode *inode,
trace_ocfs2_cleanup_delete_inode(
(unsigned long long)OCFS2_I(inode)->ip_blkno, sync_data);
if (sync_data)
- write_inode_now(inode, 1);
+ filemap_write_and_wait(inode->i_mapping);
truncate_inode_pages(&inode->i_data, 0);
}
diff --git a/fs/ocfs2/inode.h b/fs/ocfs2/inode.h
index 1c508b149b3..88924a3133f 100644
--- a/fs/ocfs2/inode.h
+++ b/fs/ocfs2/inode.h
@@ -43,6 +43,9 @@ struct ocfs2_inode_info
/* protects extended attribute changes on this inode */
struct rw_semaphore ip_xattr_sem;
+ /* Number of outstanding AIO's which are not page aligned */
+ atomic_t ip_unaligned_aio;
+
/* These fields are protected by ip_lock */
spinlock_t ip_lock;
u32 ip_open_count;
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index bc91072b721..726ff265b29 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -122,7 +122,7 @@ static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags,
if ((oldflags & OCFS2_IMMUTABLE_FL) || ((flags ^ oldflags) &
(OCFS2_APPEND_FL | OCFS2_IMMUTABLE_FL))) {
if (!capable(CAP_LINUX_IMMUTABLE))
- goto bail_unlock;
+ goto bail_commit;
}
ocfs2_inode->ip_attr = flags;
@@ -132,6 +132,7 @@ static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags,
if (status < 0)
mlog_errno(status);
+bail_commit:
ocfs2_commit_trans(osb, handle);
bail_unlock:
ocfs2_inode_unlock(inode, 1);
@@ -381,7 +382,7 @@ int ocfs2_info_handle_freeinode(struct inode *inode,
if (!oifi) {
status = -ENOMEM;
mlog_errno(status);
- goto bail;
+ goto out_err;
}
if (o2info_from_user(*oifi, req))
@@ -431,7 +432,7 @@ bail:
o2info_set_request_error(&oifi->ifi_req, req);
kfree(oifi);
-
+out_err:
return status;
}
@@ -666,7 +667,7 @@ int ocfs2_info_handle_freefrag(struct inode *inode,
if (!oiff) {
status = -ENOMEM;
mlog_errno(status);
- goto bail;
+ goto out_err;
}
if (o2info_from_user(*oiff, req))
@@ -716,7 +717,7 @@ bail:
o2info_set_request_error(&oiff->iff_req, req);
kfree(oiff);
-
+out_err:
return status;
}
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 295d56454e8..0a42ae96dca 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -1544,9 +1544,9 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb,
/* we need to run complete recovery for offline orphan slots */
ocfs2_replay_map_set_state(osb, REPLAY_NEEDED);
- mlog(ML_NOTICE, "Recovering node %d from slot %d on device (%u,%u)\n",
- node_num, slot_num,
- MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev));
+ printk(KERN_NOTICE "ocfs2: Begin replay journal (node %d, slot %d) on "\
+ "device (%u,%u)\n", node_num, slot_num, MAJOR(osb->sb->s_dev),
+ MINOR(osb->sb->s_dev));
OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
@@ -1601,6 +1601,9 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb,
jbd2_journal_destroy(journal);
+ printk(KERN_NOTICE "ocfs2: End replay journal (node %d, slot %d) on "\
+ "device (%u,%u)\n", node_num, slot_num, MAJOR(osb->sb->s_dev),
+ MINOR(osb->sb->s_dev));
done:
/* drop the lock on this nodes journal */
if (got_lock)
@@ -1808,6 +1811,20 @@ static inline unsigned long ocfs2_orphan_scan_timeout(void)
* every slot, queuing a recovery of the slot on the ocfs2_wq thread. This
* is done to catch any orphans that are left over in orphan directories.
*
+ * It scans all slots, even ones that are in use. It does so to handle the
+ * case described below:
+ *
+ * Node 1 has an inode it was using. The dentry went away due to memory
+ * pressure. Node 1 closes the inode, but it's on the free list. The node
+ * has the open lock.
+ * Node 2 unlinks the inode. It grabs the dentry lock to notify others,
+ * but node 1 has no dentry and doesn't get the message. It trylocks the
+ * open lock, sees that another node has a PR, and does nothing.
+ * Later node 2 runs its orphan dir. It igets the inode, trylocks the
+ * open lock, sees the PR still, and does nothing.
+ * Basically, we have to trigger an orphan iput on node 1. The only way
+ * for this to happen is if node 1 runs node 2's orphan dir.
+ *
* ocfs2_queue_orphan_scan gets called every ORPHAN_SCAN_SCHEDULE_TIMEOUT
* seconds. It gets an EX lock on os_lockres and checks sequence number
* stored in LVB. If the sequence number has changed, it means some other
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 68cf2f6d3c6..a3385b63ff5 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -441,10 +441,11 @@ static inline int ocfs2_mknod_credits(struct super_block *sb, int is_dir,
#define OCFS2_SIMPLE_DIR_EXTEND_CREDITS (2)
/* file update (nlink, etc) + directory mtime/ctime + dir entry block + quota
- * update on dir + index leaf + dx root update for free list */
+ * update on dir + index leaf + dx root update for free list +
+ * previous dirblock update in the free list */
static inline int ocfs2_link_credits(struct super_block *sb)
{
- return 2*OCFS2_INODE_UPDATE_CREDITS + 3 +
+ return 2*OCFS2_INODE_UPDATE_CREDITS + 4 +
ocfs2_quota_trans_credits(sb);
}
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c
index 3e9393ca39e..9cd41083e99 100644
--- a/fs/ocfs2/mmap.c
+++ b/fs/ocfs2/mmap.c
@@ -61,7 +61,7 @@ static int ocfs2_fault(struct vm_area_struct *area, struct vm_fault *vmf)
static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh,
struct page *page)
{
- int ret;
+ int ret = VM_FAULT_NOPAGE;
struct inode *inode = file->f_path.dentry->d_inode;
struct address_space *mapping = inode->i_mapping;
loff_t pos = page_offset(page);
@@ -71,32 +71,25 @@ static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh,
void *fsdata;
loff_t size = i_size_read(inode);
- /*
- * Another node might have truncated while we were waiting on
- * cluster locks.
- * We don't check size == 0 before the shift. This is borrowed
- * from do_generic_file_read.
- */
last_index = (size - 1) >> PAGE_CACHE_SHIFT;
- if (unlikely(!size || page->index > last_index)) {
- ret = -EINVAL;
- goto out;
- }
/*
- * The i_size check above doesn't catch the case where nodes
- * truncated and then re-extended the file. We'll re-check the
- * page mapping after taking the page lock inside of
- * ocfs2_write_begin_nolock().
+	 * There are cases that lead to the page no longer belonging to the
+ * mapping.
+ * 1) pagecache truncates locally due to memory pressure.
+ * 2) pagecache truncates when another is taking EX lock against
+ * inode lock. see ocfs2_data_convert_worker.
+ *
+ * The i_size check doesn't catch the case where nodes truncated and
+ * then re-extended the file. We'll re-check the page mapping after
+ * taking the page lock inside of ocfs2_write_begin_nolock().
+ *
+ * Let VM retry with these cases.
*/
- if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
- /*
- * the page has been umapped in ocfs2_data_downconvert_worker.
- * So return 0 here and let VFS retry.
- */
- ret = 0;
+ if ((page->mapping != inode->i_mapping) ||
+ (!PageUptodate(page)) ||
+ (page_offset(page) >= size))
goto out;
- }
/*
* Call ocfs2_write_begin() and ocfs2_write_end() to take
@@ -116,17 +109,21 @@ static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh,
if (ret) {
if (ret != -ENOSPC)
mlog_errno(ret);
+ if (ret == -ENOMEM)
+ ret = VM_FAULT_OOM;
+ else
+ ret = VM_FAULT_SIGBUS;
goto out;
}
- ret = ocfs2_write_end_nolock(mapping, pos, len, len, locked_page,
- fsdata);
- if (ret < 0) {
- mlog_errno(ret);
+ if (!locked_page) {
+ ret = VM_FAULT_NOPAGE;
goto out;
}
+ ret = ocfs2_write_end_nolock(mapping, pos, len, len, locked_page,
+ fsdata);
BUG_ON(ret != len);
- ret = 0;
+ ret = VM_FAULT_LOCKED;
out:
return ret;
}
@@ -168,8 +165,6 @@ static int ocfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
out:
ocfs2_unblock_signals(&oldset);
- if (ret)
- ret = VM_FAULT_SIGBUS;
return ret;
}
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
index d53cb706f14..184c76b8c29 100644
--- a/fs/ocfs2/move_extents.c
+++ b/fs/ocfs2/move_extents.c
@@ -745,7 +745,7 @@ static int ocfs2_move_extent(struct ocfs2_move_extents_context *context,
*/
ocfs2_probe_alloc_group(inode, gd_bh, &goal_bit, len, move_max_hop,
new_phys_cpos);
- if (!new_phys_cpos) {
+ if (!*new_phys_cpos) {
ret = -ENOSPC;
goto out_commit;
}
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 409285854f6..d355e6e36b3 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -836,18 +836,65 @@ static inline unsigned int ocfs2_clusters_to_megabytes(struct super_block *sb,
static inline void _ocfs2_set_bit(unsigned int bit, unsigned long *bitmap)
{
- __test_and_set_bit_le(bit, bitmap);
+ __set_bit_le(bit, bitmap);
}
#define ocfs2_set_bit(bit, addr) _ocfs2_set_bit((bit), (unsigned long *)(addr))
static inline void _ocfs2_clear_bit(unsigned int bit, unsigned long *bitmap)
{
- __test_and_clear_bit_le(bit, bitmap);
+ __clear_bit_le(bit, bitmap);
}
#define ocfs2_clear_bit(bit, addr) _ocfs2_clear_bit((bit), (unsigned long *)(addr))
#define ocfs2_test_bit test_bit_le
#define ocfs2_find_next_zero_bit find_next_zero_bit_le
#define ocfs2_find_next_bit find_next_bit_le
+
+static inline void *correct_addr_and_bit_unaligned(int *bit, void *addr)
+{
+#if BITS_PER_LONG == 64
+ *bit += ((unsigned long) addr & 7UL) << 3;
+ addr = (void *) ((unsigned long) addr & ~7UL);
+#elif BITS_PER_LONG == 32
+ *bit += ((unsigned long) addr & 3UL) << 3;
+ addr = (void *) ((unsigned long) addr & ~3UL);
+#else
+#error "how many bits you are?!"
+#endif
+ return addr;
+}
+
+static inline void ocfs2_set_bit_unaligned(int bit, void *bitmap)
+{
+ bitmap = correct_addr_and_bit_unaligned(&bit, bitmap);
+ ocfs2_set_bit(bit, bitmap);
+}
+
+static inline void ocfs2_clear_bit_unaligned(int bit, void *bitmap)
+{
+ bitmap = correct_addr_and_bit_unaligned(&bit, bitmap);
+ ocfs2_clear_bit(bit, bitmap);
+}
+
+static inline int ocfs2_test_bit_unaligned(int bit, void *bitmap)
+{
+ bitmap = correct_addr_and_bit_unaligned(&bit, bitmap);
+ return ocfs2_test_bit(bit, bitmap);
+}
+
+static inline int ocfs2_find_next_zero_bit_unaligned(void *bitmap, int max,
+ int start)
+{
+ int fix = 0, ret, tmpmax;
+ bitmap = correct_addr_and_bit_unaligned(&fix, bitmap);
+ tmpmax = max + fix;
+ start += fix;
+
+ ret = ocfs2_find_next_zero_bit(bitmap, tmpmax, start) - fix;
+ if (ret > max)
+ return max;
+ return ret;
+}
+
#endif /* OCFS2_H */
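[Editor's illustration] The *_unaligned bitmap wrappers above exist because the quota chunk bitmap (dqc_bitmap) is not guaranteed to start on a long boundary; correct_addr_and_bit_unaligned() rounds the pointer down to a long and folds the dropped bytes into the bit index, eight bits per byte. A user-space sketch of that arithmetic for a 64-bit build (the buffer layout is illustrative):

#include <stdint.h>
#include <stdio.h>

/* 64-bit variant of the address correction: align down to 8 bytes and
 * push the slack into the bit index. */
static void *correct_addr_and_bit(int *bit, void *addr)
{
	*bit += ((uintptr_t)addr & 7UL) << 3;   /* 8 bits per dropped byte */
	return (void *)((uintptr_t)addr & ~7UL);
}

int main(void)
{
	static unsigned long base[2];           /* long-aligned backing store */
	unsigned char *bitmap = (unsigned char *)base + 5; /* misaligned view */
	int bit = 3;

	void *fixed = correct_addr_and_bit(&bit, bitmap);

	/* Five dropped bytes fold into the index: 3 + 5 * 8 = 43. */
	printf("address moved back %td bytes, bit is now %d\n",
	       (char *)bitmap - (char *)fixed, bit);
	return 0;
}

Set, clear and test on the corrected (address, bit) pair then behave exactly like the aligned helpers.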
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index dc8007fc924..f100bf70a90 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -404,7 +404,9 @@ struct ocfs2_quota_recovery *ocfs2_begin_quota_recovery(
int status = 0;
struct ocfs2_quota_recovery *rec;
- mlog(ML_NOTICE, "Beginning quota recovery in slot %u\n", slot_num);
+ printk(KERN_NOTICE "ocfs2: Beginning quota recovery on device (%s) for "
+ "slot %u\n", osb->dev_str, slot_num);
+
rec = ocfs2_alloc_quota_recovery();
if (!rec)
return ERR_PTR(-ENOMEM);
@@ -549,8 +551,8 @@ static int ocfs2_recover_local_quota_file(struct inode *lqinode,
goto out_commit;
}
lock_buffer(qbh);
- WARN_ON(!ocfs2_test_bit(bit, dchunk->dqc_bitmap));
- ocfs2_clear_bit(bit, dchunk->dqc_bitmap);
+ WARN_ON(!ocfs2_test_bit_unaligned(bit, dchunk->dqc_bitmap));
+ ocfs2_clear_bit_unaligned(bit, dchunk->dqc_bitmap);
le32_add_cpu(&dchunk->dqc_free, 1);
unlock_buffer(qbh);
ocfs2_journal_dirty(handle, qbh);
@@ -596,7 +598,9 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb,
struct inode *lqinode;
unsigned int flags;
- mlog(ML_NOTICE, "Finishing quota recovery in slot %u\n", slot_num);
+ printk(KERN_NOTICE "ocfs2: Finishing quota recovery on device (%s) for "
+ "slot %u\n", osb->dev_str, slot_num);
+
mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
for (type = 0; type < MAXQUOTAS; type++) {
if (list_empty(&(rec->r_list[type])))
@@ -612,8 +616,9 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb,
/* Someone else is holding the lock? Then he must be
* doing the recovery. Just skip the file... */
if (status == -EAGAIN) {
- mlog(ML_NOTICE, "skipping quota recovery for slot %d "
- "because quota file is locked.\n", slot_num);
+ printk(KERN_NOTICE "ocfs2: Skipping quota recovery on "
+ "device (%s) for slot %d because quota file is "
+ "locked.\n", osb->dev_str, slot_num);
status = 0;
goto out_put;
} else if (status < 0) {
@@ -944,7 +949,7 @@ static struct ocfs2_quota_chunk *ocfs2_find_free_entry(struct super_block *sb,
* ol_quota_entries_per_block(sb);
}
- found = ocfs2_find_next_zero_bit(dchunk->dqc_bitmap, len, 0);
+ found = ocfs2_find_next_zero_bit_unaligned(dchunk->dqc_bitmap, len, 0);
/* We failed? */
if (found == len) {
mlog(ML_ERROR, "Did not find empty entry in chunk %d with %u"
@@ -1208,7 +1213,7 @@ static void olq_alloc_dquot(struct buffer_head *bh, void *private)
struct ocfs2_local_disk_chunk *dchunk;
dchunk = (struct ocfs2_local_disk_chunk *)bh->b_data;
- ocfs2_set_bit(*offset, dchunk->dqc_bitmap);
+ ocfs2_set_bit_unaligned(*offset, dchunk->dqc_bitmap);
le32_add_cpu(&dchunk->dqc_free, -1);
}
@@ -1289,7 +1294,7 @@ int ocfs2_local_release_dquot(handle_t *handle, struct dquot *dquot)
(od->dq_chunk->qc_headerbh->b_data);
/* Mark structure as freed */
lock_buffer(od->dq_chunk->qc_headerbh);
- ocfs2_clear_bit(offset, dchunk->dqc_bitmap);
+ ocfs2_clear_bit_unaligned(offset, dchunk->dqc_bitmap);
le32_add_cpu(&dchunk->dqc_free, 1);
unlock_buffer(od->dq_chunk->qc_headerbh);
ocfs2_journal_dirty(handle, od->dq_chunk->qc_headerbh);
diff --git a/fs/ocfs2/slot_map.c b/fs/ocfs2/slot_map.c
index 26fc0014d50..1424c151ccc 100644
--- a/fs/ocfs2/slot_map.c
+++ b/fs/ocfs2/slot_map.c
@@ -493,8 +493,8 @@ int ocfs2_find_slot(struct ocfs2_super *osb)
goto bail;
}
} else
- mlog(ML_NOTICE, "slot %d is already allocated to this node!\n",
- slot);
+ printk(KERN_INFO "ocfs2: Slot %d on device (%s) was already "
+ "allocated to this node!\n", slot, osb->dev_str);
ocfs2_set_slot(si, slot, osb->node_num);
osb->slot_num = slot;
diff --git a/fs/ocfs2/stack_o2cb.c b/fs/ocfs2/stack_o2cb.c
index 19965b00c43..94368017edb 100644
--- a/fs/ocfs2/stack_o2cb.c
+++ b/fs/ocfs2/stack_o2cb.c
@@ -28,6 +28,7 @@
#include "cluster/masklog.h"
#include "cluster/nodemanager.h"
#include "cluster/heartbeat.h"
+#include "cluster/tcp.h"
#include "stackglue.h"
@@ -256,6 +257,61 @@ static void o2cb_dump_lksb(struct ocfs2_dlm_lksb *lksb)
}
/*
+ * Check if this node is heartbeating and is connected to all other
+ * heartbeating nodes.
+ */
+static int o2cb_cluster_check(void)
+{
+ u8 node_num;
+ int i;
+ unsigned long hbmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ unsigned long netmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
+
+ node_num = o2nm_this_node();
+ if (node_num == O2NM_MAX_NODES) {
+ printk(KERN_ERR "o2cb: This node has not been configured.\n");
+ return -EINVAL;
+ }
+
+ /*
+ * o2dlm expects o2net sockets to be created. If not, then
+ * dlm_join_domain() fails with a stack of errors which are both cryptic
+ * and incomplete. The idea here is to detect upfront whether we have
+ * managed to connect to all nodes or not. If not, then list the nodes
+ * to allow the user to check the configuration (incorrect IP, firewall,
+	 * etc.) Yes, this is racy. But it's not the end of the world.
+ */
+#define O2CB_MAP_STABILIZE_COUNT 60
+ for (i = 0; i < O2CB_MAP_STABILIZE_COUNT; ++i) {
+ o2hb_fill_node_map(hbmap, sizeof(hbmap));
+ if (!test_bit(node_num, hbmap)) {
+ printk(KERN_ERR "o2cb: %s heartbeat has not been "
+ "started.\n", (o2hb_global_heartbeat_active() ?
+ "Global" : "Local"));
+ return -EINVAL;
+ }
+ o2net_fill_node_map(netmap, sizeof(netmap));
+ /* Force set the current node to allow easy compare */
+ set_bit(node_num, netmap);
+ if (!memcmp(hbmap, netmap, sizeof(hbmap)))
+ return 0;
+ if (i < O2CB_MAP_STABILIZE_COUNT)
+ msleep(1000);
+ }
+
+ printk(KERN_ERR "o2cb: This node could not connect to nodes:");
+ i = -1;
+ while ((i = find_next_bit(hbmap, O2NM_MAX_NODES,
+ i + 1)) < O2NM_MAX_NODES) {
+ if (!test_bit(i, netmap))
+ printk(" %u", i);
+ }
+ printk(".\n");
+
+ return -ENOTCONN;
+}
+
+/*
* Called from the dlm when it's about to evict a node. This is how the
* classic stack signals node death.
*/
@@ -263,8 +319,8 @@ static void o2dlm_eviction_cb(int node_num, void *data)
{
struct ocfs2_cluster_connection *conn = data;
- mlog(ML_NOTICE, "o2dlm has evicted node %d from group %.*s\n",
- node_num, conn->cc_namelen, conn->cc_name);
+ printk(KERN_NOTICE "o2cb: o2dlm has evicted node %d from domain %.*s\n",
+ node_num, conn->cc_namelen, conn->cc_name);
conn->cc_recovery_handler(node_num, conn->cc_recovery_data);
}
@@ -280,12 +336,11 @@ static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn)
BUG_ON(conn == NULL);
BUG_ON(conn->cc_proto == NULL);
- /* for now we only have one cluster/node, make sure we see it
- * in the heartbeat universe */
- if (!o2hb_check_local_node_heartbeating()) {
- if (o2hb_global_heartbeat_active())
- mlog(ML_ERROR, "Global heartbeat not started\n");
- rc = -EINVAL;
+ /* Ensure cluster stack is up and all nodes are connected */
+ rc = o2cb_cluster_check();
+ if (rc) {
+ printk(KERN_ERR "o2cb: Cluster check failed. Fix errors "
+ "before retrying.\n");
goto out;
}
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 56f61027236..4994f8b0e60 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -54,6 +54,7 @@
#include "ocfs1_fs_compat.h"
#include "alloc.h"
+#include "aops.h"
#include "blockcheck.h"
#include "dlmglue.h"
#include "export.h"
@@ -1107,9 +1108,9 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
ocfs2_set_ro_flag(osb, 1);
- printk(KERN_NOTICE "Readonly device detected. No cluster "
- "services will be utilized for this mount. Recovery "
- "will be skipped.\n");
+ printk(KERN_NOTICE "ocfs2: Readonly device (%s) detected. "
+ "Cluster services will not be used for this mount. "
+ "Recovery will be skipped.\n", osb->dev_str);
}
if (!ocfs2_is_hard_readonly(osb)) {
@@ -1616,12 +1617,17 @@ static int ocfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
return 0;
}
+wait_queue_head_t ocfs2__ioend_wq[OCFS2_IOEND_WQ_HASH_SZ];
+
static int __init ocfs2_init(void)
{
- int status;
+ int status, i;
ocfs2_print_version();
+ for (i = 0; i < OCFS2_IOEND_WQ_HASH_SZ; i++)
+ init_waitqueue_head(&ocfs2__ioend_wq[i]);
+
status = init_ocfs2_uptodate_cache();
if (status < 0) {
mlog_errno(status);
@@ -1760,7 +1766,7 @@ static void ocfs2_inode_init_once(void *data)
ocfs2_extent_map_init(&oi->vfs_inode);
INIT_LIST_HEAD(&oi->ip_io_markers);
oi->ip_dir_start_lookup = 0;
-
+ atomic_set(&oi->ip_unaligned_aio, 0);
init_rwsem(&oi->ip_alloc_sem);
init_rwsem(&oi->ip_xattr_sem);
mutex_init(&oi->ip_io_mutex);
@@ -1974,7 +1980,8 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
* If we failed before we got a uuid_str yet, we can't stop
* heartbeat. Otherwise, do it.
*/
- if (!mnt_err && !ocfs2_mount_local(osb) && osb->uuid_str)
+ if (!mnt_err && !ocfs2_mount_local(osb) && osb->uuid_str &&
+ !ocfs2_is_hard_readonly(osb))
hangup_needed = 1;
if (osb->cconn)
@@ -2353,7 +2360,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
mlog_errno(status);
goto bail;
}
- cleancache_init_shared_fs((char *)&uuid_net_key, sb);
+ cleancache_init_shared_fs((char *)&di->id2.i_super.s_uuid, sb);
bail:
return status;
@@ -2462,8 +2469,8 @@ static int ocfs2_check_volume(struct ocfs2_super *osb)
goto finally;
}
} else {
- mlog(ML_NOTICE, "File system was not unmounted cleanly, "
- "recovering volume.\n");
+ printk(KERN_NOTICE "ocfs2: File system on device (%s) was not "
+ "unmounted cleanly, recovering it.\n", osb->dev_str);
}
local = ocfs2_mount_local(osb);
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 194fb22ef79..aa9e8777b09 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -2376,16 +2376,18 @@ static int ocfs2_remove_value_outside(struct inode*inode,
}
ret = ocfs2_xattr_value_truncate(inode, vb, 0, &ctxt);
- if (ret < 0) {
- mlog_errno(ret);
- break;
- }
ocfs2_commit_trans(osb, ctxt.handle);
if (ctxt.meta_ac) {
ocfs2_free_alloc_context(ctxt.meta_ac);
ctxt.meta_ac = NULL;
}
+
+ if (ret < 0) {
+ mlog_errno(ret);
+ break;
+ }
+
}
if (ctxt.meta_ac)
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index 586174168e2..80e4645f799 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -131,12 +131,13 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
K(i.freeswap),
K(global_page_state(NR_FILE_DIRTY)),
K(global_page_state(NR_WRITEBACK)),
- K(global_page_state(NR_ANON_PAGES)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ K(global_page_state(NR_ANON_PAGES)
+ global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
- HPAGE_PMD_NR
+ HPAGE_PMD_NR),
+#else
+ K(global_page_state(NR_ANON_PAGES)),
#endif
- ),
K(global_page_state(NR_FILE_MAPPED)),
K(global_page_state(NR_SHMEM)),
K(global_page_state(NR_SLAB_RECLAIMABLE) +
diff --git a/fs/proc/root.c b/fs/proc/root.c
index 9a8a2b77b87..03102d97818 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -91,20 +91,18 @@ static struct file_system_type proc_fs_type = {
void __init proc_root_init(void)
{
- struct vfsmount *mnt;
int err;
proc_init_inodecache();
err = register_filesystem(&proc_fs_type);
if (err)
return;
- mnt = kern_mount_data(&proc_fs_type, &init_pid_ns);
- if (IS_ERR(mnt)) {
+ err = pid_ns_prepare_proc(&init_pid_ns);
+ if (err) {
unregister_filesystem(&proc_fs_type);
return;
}
- init_pid_ns.proc_mnt = mnt;
proc_symlink("mounts", NULL, "self/mounts");
proc_net_init();
@@ -209,5 +207,5 @@ int pid_ns_prepare_proc(struct pid_namespace *ns)
void pid_ns_release_proc(struct pid_namespace *ns)
{
- mntput(ns->proc_mnt);
+ kern_unmount(ns->proc_mnt);
}
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index 42b274da92c..2a30d67dd6b 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -32,7 +32,7 @@ static cputime64_t get_idle_time(int cpu)
idle = kstat_cpu(cpu).cpustat.idle;
idle = cputime64_add(idle, arch_idle_time(cpu));
} else
- idle = usecs_to_cputime(idle_time);
+ idle = nsecs_to_jiffies64(1000 * idle_time);
return idle;
}
@@ -46,7 +46,7 @@ static cputime64_t get_iowait_time(int cpu)
/* !NO_HZ so we can rely on cpustat.iowait */
iowait = kstat_cpu(cpu).cpustat.iowait;
else
- iowait = usecs_to_cputime(iowait_time);
+ iowait = nsecs_to_jiffies64(1000 * iowait_time);
return iowait;
}
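[Editor's illustration] The /proc/stat change above converts the per-CPU idle and iowait times, which are reported in microseconds, into 64-bit jiffies by scaling to nanoseconds and calling nsecs_to_jiffies64(). A rough arithmetic sketch only; the HZ value is an assumption for illustration and the kernel's conversion avoids the open-coded divide:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL
#define HZ 100ULL                          /* assumed tick rate */

static uint64_t nsecs_to_jiffies64(uint64_t n)
{
	return n * HZ / NSEC_PER_SEC;      /* simplified conversion */
}

int main(void)
{
	uint64_t idle_us = 2500000;        /* 2.5 seconds of idle, in usecs */

	/* 2.5 s at HZ=100 is 250 jiffies. */
	printf("%llu jiffies\n",
	       (unsigned long long)nsecs_to_jiffies64(1000 * idle_us));
	return 0;
}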
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 2bd620f0d79..57bbf9078ac 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -167,6 +167,7 @@ int pstore_register(struct pstore_info *psi)
}
psinfo = psi;
+ mutex_init(&psinfo->read_mutex);
spin_unlock(&pstore_lock);
if (owner && !try_module_get(owner)) {
@@ -195,30 +196,32 @@ EXPORT_SYMBOL_GPL(pstore_register);
void pstore_get_records(int quiet)
{
struct pstore_info *psi = psinfo;
+ char *buf = NULL;
ssize_t size;
u64 id;
enum pstore_type_id type;
struct timespec time;
int failed = 0, rc;
- unsigned long flags;
if (!psi)
return;
- spin_lock_irqsave(&psinfo->buf_lock, flags);
+ mutex_lock(&psi->read_mutex);
rc = psi->open(psi);
if (rc)
goto out;
- while ((size = psi->read(&id, &type, &time, psi)) > 0) {
- rc = pstore_mkfile(type, psi->name, id, psi->buf, (size_t)size,
+ while ((size = psi->read(&id, &type, &time, &buf, psi)) > 0) {
+ rc = pstore_mkfile(type, psi->name, id, buf, (size_t)size,
time, psi);
+ kfree(buf);
+ buf = NULL;
if (rc && (rc != -EEXIST || !quiet))
failed++;
}
psi->close(psi);
out:
- spin_unlock_irqrestore(&psinfo->buf_lock, flags);
+ mutex_unlock(&psi->read_mutex);
if (failed)
printk(KERN_WARNING "pstore: failed to load %d record(s) from '%s'\n",
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 05d6b0e78c9..dba43c3ea3a 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -449,8 +449,6 @@ EXPORT_SYMBOL(seq_path);
/*
* Same as seq_path, but relative to supplied root.
- *
- * root may be changed, see __d_path().
*/
int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
char *esc)
@@ -463,6 +461,8 @@ int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
char *p;
p = __d_path(path, root, buf, size);
+ if (!p)
+ return SEQ_SKIP;
res = PTR_ERR(p);
if (!IS_ERR(p)) {
char *end = mangle_path(buf, p, esc);
@@ -474,7 +474,7 @@ int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
}
seq_commit(m, res);
- return res < 0 ? res : 0;
+ return res < 0 && res != -ENAMETOOLONG ? res : 0;
}
/*
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 20403dc5d43..ae0e76bb6eb 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -2264,19 +2264,12 @@ static int __init ubifs_init(void)
return -EINVAL;
}
- err = register_filesystem(&ubifs_fs_type);
- if (err) {
- ubifs_err("cannot register file system, error %d", err);
- return err;
- }
-
- err = -ENOMEM;
ubifs_inode_slab = kmem_cache_create("ubifs_inode_slab",
sizeof(struct ubifs_inode), 0,
SLAB_MEM_SPREAD | SLAB_RECLAIM_ACCOUNT,
&inode_slab_ctor);
if (!ubifs_inode_slab)
- goto out_reg;
+ return -ENOMEM;
register_shrinker(&ubifs_shrinker_info);
@@ -2288,15 +2281,20 @@ static int __init ubifs_init(void)
if (err)
goto out_compr;
+ err = register_filesystem(&ubifs_fs_type);
+ if (err) {
+ ubifs_err("cannot register file system, error %d", err);
+ goto out_dbg;
+ }
return 0;
+out_dbg:
+ dbg_debugfs_exit();
out_compr:
ubifs_compressors_exit();
out_shrinker:
unregister_shrinker(&ubifs_shrinker_info);
kmem_cache_destroy(ubifs_inode_slab);
-out_reg:
- unregister_filesystem(&ubifs_fs_type);
return err;
}
/* late_initcall to let compressors initialize first */
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index b6c4b3795c4..76e4266d2e7 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -42,6 +42,8 @@ xfs_acl_from_disk(struct xfs_acl *aclp)
int count, i;
count = be32_to_cpu(aclp->acl_cnt);
+ if (count > XFS_ACL_MAX_ENTRIES)
+ return ERR_PTR(-EFSCORRUPTED);
acl = posix_acl_alloc(count, GFP_KERNEL);
if (!acl)
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index d4906e7c978..c1b55e59655 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -110,6 +110,7 @@ xfs_attr_namesp_match(int arg_flags, int ondisk_flags)
/*
* Query whether the requested number of additional bytes of extended
* attribute space will be able to fit inline.
+ *
* Returns zero if not, else the di_forkoff fork offset to be used in the
* literal area for attribute data once the new bytes have been added.
*
@@ -122,7 +123,7 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
int offset;
int minforkoff; /* lower limit on valid forkoff locations */
int maxforkoff; /* upper limit on valid forkoff locations */
- int dsize;
+ int dsize;
xfs_mount_t *mp = dp->i_mount;
offset = (XFS_LITINO(mp) - bytes) >> 3; /* rounded down */
@@ -136,47 +137,60 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
return (offset >= minforkoff) ? minforkoff : 0;
}
- if (!(mp->m_flags & XFS_MOUNT_ATTR2)) {
- if (bytes <= XFS_IFORK_ASIZE(dp))
- return dp->i_d.di_forkoff;
+ /*
+ * If the requested numbers of bytes is smaller or equal to the
+ * current attribute fork size we can always proceed.
+ *
+ * Note that if_bytes in the data fork might actually be larger than
+	 * the current data fork size due to delalloc extents. In that
+ * case either the extent count will go down when they are converted
+ * to real extents, or the delalloc conversion will take care of the
+ * literal area rebalancing.
+ */
+ if (bytes <= XFS_IFORK_ASIZE(dp))
+ return dp->i_d.di_forkoff;
+
+ /*
+ * For attr2 we can try to move the forkoff if there is space in the
+ * literal area, but for the old format we are done if there is no
+ * space in the fixed attribute fork.
+ */
+ if (!(mp->m_flags & XFS_MOUNT_ATTR2))
return 0;
- }
dsize = dp->i_df.if_bytes;
-
+
switch (dp->i_d.di_format) {
case XFS_DINODE_FMT_EXTENTS:
- /*
+ /*
* If there is no attr fork and the data fork is extents,
- * determine if creating the default attr fork will result
- * in the extents form migrating to btree. If so, the
- * minimum offset only needs to be the space required for
+ * determine if creating the default attr fork will result
+ * in the extents form migrating to btree. If so, the
+ * minimum offset only needs to be the space required for
* the btree root.
- */
+ */
if (!dp->i_d.di_forkoff && dp->i_df.if_bytes >
xfs_default_attroffset(dp))
dsize = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
break;
-
case XFS_DINODE_FMT_BTREE:
/*
- * If have data btree then keep forkoff if we have one,
- * otherwise we are adding a new attr, so then we set
- * minforkoff to where the btree root can finish so we have
+ * If we have a data btree then keep forkoff if we have one,
+ * otherwise we are adding a new attr, so then we set
+ * minforkoff to where the btree root can finish so we have
* plenty of room for attrs
*/
if (dp->i_d.di_forkoff) {
- if (offset < dp->i_d.di_forkoff)
+ if (offset < dp->i_d.di_forkoff)
return 0;
- else
- return dp->i_d.di_forkoff;
- } else
- dsize = XFS_BMAP_BROOT_SPACE(dp->i_df.if_broot);
+ return dp->i_d.di_forkoff;
+ }
+ dsize = XFS_BMAP_BROOT_SPACE(dp->i_df.if_broot);
break;
}
-
- /*
- * A data fork btree root must have space for at least
+
+ /*
+ * A data fork btree root must have space for at least
* MINDBTPTRS key/ptr pairs if the data fork is small or empty.
*/
minforkoff = MAX(dsize, XFS_BMDR_SPACE_CALC(MINDBTPTRS));
@@ -186,10 +200,10 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
maxforkoff = XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS);
maxforkoff = maxforkoff >> 3; /* rounded down */
- if (offset >= minforkoff && offset < maxforkoff)
- return offset;
if (offset >= maxforkoff)
return maxforkoff;
+ if (offset >= minforkoff)
+ return offset;
return 0;
}
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index c68baeb0974..d0ab7883705 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -2383,6 +2383,8 @@ xfs_bmap_btalloc(
int tryagain;
int error;
+ ASSERT(ap->length);
+
mp = ap->ip->i_mount;
align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0;
if (unlikely(align)) {
@@ -4629,6 +4631,8 @@ xfs_bmapi_allocate(
int error;
int rt;
+ ASSERT(bma->length > 0);
+
rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(bma->ip);
/*
@@ -4849,6 +4853,7 @@ xfs_bmapi_write(
ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
ASSERT(!(flags & XFS_BMAPI_IGSTATE));
ASSERT(tp != NULL);
+ ASSERT(len > 0);
whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
XFS_ATTR_FORK : XFS_DATA_FORK;
@@ -4918,9 +4923,22 @@ xfs_bmapi_write(
bma.eof = eof;
bma.conv = !!(flags & XFS_BMAPI_CONVERT);
bma.wasdel = wasdelay;
- bma.length = len;
bma.offset = bno;
+ /*
+ * There's a 32/64 bit type mismatch between the
+ * allocation length request (which can be 64 bits in
+ * length) and the bma length request, which is
+ * xfs_extlen_t and therefore 32 bits. Hence we have to
+ * check for 32-bit overflows and handle them here.
+ */
+ if (len > (xfs_filblks_t)MAXEXTLEN)
+ bma.length = MAXEXTLEN;
+ else
+ bma.length = len;
+
+ ASSERT(len > 0);
+ ASSERT(bma.length > 0);
error = xfs_bmapi_allocate(&bma, flags);
if (error)
goto error0;
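[Editor's illustration] The new clamp in xfs_bmapi_write() exists because the requested length is a 64-bit xfs_filblks_t while bma.length is a 32-bit xfs_extlen_t; capping each pass avoids silent truncation and lets the enclosing loop allocate the remainder on later iterations. A stand-alone sketch of the same clamp (the MAXEXTLEN value used is the historical 21-bit per-extent limit and is assumed here):

#include <stdint.h>
#include <stdio.h>

#define MAXEXTLEN 0x001FFFFFU              /* assumed max blocks per extent */

/* Cap a 64-bit block-count request to what a 32-bit extent length can hold. */
static uint32_t clamp_alloc_len(uint64_t len)
{
	return len > (uint64_t)MAXEXTLEN ? MAXEXTLEN : (uint32_t)len;
}

int main(void)
{
	uint64_t want = (1ULL << 32) + 10;  /* would wrap if cast to 32 bits */

	printf("requested %llu blocks, allocating %u this pass\n",
	       (unsigned long long)want, clamp_alloc_len(want));
	return 0;
}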
diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c
index da108977b21..558910f5e3c 100644
--- a/fs/xfs/xfs_export.c
+++ b/fs/xfs/xfs_export.c
@@ -98,22 +98,22 @@ xfs_fs_encode_fh(
switch (fileid_type) {
case FILEID_INO32_GEN_PARENT:
spin_lock(&dentry->d_lock);
- fid->i32.parent_ino = dentry->d_parent->d_inode->i_ino;
+ fid->i32.parent_ino = XFS_I(dentry->d_parent->d_inode)->i_ino;
fid->i32.parent_gen = dentry->d_parent->d_inode->i_generation;
spin_unlock(&dentry->d_lock);
/*FALLTHRU*/
case FILEID_INO32_GEN:
- fid->i32.ino = inode->i_ino;
+ fid->i32.ino = XFS_I(inode)->i_ino;
fid->i32.gen = inode->i_generation;
break;
case FILEID_INO32_GEN_PARENT | XFS_FILEID_TYPE_64FLAG:
spin_lock(&dentry->d_lock);
- fid64->parent_ino = dentry->d_parent->d_inode->i_ino;
+ fid64->parent_ino = XFS_I(dentry->d_parent->d_inode)->i_ino;
fid64->parent_gen = dentry->d_parent->d_inode->i_generation;
spin_unlock(&dentry->d_lock);
/*FALLTHRU*/
case FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG:
- fid64->ino = inode->i_ino;
+ fid64->ino = XFS_I(inode)->i_ino;
fid64->gen = inode->i_generation;
break;
}
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index c0237c602f1..755ee816488 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -2835,6 +2835,27 @@ corrupt_out:
return XFS_ERROR(EFSCORRUPTED);
}
+void
+xfs_promote_inode(
+ struct xfs_inode *ip)
+{
+ struct xfs_buf *bp;
+
+ ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
+
+ bp = xfs_incore(ip->i_mount->m_ddev_targp, ip->i_imap.im_blkno,
+ ip->i_imap.im_len, XBF_TRYLOCK);
+ if (!bp)
+ return;
+
+ if (XFS_BUF_ISDELAYWRITE(bp)) {
+ xfs_buf_delwri_promote(bp);
+ wake_up_process(ip->i_mount->m_ddev_targp->bt_task);
+ }
+
+ xfs_buf_relse(bp);
+}
+
/*
* Return a pointer to the extent record at file index idx.
*/
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 760140d1dd6..b4cd4739f98 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -498,6 +498,7 @@ int xfs_iunlink(struct xfs_trans *, xfs_inode_t *);
void xfs_iext_realloc(xfs_inode_t *, int, int);
void xfs_iunpin_wait(xfs_inode_t *);
int xfs_iflush(xfs_inode_t *, uint);
+void xfs_promote_inode(struct xfs_inode *);
void xfs_lock_inodes(xfs_inode_t **, int, uint);
void xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint);
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index a14cd89fe46..34817adf4b9 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -150,6 +150,117 @@ xlog_grant_add_space(
} while (head_val != old);
}
+STATIC bool
+xlog_reserveq_wake(
+ struct log *log,
+ int *free_bytes)
+{
+ struct xlog_ticket *tic;
+ int need_bytes;
+
+ list_for_each_entry(tic, &log->l_reserveq, t_queue) {
+ if (tic->t_flags & XLOG_TIC_PERM_RESERV)
+ need_bytes = tic->t_unit_res * tic->t_cnt;
+ else
+ need_bytes = tic->t_unit_res;
+
+ if (*free_bytes < need_bytes)
+ return false;
+ *free_bytes -= need_bytes;
+
+ trace_xfs_log_grant_wake_up(log, tic);
+ wake_up(&tic->t_wait);
+ }
+
+ return true;
+}
+
+STATIC bool
+xlog_writeq_wake(
+ struct log *log,
+ int *free_bytes)
+{
+ struct xlog_ticket *tic;
+ int need_bytes;
+
+ list_for_each_entry(tic, &log->l_writeq, t_queue) {
+ ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
+
+ need_bytes = tic->t_unit_res;
+
+ if (*free_bytes < need_bytes)
+ return false;
+ *free_bytes -= need_bytes;
+
+ trace_xfs_log_regrant_write_wake_up(log, tic);
+ wake_up(&tic->t_wait);
+ }
+
+ return true;
+}
+
+STATIC int
+xlog_reserveq_wait(
+ struct log *log,
+ struct xlog_ticket *tic,
+ int need_bytes)
+{
+ list_add_tail(&tic->t_queue, &log->l_reserveq);
+
+ do {
+ if (XLOG_FORCED_SHUTDOWN(log))
+ goto shutdown;
+ xlog_grant_push_ail(log, need_bytes);
+
+ XFS_STATS_INC(xs_sleep_logspace);
+ trace_xfs_log_grant_sleep(log, tic);
+
+ xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
+ trace_xfs_log_grant_wake(log, tic);
+
+ spin_lock(&log->l_grant_reserve_lock);
+ if (XLOG_FORCED_SHUTDOWN(log))
+ goto shutdown;
+ } while (xlog_space_left(log, &log->l_grant_reserve_head) < need_bytes);
+
+ list_del_init(&tic->t_queue);
+ return 0;
+shutdown:
+ list_del_init(&tic->t_queue);
+ return XFS_ERROR(EIO);
+}
+
+STATIC int
+xlog_writeq_wait(
+ struct log *log,
+ struct xlog_ticket *tic,
+ int need_bytes)
+{
+ list_add_tail(&tic->t_queue, &log->l_writeq);
+
+ do {
+ if (XLOG_FORCED_SHUTDOWN(log))
+ goto shutdown;
+ xlog_grant_push_ail(log, need_bytes);
+
+ XFS_STATS_INC(xs_sleep_logspace);
+ trace_xfs_log_regrant_write_sleep(log, tic);
+
+ xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
+ trace_xfs_log_regrant_write_wake(log, tic);
+
+ spin_lock(&log->l_grant_write_lock);
+ if (XLOG_FORCED_SHUTDOWN(log))
+ goto shutdown;
+ } while (xlog_space_left(log, &log->l_grant_write_head) < need_bytes);
+
+ list_del_init(&tic->t_queue);
+ return 0;
+shutdown:
+ list_del_init(&tic->t_queue);
+ return XFS_ERROR(EIO);
+}
+
static void
xlog_tic_reset_res(xlog_ticket_t *tic)
{
@@ -350,8 +461,19 @@ xfs_log_reserve(
retval = xlog_grant_log_space(log, internal_ticket);
}
+ if (unlikely(retval)) {
+ /*
+ * If we are failing, make sure the ticket doesn't have any
+ * current reservations. We don't want to add this back
+	 * when the ticket/transaction gets cancelled.
+ */
+ internal_ticket->t_curr_res = 0;
+ /* ungrant will give back unit_res * t_cnt. */
+ internal_ticket->t_cnt = 0;
+ }
+
return retval;
-} /* xfs_log_reserve */
+}
/*
@@ -2481,8 +2603,8 @@ restart:
/*
* Atomically get the log space required for a log ticket.
*
- * Once a ticket gets put onto the reserveq, it will only return after
- * the needed reservation is satisfied.
+ * Once a ticket gets put onto the reserveq, it will only return after the
+ * needed reservation is satisfied.
*
* This function is structured so that it has a lock free fast path. This is
* necessary because every new transaction reservation will come through this
@@ -2490,113 +2612,53 @@ restart:
* every pass.
*
* As tickets are only ever moved on and off the reserveq under the
- * l_grant_reserve_lock, we only need to take that lock if we are going
- * to add the ticket to the queue and sleep. We can avoid taking the lock if the
- * ticket was never added to the reserveq because the t_queue list head will be
- * empty and we hold the only reference to it so it can safely be checked
- * unlocked.
+ * l_grant_reserve_lock, we only need to take that lock if we are going to add
+ * the ticket to the queue and sleep. We can avoid taking the lock if the ticket
+ * was never added to the reserveq because the t_queue list head will be empty
+ * and we hold the only reference to it so it can safely be checked unlocked.
*/
STATIC int
-xlog_grant_log_space(xlog_t *log,
- xlog_ticket_t *tic)
+xlog_grant_log_space(
+ struct log *log,
+ struct xlog_ticket *tic)
{
- int free_bytes;
- int need_bytes;
+ int free_bytes, need_bytes;
+ int error = 0;
-#ifdef DEBUG
- if (log->l_flags & XLOG_ACTIVE_RECOVERY)
- panic("grant Recovery problem");
-#endif
+ ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
trace_xfs_log_grant_enter(log, tic);
+ /*
+ * If there are other waiters on the queue then give them a chance at
+ * logspace before us. Wake up the first waiters, if we do not wake
+ * up all the waiters then go to sleep waiting for more free space,
+ * otherwise try to get some space for this transaction.
+ */
need_bytes = tic->t_unit_res;
if (tic->t_flags & XFS_LOG_PERM_RESERV)
need_bytes *= tic->t_ocnt;
-
- /* something is already sleeping; insert new transaction at end */
- if (!list_empty_careful(&log->l_reserveq)) {
- spin_lock(&log->l_grant_reserve_lock);
- /* recheck the queue now we are locked */
- if (list_empty(&log->l_reserveq)) {
- spin_unlock(&log->l_grant_reserve_lock);
- goto redo;
- }
- list_add_tail(&tic->t_queue, &log->l_reserveq);
-
- trace_xfs_log_grant_sleep1(log, tic);
-
- /*
- * Gotta check this before going to sleep, while we're
- * holding the grant lock.
- */
- if (XLOG_FORCED_SHUTDOWN(log))
- goto error_return;
-
- XFS_STATS_INC(xs_sleep_logspace);
- xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
-
- /*
- * If we got an error, and the filesystem is shutting down,
- * we'll catch it down below. So just continue...
- */
- trace_xfs_log_grant_wake1(log, tic);
- }
-
-redo:
- if (XLOG_FORCED_SHUTDOWN(log))
- goto error_return_unlocked;
-
free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
- if (free_bytes < need_bytes) {
+ if (!list_empty_careful(&log->l_reserveq)) {
spin_lock(&log->l_grant_reserve_lock);
- if (list_empty(&tic->t_queue))
- list_add_tail(&tic->t_queue, &log->l_reserveq);
-
- trace_xfs_log_grant_sleep2(log, tic);
-
- if (XLOG_FORCED_SHUTDOWN(log))
- goto error_return;
-
- xlog_grant_push_ail(log, need_bytes);
-
- XFS_STATS_INC(xs_sleep_logspace);
- xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
-
- trace_xfs_log_grant_wake2(log, tic);
- goto redo;
- }
-
- if (!list_empty(&tic->t_queue)) {
+ if (!xlog_reserveq_wake(log, &free_bytes) ||
+ free_bytes < need_bytes)
+ error = xlog_reserveq_wait(log, tic, need_bytes);
+ spin_unlock(&log->l_grant_reserve_lock);
+ } else if (free_bytes < need_bytes) {
spin_lock(&log->l_grant_reserve_lock);
- list_del_init(&tic->t_queue);
+ error = xlog_reserveq_wait(log, tic, need_bytes);
spin_unlock(&log->l_grant_reserve_lock);
}
+ if (error)
+ return error;
- /* we've got enough space */
xlog_grant_add_space(log, &log->l_grant_reserve_head, need_bytes);
xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes);
trace_xfs_log_grant_exit(log, tic);
xlog_verify_grant_tail(log);
return 0;
-
-error_return_unlocked:
- spin_lock(&log->l_grant_reserve_lock);
-error_return:
- list_del_init(&tic->t_queue);
- spin_unlock(&log->l_grant_reserve_lock);
- trace_xfs_log_grant_error(log, tic);
-
- /*
- * If we are failing, make sure the ticket doesn't have any
- * current reservations. We don't want to add this back when
- * the ticket/transaction gets cancelled.
- */
- tic->t_curr_res = 0;
- tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
- return XFS_ERROR(EIO);
-} /* xlog_grant_log_space */
-
+}
/*
* Replenish the byte reservation required by moving the grant write head.
@@ -2605,10 +2667,12 @@ error_return:
* free fast path.
*/
STATIC int
-xlog_regrant_write_log_space(xlog_t *log,
- xlog_ticket_t *tic)
+xlog_regrant_write_log_space(
+ struct log *log,
+ struct xlog_ticket *tic)
{
- int free_bytes, need_bytes;
+ int free_bytes, need_bytes;
+ int error = 0;
tic->t_curr_res = tic->t_unit_res;
xlog_tic_reset_res(tic);
@@ -2616,104 +2680,38 @@ xlog_regrant_write_log_space(xlog_t *log,
if (tic->t_cnt > 0)
return 0;
-#ifdef DEBUG
- if (log->l_flags & XLOG_ACTIVE_RECOVERY)
- panic("regrant Recovery problem");
-#endif
+ ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
trace_xfs_log_regrant_write_enter(log, tic);
- if (XLOG_FORCED_SHUTDOWN(log))
- goto error_return_unlocked;
- /* If there are other waiters on the queue then give them a
- * chance at logspace before us. Wake up the first waiters,
- * if we do not wake up all the waiters then go to sleep waiting
- * for more free space, otherwise try to get some space for
- * this transaction.
+ /*
+ * If there are other waiters on the queue then give them a chance at
+ * logspace before us. Wake up the first waiters, if we do not wake
+ * up all the waiters then go to sleep waiting for more free space,
+ * otherwise try to get some space for this transaction.
*/
need_bytes = tic->t_unit_res;
- if (!list_empty_careful(&log->l_writeq)) {
- struct xlog_ticket *ntic;
-
- spin_lock(&log->l_grant_write_lock);
- free_bytes = xlog_space_left(log, &log->l_grant_write_head);
- list_for_each_entry(ntic, &log->l_writeq, t_queue) {
- ASSERT(ntic->t_flags & XLOG_TIC_PERM_RESERV);
-
- if (free_bytes < ntic->t_unit_res)
- break;
- free_bytes -= ntic->t_unit_res;
- wake_up(&ntic->t_wait);
- }
-
- if (ntic != list_first_entry(&log->l_writeq,
- struct xlog_ticket, t_queue)) {
- if (list_empty(&tic->t_queue))
- list_add_tail(&tic->t_queue, &log->l_writeq);
- trace_xfs_log_regrant_write_sleep1(log, tic);
-
- xlog_grant_push_ail(log, need_bytes);
-
- XFS_STATS_INC(xs_sleep_logspace);
- xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
- trace_xfs_log_regrant_write_wake1(log, tic);
- } else
- spin_unlock(&log->l_grant_write_lock);
- }
-
-redo:
- if (XLOG_FORCED_SHUTDOWN(log))
- goto error_return_unlocked;
-
free_bytes = xlog_space_left(log, &log->l_grant_write_head);
- if (free_bytes < need_bytes) {
+ if (!list_empty_careful(&log->l_writeq)) {
spin_lock(&log->l_grant_write_lock);
- if (list_empty(&tic->t_queue))
- list_add_tail(&tic->t_queue, &log->l_writeq);
-
- if (XLOG_FORCED_SHUTDOWN(log))
- goto error_return;
-
- xlog_grant_push_ail(log, need_bytes);
-
- XFS_STATS_INC(xs_sleep_logspace);
- trace_xfs_log_regrant_write_sleep2(log, tic);
- xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
-
- trace_xfs_log_regrant_write_wake2(log, tic);
- goto redo;
- }
-
- if (!list_empty(&tic->t_queue)) {
+ if (!xlog_writeq_wake(log, &free_bytes) ||
+ free_bytes < need_bytes)
+ error = xlog_writeq_wait(log, tic, need_bytes);
+ spin_unlock(&log->l_grant_write_lock);
+ } else if (free_bytes < need_bytes) {
spin_lock(&log->l_grant_write_lock);
- list_del_init(&tic->t_queue);
+ error = xlog_writeq_wait(log, tic, need_bytes);
spin_unlock(&log->l_grant_write_lock);
}
- /* we've got enough space */
+ if (error)
+ return error;
+
xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes);
trace_xfs_log_regrant_write_exit(log, tic);
xlog_verify_grant_tail(log);
return 0;
-
-
- error_return_unlocked:
- spin_lock(&log->l_grant_write_lock);
- error_return:
- list_del_init(&tic->t_queue);
- spin_unlock(&log->l_grant_write_lock);
- trace_xfs_log_regrant_write_error(log, tic);
-
- /*
- * If we are failing, make sure the ticket doesn't have any
- * current reservations. We don't want to add this back when
- * the ticket/transaction gets cancelled.
- */
- tic->t_curr_res = 0;
- tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
- return XFS_ERROR(EIO);
-} /* xlog_regrant_write_log_space */
-
+}
/* The first cnt-1 times through here we don't need to
* move the grant write head because the permanent
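
The two hunks above only show the call sites of the new xlog_reserveq_wake()/xlog_reserveq_wait() and xlog_writeq_wake()/xlog_writeq_wait() helpers; their bodies sit earlier in the diff. As a rough userspace illustration of the control flow those call sites rely on -- wake queued waiters in FIFO order while free space covers them, and go to sleep if the queue cannot be drained or space is still short -- here is a standalone sketch; every name and value in it is invented for the example.

/*
 * Toy model of the grant-queue pattern used by the call sites above:
 * wake queued tickets in order while free space covers their
 * reservations; if the queue cannot be fully drained, or space is
 * still short afterwards, the current ticket must queue and wait.
 */
#include <stdbool.h>
#include <stdio.h>

struct ticket { int need_bytes; };

/* Returns true if every queued ticket could be "woken". */
static bool grantq_wake(const struct ticket *q, int nq, int *free_bytes)
{
	for (int i = 0; i < nq; i++) {
		if (*free_bytes < q[i].need_bytes)
			return false;		/* first waiter we cannot satisfy */
		*free_bytes -= q[i].need_bytes;	/* stands in for wake_up() */
	}
	return true;
}

int main(void)
{
	struct ticket queue[] = { { 100 }, { 200 } };
	struct ticket mine = { 150 };
	int free_bytes = 400;

	if (!grantq_wake(queue, 2, &free_bytes) || free_bytes < mine.need_bytes)
		printf("would sleep waiting for log space\n");
	else
		printf("got space, %d bytes left\n", free_bytes - mine.need_bytes);
	return 0;
}
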
diff --git a/fs/xfs/xfs_sync.c b/fs/xfs/xfs_sync.c
index aa3dc1a4d53..be5c51d8f75 100644
--- a/fs/xfs/xfs_sync.c
+++ b/fs/xfs/xfs_sync.c
@@ -770,6 +770,17 @@ restart:
if (!xfs_iflock_nowait(ip)) {
if (!(sync_mode & SYNC_WAIT))
goto out;
+
+ /*
+ * If we only have a single dirty inode in a cluster there is
+ * a fair chance that the AIL push may have pushed it into
+ * the buffer, but xfsbufd won't touch it until 30 seconds
+ * from now, and thus we will lock up here.
+ *
+ * Promote the inode buffer to the front of the delwri list
+ * and wake up xfsbufd now.
+ */
+ xfs_promote_inode(ip);
xfs_iflock(ip);
}
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index f1d2802b2f0..49403579887 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -834,18 +834,14 @@ DEFINE_LOGGRANT_EVENT(xfs_log_umount_write);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_enter);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_exit);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_error);
-DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep1);
-DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake1);
-DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep2);
-DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake2);
+DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep);
+DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake_up);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_enter);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_exit);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_error);
-DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep1);
-DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake1);
-DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep2);
-DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake2);
+DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep);
+DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake_up);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_enter);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_exit);
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
index f4c38d8c667..2292d1af9d7 100644
--- a/include/asm-generic/unistd.h
+++ b/include/asm-generic/unistd.h
@@ -685,9 +685,15 @@ __SYSCALL(__NR_syncfs, sys_syncfs)
__SYSCALL(__NR_setns, sys_setns)
#define __NR_sendmmsg 269
__SC_COMP(__NR_sendmmsg, sys_sendmmsg, compat_sys_sendmmsg)
+#define __NR_process_vm_readv 270
+__SC_COMP(__NR_process_vm_readv, sys_process_vm_readv, \
+ compat_sys_process_vm_readv)
+#define __NR_process_vm_writev 271
+__SC_COMP(__NR_process_vm_writev, sys_process_vm_writev, \
+ compat_sys_process_vm_writev)
#undef __NR_syscalls
-#define __NR_syscalls 270
+#define __NR_syscalls 272
/*
* All syscalls below here should go away really,
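
The two __SC_COMP() entries above add process_vm_readv and process_vm_writev to the generic syscall table (with compat wrappers) and bump __NR_syscalls to match. For illustration only -- not part of the patch, and assuming no libc wrapper is available yet -- a caller on an asm-generic architecture could exercise the new number directly via syscall():

/*
 * Illustrative userspace use of the syscall wired up above: copy a few
 * bytes out of another process's address space.  The remote address is
 * a made-up example value and error handling is minimal.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <sys/uio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	pid_t pid = argc > 1 ? (pid_t)atoi(argv[1]) : getpid();
	char buf[64];
	struct iovec local  = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct iovec remote = { .iov_base = (void *)0x400000, .iov_len = sizeof(buf) };

	/*
	 * 270 is the asm-generic __NR_process_vm_readv defined above;
	 * architecture-specific tables may use a different number.
	 */
	ssize_t n = syscall(270, (long)pid, &local, 1UL, &remote, 1UL, 0UL);
	if (n < 0)
		perror("process_vm_readv");
	else
		printf("copied %zd bytes\n", n);
	return 0;
}
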
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index f81676f1b31..14b6cd02228 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -182,8 +182,11 @@
{0x1002, 0x6748, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6759, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x675B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x675D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x675F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6761, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@@ -195,8 +198,18 @@
{0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x677B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6841, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6842, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6843, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6849, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6888, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6889, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
@@ -238,6 +251,7 @@
{0x1002, 0x68f2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
{0x1002, 0x68f8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
{0x1002, 0x68f9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68fa, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
{0x1002, 0x68fe, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@@ -480,6 +494,8 @@
{0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
{0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
{0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x964e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
{0x1002, 0x964f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
{0x1002, 0x9710, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
@@ -494,6 +510,8 @@
{0x1002, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9807, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0, 0, 0}
#define r128_PCI_IDS \
diff --git a/include/drm/exynos_drm.h b/include/drm/exynos_drm.h
index 1d161cb3aca..12050434d57 100644
--- a/include/drm/exynos_drm.h
+++ b/include/drm/exynos_drm.h
@@ -32,17 +32,16 @@
/**
* User-desired buffer creation information structure.
*
- * @size: requested size for the object.
+ * @size: user-desired memory allocation size.
* - this size value would be page-aligned internally.
* @flags: user request for setting memory type or cache attributes.
- * @handle: returned handle for the object.
- * @pad: just padding to be 64-bit aligned.
+ * @handle: returned a handle to created gem object.
+ * - this handle will be set by gem module of kernel side.
*/
struct drm_exynos_gem_create {
- unsigned int size;
+ uint64_t size;
unsigned int flags;
unsigned int handle;
- unsigned int pad;
};
/**
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index c7a6d3b5bc7..94acd8172b5 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -805,9 +805,6 @@ extern void blk_unprep_request(struct request *);
*/
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
spinlock_t *lock, int node_id);
-extern struct request_queue *blk_init_allocated_queue_node(struct request_queue *,
- request_fn_proc *,
- spinlock_t *, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
request_fn_proc *, spinlock_t *);
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 139c4db55f1..081147da056 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -71,7 +71,7 @@ struct timecounter {
/**
* cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds
- * @tc: Pointer to cycle counter.
+ * @cc: Pointer to cycle counter.
* @cycles: Cycles
*
* XXX - This could use some mult_lxl_ll() asm optimization. Same code
@@ -114,7 +114,7 @@ extern u64 timecounter_read(struct timecounter *tc);
* time base as values returned by
* timecounter_read()
* @tc: Pointer to time counter.
- * @cycle: a value returned by tc->cc->read()
+ * @cycle_tstamp: a value returned by tc->cc->read()
*
* Cycle counts that are converted correctly as long as they
* fall into the interval [-1/2 max cycle count, +1/2 max cycle count],
@@ -156,10 +156,12 @@ extern u64 timecounter_cyc2time(struct timecounter *tc,
* @mult: cycle to nanosecond multiplier
* @shift: cycle to nanosecond divisor (power of two)
* @max_idle_ns: max idle time permitted by the clocksource (nsecs)
+ * @maxadj: maximum adjustment value to mult (~11%)
* @flags: flags describing special properties
* @archdata: arch-specific data
* @suspend: suspend function for the clocksource, if necessary
* @resume: resume function for the clocksource, if necessary
+ * @cycle_last: most recent cycle counter value seen by ::read()
*/
struct clocksource {
/*
@@ -172,7 +174,7 @@ struct clocksource {
u32 mult;
u32 shift;
u64 max_idle_ns;
-
+ u32 maxadj;
#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA
struct arch_clocksource_data archdata;
#endif
@@ -186,6 +188,7 @@ struct clocksource {
void (*suspend)(struct clocksource *cs);
void (*resume)(struct clocksource *cs);
+ /* private: */
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
/* Watchdog related data, used by the framework */
struct list_head wd_list;
@@ -260,6 +263,9 @@ static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
/**
* clocksource_cyc2ns - converts clocksource cycles to nanoseconds
+ * @cycles: cycles
+ * @mult: cycle to nanosecond multiplier
+ * @shift: cycle to nanosecond divisor (power of two)
*
* Converts cycles to nanoseconds, using the given mult and shift.
*
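
The added @cycles/@mult/@shift descriptions above document the usual fixed-point conversion ns = (cycles * mult) >> shift. A quick standalone illustration with made-up values for a 1 MHz counter:

/*
 * ns = (cycles * mult) >> shift -- the fixed-point conversion the
 * kernel-doc above describes.  The mult/shift values are made up for
 * the example (1 cycle of a 1 MHz counter = 1000 ns).
 */
#include <stdint.h>
#include <stdio.h>

static inline uint64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
	return (cycles * mult) >> shift;
}

int main(void)
{
	uint32_t shift = 20, mult = 1000u << 20;

	printf("%llu ns\n", (unsigned long long)cyc2ns(5, mult, shift)); /* 5000 ns */
	return 0;
}
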
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 154bf568301..66ed067fb72 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -552,5 +552,14 @@ extern ssize_t compat_rw_copy_check_uvector(int type,
extern void __user *compat_alloc_user_space(unsigned long len);
+asmlinkage ssize_t compat_sys_process_vm_readv(compat_pid_t pid,
+ const struct compat_iovec __user *lvec,
+ unsigned long liovcnt, const struct compat_iovec __user *rvec,
+ unsigned long riovcnt, unsigned long flags);
+asmlinkage ssize_t compat_sys_process_vm_writev(compat_pid_t pid,
+ const struct compat_iovec __user *lvec,
+ unsigned long liovcnt, const struct compat_iovec __user *rvec,
+ unsigned long riovcnt, unsigned long flags);
+
#endif /* CONFIG_COMPAT */
#endif /* _LINUX_COMPAT_H */
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 4df92619936..ed9f74f6c51 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -339,7 +339,8 @@ extern int d_validate(struct dentry *, struct dentry *);
*/
extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
-extern char *__d_path(const struct path *path, struct path *root, char *, int);
+extern char *__d_path(const struct path *, const struct path *, char *, int);
+extern char *d_absolute_path(const struct path *, char *, int);
extern char *d_path(const struct path *, char *, int);
extern char *d_path_with_unreachable(const struct path *, char *, int);
extern char *dentry_path_raw(struct dentry *, char *, int);
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h
index ef90cbd8e17..57c9a8ae4f2 100644
--- a/include/linux/dma_remapping.h
+++ b/include/linux/dma_remapping.h
@@ -31,6 +31,7 @@ extern void free_dmar_iommu(struct intel_iommu *iommu);
extern int iommu_calculate_agaw(struct intel_iommu *iommu);
extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
extern int dmar_disabled;
+extern int intel_iommu_enabled;
#else
static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
{
@@ -44,6 +45,7 @@ static inline void free_dmar_iommu(struct intel_iommu *iommu)
{
}
#define dmar_disabled (1)
+#define intel_iommu_enabled (0)
#endif
diff --git a/include/linux/fs.h b/include/linux/fs.h
index e3130220ce3..e0bc4ffb8e7 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -393,8 +393,8 @@ struct inodes_stat_t {
#include <linux/semaphore.h>
#include <linux/fiemap.h>
#include <linux/rculist_bl.h>
-#include <linux/shrinker.h>
#include <linux/atomic.h>
+#include <linux/shrinker.h>
#include <asm/byteorder.h>
@@ -1942,6 +1942,7 @@ extern int fd_statfs(int, struct kstatfs *);
extern int statfs_by_dentry(struct dentry *, struct kstatfs *);
extern int freeze_super(struct super_block *super);
extern int thaw_super(struct super_block *super);
+extern bool our_mnt(struct vfsmount *mnt);
extern int current_umask(void);
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 96efa6794ea..c3da42dd22b 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -172,6 +172,7 @@ enum {
TRACE_EVENT_FL_FILTERED_BIT,
TRACE_EVENT_FL_RECORDED_CMD_BIT,
TRACE_EVENT_FL_CAP_ANY_BIT,
+ TRACE_EVENT_FL_NO_SET_FILTER_BIT,
};
enum {
@@ -179,6 +180,7 @@ enum {
TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),
TRACE_EVENT_FL_RECORDED_CMD = (1 << TRACE_EVENT_FL_RECORDED_CMD_BIT),
TRACE_EVENT_FL_CAP_ANY = (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
+ TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
};
struct ftrace_event_call {
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 94b1e356c02..32574eef939 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -126,6 +126,8 @@ extern struct cred init_cred;
# define INIT_PERF_EVENTS(tsk)
#endif
+#define INIT_TASK_COMM "swapper"
+
/*
* INIT_TASK is used to set up the first task table, touch at
* your own risk!. Base=0, limit=0x1fffff (=2MB)
@@ -162,7 +164,7 @@ extern struct cred init_cred;
.group_leader = &tsk, \
RCU_INIT_POINTER(.real_cred, &init_cred), \
RCU_INIT_POINTER(.cred, &init_cred), \
- .comm = "swapper", \
+ .comm = INIT_TASK_COMM, \
.thread = INIT_THREAD, \
.fs = &init_fs, \
.files = &init_files, \
diff --git a/include/linux/log2.h b/include/linux/log2.h
index 25b808631cd..fd7ff3d91e6 100644
--- a/include/linux/log2.h
+++ b/include/linux/log2.h
@@ -185,7 +185,6 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
#define rounddown_pow_of_two(n) \
( \
__builtin_constant_p(n) ? ( \
- (n == 1) ? 0 : \
(1UL << ilog2(n))) : \
__rounddown_pow_of_two(n) \
)
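
With the `(n == 1) ? 0 :` special case removed above, the constant-argument path of rounddown_pow_of_two() evaluates to 1UL << ilog2(n), which already yields 1 for n == 1 (ilog2(1) == 0), matching the out-of-line __rounddown_pow_of_two(). A standalone check of that arithmetic (not kernel code; the helper names are made up):

/*
 * Standalone check of the constant-argument expression left after the
 * hunk above: 1UL << ilog2(n).  For n == 1 this is 1UL << 0 == 1.
 */
#include <assert.h>
#include <stdio.h>

static unsigned long ilog2_ul(unsigned long n)	/* floor(log2(n)), n > 0 */
{
	unsigned long r = 0;

	while (n >>= 1)
		r++;
	return r;
}

static unsigned long rounddown_pow_of_two_ul(unsigned long n)
{
	return 1UL << ilog2_ul(n);
}

int main(void)
{
	assert(rounddown_pow_of_two_ul(1) == 1);
	assert(rounddown_pow_of_two_ul(7) == 4);
	assert(rounddown_pow_of_two_ul(64) == 64);
	printf("rounddown_pow_of_two(1) = %lu\n", rounddown_pow_of_two_ul(1));
	return 0;
}
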
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 3dc3a8c2c48..4baadd18f4a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -10,6 +10,7 @@
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/prio_tree.h>
+#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
#include <linux/range.h>
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 415f2db414e..c8ef9bc54d5 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -218,6 +218,7 @@ struct mmc_card {
#define MMC_QUIRK_INAND_CMD38 (1<<6) /* iNAND devices have broken CMD38 */
#define MMC_QUIRK_BLK_NO_CMD23 (1<<7) /* Avoid CMD23 for regular multiblock */
#define MMC_QUIRK_BROKEN_BYTE_MODE_512 (1<<8) /* Avoid sending 512 bytes in */
+#define MMC_QUIRK_LONG_READ_TIME (1<<9) /* Data read time > CSD says */
/* byte mode */
unsigned int poweroff_notify_state; /* eMMC4.5 notify feature */
#define MMC_NO_POWER_NOTIFICATION 0
@@ -433,6 +434,11 @@ static inline int mmc_card_broken_byte_mode_512(const struct mmc_card *c)
return c->quirks & MMC_QUIRK_BROKEN_BYTE_MODE_512;
}
+static inline int mmc_card_long_read_time(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_LONG_READ_TIME;
+}
+
#define mmc_card_name(c) ((c)->cid.prod_name)
#define mmc_card_id(c) (dev_name(&(c)->dev))
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index cbeb5867cff..a82ad4dd306 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2536,6 +2536,8 @@ extern void net_disable_timestamp(void);
extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
extern void dev_seq_stop(struct seq_file *seq, void *v);
+extern int dev_seq_open_ops(struct inode *inode, struct file *file,
+ const struct seq_operations *ops);
#endif
extern int netdev_class_create_file(struct class_attribute *class_attr);
diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h
index e3d0b389024..7ef68724f0f 100644
--- a/include/linux/pci-ats.h
+++ b/include/linux/pci-ats.h
@@ -12,7 +12,7 @@ struct pci_ats {
unsigned int is_enabled:1; /* Enable bit is set */
};
-#ifdef CONFIG_PCI_IOV
+#ifdef CONFIG_PCI_ATS
extern int pci_enable_ats(struct pci_dev *dev, int ps);
extern void pci_disable_ats(struct pci_dev *dev);
@@ -29,7 +29,7 @@ static inline int pci_ats_enabled(struct pci_dev *dev)
return dev->ats && dev->ats->is_enabled;
}
-#else /* CONFIG_PCI_IOV */
+#else /* CONFIG_PCI_ATS */
static inline int pci_enable_ats(struct pci_dev *dev, int ps)
{
@@ -50,7 +50,7 @@ static inline int pci_ats_enabled(struct pci_dev *dev)
return 0;
}
-#endif /* CONFIG_PCI_IOV */
+#endif /* CONFIG_PCI_ATS */
#ifdef CONFIG_PCI_PRI
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 337df0d5d5f..7cda65b5f79 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -338,7 +338,7 @@ struct pci_dev {
struct list_head msi_list;
#endif
struct pci_vpd *vpd;
-#ifdef CONFIG_PCI_IOV
+#ifdef CONFIG_PCI_ATS
union {
struct pci_sriov *sriov; /* SR-IOV capability related */
struct pci_dev *physfn; /* the PF this VF is associated with */
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 172ba70306d..2aaee0ca9da 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -517,8 +517,12 @@
#define PCI_DEVICE_ID_AMD_11H_NB_DRAM 0x1302
#define PCI_DEVICE_ID_AMD_11H_NB_MISC 0x1303
#define PCI_DEVICE_ID_AMD_11H_NB_LINK 0x1304
+#define PCI_DEVICE_ID_AMD_15H_NB_F0 0x1600
+#define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601
+#define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602
#define PCI_DEVICE_ID_AMD_15H_NB_F3 0x1603
#define PCI_DEVICE_ID_AMD_15H_NB_F4 0x1604
+#define PCI_DEVICE_ID_AMD_15H_NB_F5 0x1605
#define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703
#define PCI_DEVICE_ID_AMD_LANCE 0x2000
#define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 1e9ebe5e009..b1f89122bf6 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -822,6 +822,7 @@ struct perf_event {
int mmap_locked;
struct user_struct *mmap_user;
struct ring_buffer *rb;
+ struct list_head rb_entry;
/* poll related */
wait_queue_head_t waitq;
diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h
index c5336705921..7281d5acf2f 100644
--- a/include/linux/pkt_sched.h
+++ b/include/linux/pkt_sched.h
@@ -30,7 +30,7 @@
*/
struct tc_stats {
- __u64 bytes; /* NUmber of enqueues bytes */
+ __u64 bytes; /* Number of enqueued bytes */
__u32 packets; /* Number of enqueued packets */
__u32 drops; /* Packets dropped because of lack of resources */
__u32 overlimits; /* Number of throttle events when this
@@ -297,7 +297,7 @@ struct tc_htb_glob {
__u32 debug; /* debug flags */
/* stats */
- __u32 direct_pkts; /* count of non shapped packets */
+ __u32 direct_pkts; /* count of non shaped packets */
};
enum {
TCA_HTB_UNSPEC,
@@ -503,7 +503,7 @@ enum {
};
#define NETEM_LOSS_MAX (__NETEM_LOSS_MAX - 1)
-/* State transition probablities for 4 state model */
+/* State transition probabilities for 4 state model */
struct tc_netem_gimodel {
__u32 p13;
__u32 p31;
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 5c4c8b18c8b..3f3ed83a9aa 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -54,118 +54,145 @@ typedef struct pm_message {
/**
* struct dev_pm_ops - device PM callbacks
*
- * Several driver power state transitions are externally visible, affecting
+ * Several device power state transitions are externally visible, affecting
* the state of pending I/O queues and (for drivers that touch hardware)
* interrupts, wakeups, DMA, and other hardware state. There may also be
- * internal transitions to various low power modes, which are transparent
+ * internal transitions to various low-power modes which are transparent
* to the rest of the driver stack (such as a driver that's ON gating off
* clocks which are not in active use).
*
- * The externally visible transitions are handled with the help of the following
- * callbacks included in this structure:
- *
- * @prepare: Prepare the device for the upcoming transition, but do NOT change
- * its hardware state. Prevent new children of the device from being
- * registered after @prepare() returns (the driver's subsystem and
- * generally the rest of the kernel is supposed to prevent new calls to the
- * probe method from being made too once @prepare() has succeeded). If
- * @prepare() detects a situation it cannot handle (e.g. registration of a
- * child already in progress), it may return -EAGAIN, so that the PM core
- * can execute it once again (e.g. after the new child has been registered)
- * to recover from the race condition. This method is executed for all
- * kinds of suspend transitions and is followed by one of the suspend
- * callbacks: @suspend(), @freeze(), or @poweroff().
- * The PM core executes @prepare() for all devices before starting to
- * execute suspend callbacks for any of them, so drivers may assume all of
- * the other devices to be present and functional while @prepare() is being
- * executed. In particular, it is safe to make GFP_KERNEL memory
- * allocations from within @prepare(). However, drivers may NOT assume
- * anything about the availability of the user space at that time and it
- * is not correct to request firmware from within @prepare() (it's too
- * late to do that). [To work around this limitation, drivers may
- * register suspend and hibernation notifiers that are executed before the
- * freezing of tasks.]
+ * The externally visible transitions are handled with the help of callbacks
+ * included in this structure in such a way that two levels of callbacks are
+ * involved. First, the PM core executes callbacks provided by PM domains,
+ * device types, classes and bus types. They are the subsystem-level callbacks
+ * supposed to execute callbacks provided by device drivers, although they may
+ * choose not to do that. If the driver callbacks are executed, they have to
+ * collaborate with the subsystem-level callbacks to achieve the goals
+ * appropriate for the given system transition, given transition phase and the
+ * subsystem the device belongs to.
+ *
+ * @prepare: The principal role of this callback is to prevent new children of
+ * the device from being registered after it has returned (the driver's
+ * subsystem and generally the rest of the kernel is supposed to prevent
+ * new calls to the probe method from being made too once @prepare() has
+ * succeeded). If @prepare() detects a situation it cannot handle (e.g.
+ * registration of a child already in progress), it may return -EAGAIN, so
+ * that the PM core can execute it once again (e.g. after a new child has
+ * been registered) to recover from the race condition.
+ * This method is executed for all kinds of suspend transitions and is
+ * followed by one of the suspend callbacks: @suspend(), @freeze(), or
+ * @poweroff(). The PM core executes subsystem-level @prepare() for all
+ * devices before starting to invoke suspend callbacks for any of them, so
+ * generally devices may be assumed to be functional or to respond to
+ * runtime resume requests while @prepare() is being executed. However,
+ * device drivers may NOT assume anything about the availability of user
+ * space at that time and it is NOT valid to request firmware from within
+ * @prepare() (it's too late to do that). It also is NOT valid to allocate
+ * substantial amounts of memory from @prepare() in the GFP_KERNEL mode.
+ * [To work around these limitations, drivers may register suspend and
+ * hibernation notifiers to be executed before the freezing of tasks.]
*
* @complete: Undo the changes made by @prepare(). This method is executed for
* all kinds of resume transitions, following one of the resume callbacks:
* @resume(), @thaw(), @restore(). Also called if the state transition
- * fails before the driver's suspend callback (@suspend(), @freeze(),
- * @poweroff()) can be executed (e.g. if the suspend callback fails for one
+ * fails before the driver's suspend callback: @suspend(), @freeze() or
+ * @poweroff(), can be executed (e.g. if the suspend callback fails for one
* of the other devices that the PM core has unsuccessfully attempted to
* suspend earlier).
- * The PM core executes @complete() after it has executed the appropriate
- * resume callback for all devices.
+ * The PM core executes subsystem-level @complete() after it has executed
+ * the appropriate resume callbacks for all devices.
*
* @suspend: Executed before putting the system into a sleep state in which the
- * contents of main memory are preserved. Quiesce the device, put it into
- * a low power state appropriate for the upcoming system state (such as
- * PCI_D3hot), and enable wakeup events as appropriate.
+ * contents of main memory are preserved. The exact action to perform
+ * depends on the device's subsystem (PM domain, device type, class or bus
+ * type), but generally the device must be quiescent after subsystem-level
+ * @suspend() has returned, so that it doesn't do any I/O or DMA.
+ * Subsystem-level @suspend() is executed for all devices after invoking
+ * subsystem-level @prepare() for all of them.
*
* @resume: Executed after waking the system up from a sleep state in which the
- * contents of main memory were preserved. Put the device into the
- * appropriate state, according to the information saved in memory by the
- * preceding @suspend(). The driver starts working again, responding to
- * hardware events and software requests. The hardware may have gone
- * through a power-off reset, or it may have maintained state from the
- * previous suspend() which the driver may rely on while resuming. On most
- * platforms, there are no restrictions on availability of resources like
- * clocks during @resume().
+ * contents of main memory were preserved. The exact action to perform
+ * depends on the device's subsystem, but generally the driver is expected
+ * to start working again, responding to hardware events and software
+ * requests (the device itself may be left in a low-power state, waiting
+ * for a runtime resume to occur). The state of the device at the time its
+ * driver's @resume() callback is run depends on the platform and subsystem
+ * the device belongs to. On most platforms, there are no restrictions on
+ * availability of resources like clocks during @resume().
+ * Subsystem-level @resume() is executed for all devices after invoking
+ * subsystem-level @resume_noirq() for all of them.
*
* @freeze: Hibernation-specific, executed before creating a hibernation image.
- * Quiesce operations so that a consistent image can be created, but do NOT
- * otherwise put the device into a low power device state and do NOT emit
- * system wakeup events. Save in main memory the device settings to be
- * used by @restore() during the subsequent resume from hibernation or by
- * the subsequent @thaw(), if the creation of the image or the restoration
- * of main memory contents from it fails.
+ * Analogous to @suspend(), but it should not enable the device to signal
+ * wakeup events or change its power state. The majority of subsystems
+ * (with the notable exception of the PCI bus type) expect the driver-level
+ * @freeze() to save the device settings in memory to be used by @restore()
+ * during the subsequent resume from hibernation.
+ * Subsystem-level @freeze() is executed for all devices after invoking
+ * subsystem-level @prepare() for all of them.
*
* @thaw: Hibernation-specific, executed after creating a hibernation image OR
- * if the creation of the image fails. Also executed after a failing
+ * if the creation of an image has failed. Also executed after a failing
* attempt to restore the contents of main memory from such an image.
* Undo the changes made by the preceding @freeze(), so the device can be
* operated in the same way as immediately before the call to @freeze().
+ * Subsystem-level @thaw() is executed for all devices after invoking
+ * subsystem-level @thaw_noirq() for all of them. It also may be executed
+ * directly after @freeze() in case of a transition error.
*
* @poweroff: Hibernation-specific, executed after saving a hibernation image.
- * Quiesce the device, put it into a low power state appropriate for the
- * upcoming system state (such as PCI_D3hot), and enable wakeup events as
- * appropriate.
+ * Analogous to @suspend(), but it need not save the device's settings in
+ * memory.
+ * Subsystem-level @poweroff() is executed for all devices after invoking
+ * subsystem-level @prepare() for all of them.
*
* @restore: Hibernation-specific, executed after restoring the contents of main
- * memory from a hibernation image. Driver starts working again,
- * responding to hardware events and software requests. Drivers may NOT
- * make ANY assumptions about the hardware state right prior to @restore().
- * On most platforms, there are no restrictions on availability of
- * resources like clocks during @restore().
- *
- * @suspend_noirq: Complete the operations of ->suspend() by carrying out any
- * actions required for suspending the device that need interrupts to be
- * disabled
- *
- * @resume_noirq: Prepare for the execution of ->resume() by carrying out any
- * actions required for resuming the device that need interrupts to be
- * disabled
- *
- * @freeze_noirq: Complete the operations of ->freeze() by carrying out any
- * actions required for freezing the device that need interrupts to be
- * disabled
- *
- * @thaw_noirq: Prepare for the execution of ->thaw() by carrying out any
- * actions required for thawing the device that need interrupts to be
- * disabled
- *
- * @poweroff_noirq: Complete the operations of ->poweroff() by carrying out any
- * actions required for handling the device that need interrupts to be
- * disabled
- *
- * @restore_noirq: Prepare for the execution of ->restore() by carrying out any
- * actions required for restoring the operations of the device that need
- * interrupts to be disabled
+ * memory from a hibernation image, analogous to @resume().
+ *
+ * @suspend_noirq: Complete the actions started by @suspend(). Carry out any
+ * additional operations required for suspending the device that might be
+ * racing with its driver's interrupt handler, which is guaranteed not to
+ * run while @suspend_noirq() is being executed.
+ * It generally is expected that the device will be in a low-power state
+ * (appropriate for the target system sleep state) after subsystem-level
+ * @suspend_noirq() has returned successfully. If the device can generate
+ * system wakeup signals and is enabled to wake up the system, it should be
+ * configured to do so at that time. However, depending on the platform
+ * and device's subsystem, @suspend() may be allowed to put the device into
+ * the low-power state and configure it to generate wakeup signals, in
+ * which case it generally is not necessary to define @suspend_noirq().
+ *
+ * @resume_noirq: Prepare for the execution of @resume() by carrying out any
+ * operations required for resuming the device that might be racing with
+ * its driver's interrupt handler, which is guaranteed not to run while
+ * @resume_noirq() is being executed.
+ *
+ * @freeze_noirq: Complete the actions started by @freeze(). Carry out any
+ * additional operations required for freezing the device that might be
+ * racing with its driver's interrupt handler, which is guaranteed not to
+ * run while @freeze_noirq() is being executed.
+ * The power state of the device should not be changed by either @freeze()
+ * or @freeze_noirq() and it should not be configured to signal system
+ * wakeup by any of these callbacks.
+ *
+ * @thaw_noirq: Prepare for the execution of @thaw() by carrying out any
+ * operations required for thawing the device that might be racing with its
+ * driver's interrupt handler, which is guaranteed not to run while
+ * @thaw_noirq() is being executed.
+ *
+ * @poweroff_noirq: Complete the actions started by @poweroff(). Analogous to
+ * @suspend_noirq(), but it need not save the device's settings in memory.
+ *
+ * @restore_noirq: Prepare for the execution of @restore() by carrying out any
+ * operations required for thawing the device that might be racing with its
+ * driver's interrupt handler, which is guaranteed not to run while
+ * @restore_noirq() is being executed. Analogous to @resume_noirq().
*
* All of the above callbacks, except for @complete(), return error codes.
* However, the error codes returned by the resume operations, @resume(),
- * @thaw(), @restore(), @resume_noirq(), @thaw_noirq(), and @restore_noirq() do
+ * @thaw(), @restore(), @resume_noirq(), @thaw_noirq(), and @restore_noirq(), do
* not cause the PM core to abort the resume transition during which they are
- * returned. The error codes returned in that cases are only printed by the PM
+ * returned. The error codes returned in those cases are only printed by the PM
* core to the system logs for debugging purposes. Still, it is recommended
* that drivers only return error codes from their resume methods in case of an
* unrecoverable failure (i.e. when the device being handled refuses to resume
@@ -174,31 +201,43 @@ typedef struct pm_message {
* their children.
*
* It is allowed to unregister devices while the above callbacks are being
- * executed. However, it is not allowed to unregister a device from within any
- * of its own callbacks.
+ * executed. However, a callback routine must NOT try to unregister the device
+ * it was called for, although it may unregister children of that device (for
+ * example, if it detects that a child was unplugged while the system was
+ * asleep).
+ *
+ * Refer to Documentation/power/devices.txt for more information about the role
+ * of the above callbacks in the system suspend process.
*
- * There also are the following callbacks related to run-time power management
- * of devices:
+ * There also are callbacks related to runtime power management of devices.
+ * Again, these callbacks are executed by the PM core only for subsystems
+ * (PM domains, device types, classes and bus types) and the subsystem-level
+ * callbacks are supposed to invoke the driver callbacks. Moreover, the exact
+ * actions to be performed by a device driver's callbacks generally depend on
+ * the platform and subsystem the device belongs to.
*
* @runtime_suspend: Prepare the device for a condition in which it won't be
* able to communicate with the CPU(s) and RAM due to power management.
- * This need not mean that the device should be put into a low power state.
+ * This need not mean that the device should be put into a low-power state.
* For example, if the device is behind a link which is about to be turned
* off, the device may remain at full power. If the device does go to low
- * power and is capable of generating run-time wake-up events, remote
- * wake-up (i.e., a hardware mechanism allowing the device to request a
- * change of its power state via a wake-up event, such as PCI PME) should
- * be enabled for it.
+ * power and is capable of generating runtime wakeup events, remote wakeup
+ * (i.e., a hardware mechanism allowing the device to request a change of
+ * its power state via an interrupt) should be enabled for it.
*
* @runtime_resume: Put the device into the fully active state in response to a
- * wake-up event generated by hardware or at the request of software. If
- * necessary, put the device into the full power state and restore its
+ * wakeup event generated by hardware or at the request of software. If
+ * necessary, put the device into the full-power state and restore its
* registers, so that it is fully operational.
*
- * @runtime_idle: Device appears to be inactive and it might be put into a low
- * power state if all of the necessary conditions are satisfied. Check
+ * @runtime_idle: Device appears to be inactive and it might be put into a
+ * low-power state if all of the necessary conditions are satisfied. Check
* these conditions and handle the device as appropriate, possibly queueing
* a suspend request for it. The return value is ignored by the PM core.
+ *
+ * Refer to Documentation/power/runtime_pm.txt for more information about the
+ * role of the above callbacks in device runtime power management.
+ *
*/
struct dev_pm_ops {
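
The rewritten kernel-doc above describes the two-level callback scheme: the PM core invokes the subsystem-level (PM domain, device type, class or bus type) callbacks, and those are expected to run the driver's callbacks. From the driver side this still usually amounts to filling in a struct dev_pm_ops and pointing the driver at it; the sketch below is a hedged illustration with a made-up "foo" platform driver and only a subset of the callbacks.

/*
 * Minimal illustration of a driver providing dev_pm_ops as described in
 * the comment above.  "foo" and its suspend/resume behaviour are made
 * up; only the overall shape (callbacks gathered in a dev_pm_ops and
 * hung off the driver) is the point.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
	/* Quiesce the device: stop I/O and DMA, optionally arm wakeup. */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* Reprogram registers and start responding to requests again. */
	return 0;
}

static int foo_runtime_suspend(struct device *dev)
{
	/* Device is idle; drop to a low-power state if conditions allow. */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* Bring the device back to full power before it is used again. */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.suspend	 = foo_suspend,
	.resume		 = foo_resume,
	.runtime_suspend = foo_runtime_suspend,
	.runtime_resume	 = foo_runtime_resume,
};

static struct platform_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.owner	= THIS_MODULE,
		.pm	= &foo_pm_ops,
	},
};
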
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index ea567321ae3..2ca8cde5459 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -35,10 +35,12 @@ struct pstore_info {
spinlock_t buf_lock; /* serialize access to 'buf' */
char *buf;
size_t bufsize;
+ struct mutex read_mutex; /* serialize open/read/close */
int (*open)(struct pstore_info *psi);
int (*close)(struct pstore_info *psi);
ssize_t (*read)(u64 *id, enum pstore_type_id *type,
- struct timespec *time, struct pstore_info *psi);
+ struct timespec *time, char **buf,
+ struct pstore_info *psi);
int (*write)(enum pstore_type_id type, u64 *id,
unsigned int part, size_t size, struct pstore_info *psi);
int (*erase)(enum pstore_type_id type, u64 id,
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index a83833a1f7a..07ceb97d53f 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -35,7 +35,7 @@ struct shrinker {
/* These are for internal use */
struct list_head list;
- long nr; /* objs pending delete */
+ atomic_long_t nr_in_batch; /* objs pending delete */
};
#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
extern void register_shrinker(struct shrinker *);
diff --git a/include/linux/sigma.h b/include/linux/sigma.h
index e2accb3164d..d0de882c0d9 100644
--- a/include/linux/sigma.h
+++ b/include/linux/sigma.h
@@ -24,7 +24,7 @@ struct sigma_firmware {
struct sigma_firmware_header {
unsigned char magic[7];
u8 version;
- u32 crc;
+ __le32 crc;
};
enum {
@@ -40,19 +40,14 @@ enum {
struct sigma_action {
u8 instr;
u8 len_hi;
- u16 len;
- u16 addr;
+ __le16 len;
+ __be16 addr;
unsigned char payload[];
};
static inline u32 sigma_action_len(struct sigma_action *sa)
{
- return (sa->len_hi << 16) | sa->len;
-}
-
-static inline size_t sigma_action_size(struct sigma_action *sa, u32 payload_len)
-{
- return sizeof(*sa) + payload_len + (payload_len % 2);
+ return (sa->len_hi << 16) | le16_to_cpu(sa->len);
}
extern int process_sigma_firmware(struct i2c_client *client, const char *name);
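
The sigma.h changes above give the wire-format fields explicit endianness types and make sigma_action_len() byte-swap the 16-bit low part; the full length is still len_hi in bits 16..23 over the little-endian len in bits 0..15. A small worked example with made-up values:

/*
 * Worked example of the 24-bit length reconstruction done by
 * sigma_action_len() above: len_hi supplies bits 16..23, the
 * little-endian 16-bit field supplies bits 0..15.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t action_len(uint8_t len_hi, uint16_t len_le_host)
{
	/* len_le_host stands in for le16_to_cpu(sa->len). */
	return ((uint32_t)len_hi << 16) | len_le_host;
}

int main(void)
{
	/* len_hi = 0x01, len = 0x0234 -> total length 0x010234 (66100 bytes). */
	printf("0x%06x\n", action_len(0x01, 0x0234));
	return 0;
}
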
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index add4790b21f..e9e72bda1b7 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -85,6 +85,8 @@
* @reset: reset the device
* vdev: the virtio device
* After this, status and feature negotiation must be done again
+ * Device must not be reset from its vq/config callbacks, or in
+ * parallel with being added/removed.
* @find_vqs: find virtqueues and instantiate them.
* vdev: the virtio_device
* nvqs: the number of virtqueues to find
diff --git a/include/linux/virtio_mmio.h b/include/linux/virtio_mmio.h
index 27c7edefbc8..5c7b6f0daef 100644
--- a/include/linux/virtio_mmio.h
+++ b/include/linux/virtio_mmio.h
@@ -63,7 +63,7 @@
#define VIRTIO_MMIO_GUEST_FEATURES 0x020
/* Activated features set selector - Write Only */
-#define VIRTIO_MMIO_GUEST_FEATURES_SET 0x024
+#define VIRTIO_MMIO_GUEST_FEATURES_SEL 0x024
/* Guest's memory page size in bytes - Write Only */
#define VIRTIO_MMIO_GUEST_PAGE_SIZE 0x028
diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h
index b1377b931eb..5fb2c3d10c0 100644
--- a/include/media/soc_camera.h
+++ b/include/media/soc_camera.h
@@ -254,7 +254,7 @@ unsigned long soc_camera_apply_board_flags(struct soc_camera_link *icl,
static inline struct video_device *soc_camera_i2c_to_vdev(const struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct soc_camera_device *icd = (struct soc_camera_device *)sd->grp_id;
+ struct soc_camera_device *icd = v4l2_get_subdev_hostdata(sd);
return icd ? icd->vdev : NULL;
}
@@ -279,6 +279,11 @@ static inline struct soc_camera_device *soc_camera_from_vbq(const struct videobu
return container_of(vq, struct soc_camera_device, vb_vidq);
}
+static inline u32 soc_camera_grp_id(const struct soc_camera_device *icd)
+{
+ return (icd->iface << 8) | (icd->devnum + 1);
+}
+
void soc_camera_lock(struct vb2_queue *vq);
void soc_camera_unlock(struct vb2_queue *vq);
diff --git a/include/net/dst.h b/include/net/dst.h
index 4fb6c438179..6faec1a6021 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -205,12 +205,7 @@ dst_feature(const struct dst_entry *dst, u32 feature)
static inline u32 dst_mtu(const struct dst_entry *dst)
{
- u32 mtu = dst_metric_raw(dst, RTAX_MTU);
-
- if (!mtu)
- mtu = dst->ops->default_mtu(dst);
-
- return mtu;
+ return dst->ops->mtu(dst);
}
/* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
index 9adb99845a5..e1c2ee0eef4 100644
--- a/include/net/dst_ops.h
+++ b/include/net/dst_ops.h
@@ -17,7 +17,7 @@ struct dst_ops {
int (*gc)(struct dst_ops *ops);
struct dst_entry * (*check)(struct dst_entry *, __u32 cookie);
unsigned int (*default_advmss)(const struct dst_entry *);
- unsigned int (*default_mtu)(const struct dst_entry *);
+ unsigned int (*mtu)(const struct dst_entry *);
u32 * (*cow_metrics)(struct dst_entry *, unsigned long);
void (*destroy)(struct dst_entry *);
void (*ifdown)(struct dst_entry *,
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index b897d6e6d0a..f941964a993 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -31,6 +31,7 @@
/** struct ip_options - IP Options
*
* @faddr - Saved first hop address
+ * @nexthop - Saved nexthop address in LSRR and SSRR
* @is_data - Options in __data, rather than skb
* @is_strictroute - Strict source route
* @srr_is_hit - Packet destination addr was our one
@@ -41,6 +42,7 @@
*/
struct ip_options {
__be32 faddr;
+ __be32 nexthop;
unsigned char optlen;
unsigned char srr;
unsigned char rr;
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 78c83e62218..e9ff3fc5e68 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -35,6 +35,7 @@ struct inet_peer {
u32 metrics[RTAX_MAX];
u32 rate_tokens; /* rate limiting for ICMP */
+ int redirect_genid;
unsigned long rate_last;
unsigned long pmtu_expires;
u32 pmtu_orig;
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index 4283508b3e1..a88fb693938 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -67,18 +67,18 @@ struct nf_ct_event_notifier {
int (*fcn)(unsigned int events, struct nf_ct_event *item);
};
-extern struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
-extern int nf_conntrack_register_notifier(struct nf_ct_event_notifier *nb);
-extern void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *nb);
+extern int nf_conntrack_register_notifier(struct net *net, struct nf_ct_event_notifier *nb);
+extern void nf_conntrack_unregister_notifier(struct net *net, struct nf_ct_event_notifier *nb);
extern void nf_ct_deliver_cached_events(struct nf_conn *ct);
static inline void
nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
{
+ struct net *net = nf_ct_net(ct);
struct nf_conntrack_ecache *e;
- if (nf_conntrack_event_cb == NULL)
+ if (net->ct.nf_conntrack_event_cb == NULL)
return;
e = nf_ct_ecache_find(ct);
@@ -95,11 +95,12 @@ nf_conntrack_eventmask_report(unsigned int eventmask,
int report)
{
int ret = 0;
+ struct net *net = nf_ct_net(ct);
struct nf_ct_event_notifier *notify;
struct nf_conntrack_ecache *e;
rcu_read_lock();
- notify = rcu_dereference(nf_conntrack_event_cb);
+ notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
if (notify == NULL)
goto out_unlock;
@@ -164,9 +165,8 @@ struct nf_exp_event_notifier {
int (*fcn)(unsigned int events, struct nf_exp_event *item);
};
-extern struct nf_exp_event_notifier __rcu *nf_expect_event_cb;
-extern int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *nb);
-extern void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *nb);
+extern int nf_ct_expect_register_notifier(struct net *net, struct nf_exp_event_notifier *nb);
+extern void nf_ct_expect_unregister_notifier(struct net *net, struct nf_exp_event_notifier *nb);
static inline void
nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
@@ -174,11 +174,12 @@ nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
u32 pid,
int report)
{
+ struct net *net = nf_ct_exp_net(exp);
struct nf_exp_event_notifier *notify;
struct nf_conntrack_ecache *e;
rcu_read_lock();
- notify = rcu_dereference(nf_expect_event_cb);
+ notify = rcu_dereference(net->ct.nf_expect_event_cb);
if (notify == NULL)
goto out_unlock;
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index 0249399e51a..7a911eca0f1 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -18,6 +18,8 @@ struct netns_ct {
struct hlist_nulls_head unconfirmed;
struct hlist_nulls_head dying;
struct ip_conntrack_stat __percpu *stat;
+ struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
+ struct nf_exp_event_notifier __rcu *nf_expect_event_cb;
int sysctl_events;
unsigned int sysctl_events_retry_timeout;
int sysctl_acct;
diff --git a/include/net/red.h b/include/net/red.h
index 3319f16b3be..b72a3b83393 100644
--- a/include/net/red.h
+++ b/include/net/red.h
@@ -116,7 +116,7 @@ struct red_parms {
u32 qR; /* Cached random number */
unsigned long qavg; /* Average queue length: A scaled */
- psched_time_t qidlestart; /* Start of current idle period */
+ ktime_t qidlestart; /* Start of current idle period */
};
static inline u32 red_rmask(u8 Plog)
@@ -148,17 +148,17 @@ static inline void red_set_parms(struct red_parms *p,
static inline int red_is_idling(struct red_parms *p)
{
- return p->qidlestart != PSCHED_PASTPERFECT;
+ return p->qidlestart.tv64 != 0;
}
static inline void red_start_of_idle_period(struct red_parms *p)
{
- p->qidlestart = psched_get_time();
+ p->qidlestart = ktime_get();
}
static inline void red_end_of_idle_period(struct red_parms *p)
{
- p->qidlestart = PSCHED_PASTPERFECT;
+ p->qidlestart.tv64 = 0;
}
static inline void red_restart(struct red_parms *p)
@@ -170,13 +170,10 @@ static inline void red_restart(struct red_parms *p)
static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p)
{
- psched_time_t now;
- long us_idle;
+ s64 delta = ktime_us_delta(ktime_get(), p->qidlestart);
+ long us_idle = min_t(s64, delta, p->Scell_max);
int shift;
- now = psched_get_time();
- us_idle = psched_tdiff_bounded(now, p->qidlestart, p->Scell_max);
-
/*
* The problem: ideally, average length queue recalcultion should
* be done over constant clock intervals. This is too expensive, so
diff --git a/include/net/route.h b/include/net/route.h
index db7b3432f07..91855d185b5 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -71,12 +71,12 @@ struct rtable {
struct fib_info *fi; /* for client ref to shared metrics */
};
-static inline bool rt_is_input_route(struct rtable *rt)
+static inline bool rt_is_input_route(const struct rtable *rt)
{
return rt->rt_route_iif != 0;
}
-static inline bool rt_is_output_route(struct rtable *rt)
+static inline bool rt_is_output_route(const struct rtable *rt)
{
return rt->rt_route_iif == 0;
}
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
index d1e95c6ac77..5a35a2a2d3c 100644
--- a/include/scsi/libfcoe.h
+++ b/include/scsi/libfcoe.h
@@ -147,6 +147,7 @@ struct fcoe_ctlr {
u8 map_dest;
u8 spma;
u8 probe_tries;
+ u8 priority;
u8 dest_addr[ETH_ALEN];
u8 ctl_src_addr[ETH_ALEN];
@@ -301,6 +302,7 @@ struct fcoe_percpu_s {
* @lport: The associated local port
* @fcoe_pending_queue: The pending Rx queue of skbs
* @fcoe_pending_queue_active: Indicates if the pending queue is active
+ * @priority: Packet priority (DCB)
* @max_queue_depth: Max queue depth of pending queue
* @min_queue_depth: Min queue depth of pending queue
* @timer: The queue timer
@@ -316,6 +318,7 @@ struct fcoe_port {
struct fc_lport *lport;
struct sk_buff_head fcoe_pending_queue;
u8 fcoe_pending_queue_active;
+ u8 priority;
u32 max_queue_depth;
u32 min_queue_depth;
struct timer_list timer;
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 7f5fed3c89e..6873c7dd914 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -103,9 +103,10 @@ enum se_cmd_flags_table {
SCF_SCSI_NON_DATA_CDB = 0x00000040,
SCF_SCSI_CDB_EXCEPTION = 0x00000080,
SCF_SCSI_RESERVATION_CONFLICT = 0x00000100,
- SCF_SE_CMD_FAILED = 0x00000400,
+ SCF_FUA = 0x00000200,
SCF_SE_LUN_CMD = 0x00000800,
SCF_SE_ALLOW_EOO = 0x00001000,
+ SCF_BIDI = 0x00002000,
SCF_SENT_CHECK_CONDITION = 0x00004000,
SCF_OVERFLOW_BIT = 0x00008000,
SCF_UNDERFLOW_BIT = 0x00010000,
@@ -154,6 +155,7 @@ enum tcm_sense_reason_table {
TCM_CHECK_CONDITION_ABORT_CMD = 0x0d,
TCM_CHECK_CONDITION_UNIT_ATTENTION = 0x0e,
TCM_CHECK_CONDITION_NOT_READY = 0x0f,
+ TCM_RESERVATION_CONFLICT = 0x10,
};
struct se_obj {
@@ -211,7 +213,6 @@ struct t10_alua_lu_gp {
u16 lu_gp_id;
int lu_gp_valid_id;
u32 lu_gp_members;
- atomic_t lu_gp_shutdown;
atomic_t lu_gp_ref_cnt;
spinlock_t lu_gp_lock;
struct config_group lu_gp_group;
@@ -422,11 +423,9 @@ struct se_cmd {
int sam_task_attr;
/* Transport protocol dependent state, see transport_state_table */
enum transport_state_table t_state;
- /* Transport specific error status */
- int transport_error_status;
/* Used to signal cmd->se_tfo->check_release_cmd() usage per cmd */
- int check_release:1;
- int cmd_wait_set:1;
+ unsigned check_release:1;
+ unsigned cmd_wait_set:1;
/* See se_cmd_flags_table */
u32 se_cmd_flags;
u32 se_ordered_id;
@@ -441,13 +440,10 @@ struct se_cmd {
/* Used for sense data */
void *sense_buffer;
struct list_head se_delayed_node;
- struct list_head se_ordered_node;
struct list_head se_lun_node;
struct list_head se_qf_node;
struct se_device *se_dev;
struct se_dev_entry *se_deve;
- struct se_device *se_obj_ptr;
- struct se_device *se_orig_obj_ptr;
struct se_lun *se_lun;
/* Only used for internal passthrough and legacy TCM fabric modules */
struct se_session *se_sess;
@@ -463,8 +459,6 @@ struct se_cmd {
unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
unsigned long long t_task_lba;
int t_tasks_failed;
- int t_tasks_fua;
- bool t_tasks_bidi;
u32 t_tasks_sg_chained_no;
atomic_t t_fe_count;
atomic_t t_se_count;
@@ -489,14 +483,6 @@ struct se_cmd {
struct work_struct work;
- /*
- * Used for pre-registered fabric SGL passthrough WRITE and READ
- * with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop
- * and other HW target mode fabric modules.
- */
- struct scatterlist *t_task_pt_sgl;
- u32 t_task_pt_sgl_num;
-
struct scatterlist *t_data_sg;
unsigned int t_data_nents;
struct scatterlist *t_bidi_data_sg;
@@ -562,7 +548,7 @@ struct se_node_acl {
} ____cacheline_aligned;
struct se_session {
- int sess_tearing_down:1;
+ unsigned sess_tearing_down:1;
u64 sess_bin_isid;
struct se_node_acl *se_node_acl;
struct se_portal_group *se_tpg;
@@ -683,7 +669,6 @@ struct se_subsystem_dev {
struct t10_reservation t10_pr;
spinlock_t se_dev_lock;
void *se_dev_su_ptr;
- struct list_head se_dev_node;
struct config_group se_dev_group;
/* For T10 Reservations */
struct config_group se_dev_pr_group;
@@ -692,9 +677,6 @@ struct se_subsystem_dev {
} ____cacheline_aligned;
struct se_device {
- /* Set to 1 if thread is NOT sleeping on thread_sem */
- u8 thread_active;
- u8 dev_status_timer_flags;
/* RELATIVE TARGET PORT IDENTIFER Counter */
u16 dev_rpti_counter;
/* Used for SAM Task Attribute ordering */
@@ -719,14 +701,10 @@ struct se_device {
u64 write_bytes;
spinlock_t stats_lock;
/* Active commands on this virtual SE device */
- atomic_t active_cmds;
atomic_t simple_cmds;
atomic_t depth_left;
atomic_t dev_ordered_id;
- atomic_t dev_tur_active;
atomic_t execute_tasks;
- atomic_t dev_status_thr_count;
- atomic_t dev_hoq_count;
atomic_t dev_ordered_sync;
atomic_t dev_qf_count;
struct se_obj dev_obj;
@@ -734,14 +712,9 @@ struct se_device {
struct se_obj dev_export_obj;
struct se_queue_obj dev_queue_obj;
spinlock_t delayed_cmd_lock;
- spinlock_t ordered_cmd_lock;
spinlock_t execute_task_lock;
- spinlock_t state_task_lock;
- spinlock_t dev_alua_lock;
spinlock_t dev_reservation_lock;
- spinlock_t dev_state_lock;
spinlock_t dev_status_lock;
- spinlock_t dev_status_thr_lock;
spinlock_t se_port_lock;
spinlock_t se_tmr_lock;
spinlock_t qf_cmd_lock;
@@ -753,14 +726,10 @@ struct se_device {
struct t10_pr_registration *dev_pr_res_holder;
struct list_head dev_sep_list;
struct list_head dev_tmr_list;
- struct timer_list dev_status_timer;
/* Pointer to descriptor for processing thread */
struct task_struct *process_thread;
- pid_t process_thread_pid;
- struct task_struct *dev_mgmt_thread;
struct work_struct qf_work_queue;
struct list_head delayed_cmd_list;
- struct list_head ordered_cmd_list;
struct list_head execute_task_list;
struct list_head state_task_list;
struct list_head qf_cmd_list;
@@ -771,8 +740,6 @@ struct se_device {
struct se_subsystem_api *transport;
/* Linked list for struct se_hba struct se_device list */
struct list_head dev_list;
- /* Linked list for struct se_global->g_se_dev_list */
- struct list_head g_se_dev_list;
} ____cacheline_aligned;
struct se_hba {
@@ -834,7 +801,6 @@ struct se_port {
u32 sep_index;
struct scsi_port_stats sep_stats;
/* Used for ALUA Target Port Groups membership */
- atomic_t sep_tg_pt_gp_active;
atomic_t sep_tg_pt_secondary_offline;
/* Used for PR ALL_TG_PT=1 */
atomic_t sep_tg_pt_ref_cnt;
diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h
index c16e9431dd0..dac4f2d859f 100644
--- a/include/target/target_core_transport.h
+++ b/include/target/target_core_transport.h
@@ -10,29 +10,6 @@
#define PYX_TRANSPORT_STATUS_INTERVAL 5 /* In seconds */
-#define PYX_TRANSPORT_SENT_TO_TRANSPORT 0
-#define PYX_TRANSPORT_WRITE_PENDING 1
-
-#define PYX_TRANSPORT_UNKNOWN_SAM_OPCODE -1
-#define PYX_TRANSPORT_HBA_QUEUE_FULL -2
-#define PYX_TRANSPORT_REQ_TOO_MANY_SECTORS -3
-#define PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES -4
-#define PYX_TRANSPORT_INVALID_CDB_FIELD -5
-#define PYX_TRANSPORT_INVALID_PARAMETER_LIST -6
-#define PYX_TRANSPORT_LU_COMM_FAILURE -7
-#define PYX_TRANSPORT_UNKNOWN_MODE_PAGE -8
-#define PYX_TRANSPORT_WRITE_PROTECTED -9
-#define PYX_TRANSPORT_RESERVATION_CONFLICT -10
-#define PYX_TRANSPORT_ILLEGAL_REQUEST -11
-#define PYX_TRANSPORT_USE_SENSE_REASON -12
-
-#ifndef SAM_STAT_RESERVATION_CONFLICT
-#define SAM_STAT_RESERVATION_CONFLICT 0x18
-#endif
-
-#define TRANSPORT_PLUGIN_FREE 0
-#define TRANSPORT_PLUGIN_REGISTERED 1
-
#define TRANSPORT_PLUGIN_PHBA_PDEV 1
#define TRANSPORT_PLUGIN_VHBA_PDEV 2
#define TRANSPORT_PLUGIN_VHBA_VDEV 3
@@ -158,7 +135,6 @@ extern int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *);
extern int transport_handle_cdb_direct(struct se_cmd *);
extern int transport_generic_handle_cdb_map(struct se_cmd *);
extern int transport_generic_handle_data(struct se_cmd *);
-extern void transport_new_cmd_failure(struct se_cmd *);
extern int transport_generic_handle_tmr(struct se_cmd *);
extern bool target_stop_task(struct se_task *task, unsigned long *flags);
extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32,
diff --git a/include/video/omapdss.h b/include/video/omapdss.h
index b66ebb2032c..378c7ed6760 100644
--- a/include/video/omapdss.h
+++ b/include/video/omapdss.h
@@ -307,15 +307,8 @@ struct omap_dss_board_info {
void (*dsi_disable_pads)(int dsi_id, unsigned lane_mask);
};
-#if defined(CONFIG_OMAP2_DSS_MODULE) || defined(CONFIG_OMAP2_DSS)
/* Init with the board info */
extern int omap_display_init(struct omap_dss_board_info *board_data);
-#else
-static inline int omap_display_init(struct omap_dss_board_info *board_data)
-{
- return 0;
-}
-#endif
struct omap_display_platform_data {
struct omap_dss_board_info *board_data;
diff --git a/include/xen/interface/io/xs_wire.h b/include/xen/interface/io/xs_wire.h
index f0b6890370b..f6f07aa35af 100644
--- a/include/xen/interface/io/xs_wire.h
+++ b/include/xen/interface/io/xs_wire.h
@@ -29,8 +29,7 @@ enum xsd_sockmsg_type
XS_IS_DOMAIN_INTRODUCED,
XS_RESUME,
XS_SET_TARGET,
- XS_RESTRICT,
- XS_RESET_WATCHES
+ XS_RESTRICT
};
#define XS_WRITE_NONE "NONE"
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 2e0ecfcc881..5b4293d9819 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -1269,7 +1269,7 @@ void mq_clear_sbinfo(struct ipc_namespace *ns)
void mq_put_mnt(struct ipc_namespace *ns)
{
- mntput(ns->mq_mnt);
+ kern_unmount(ns->mq_mnt);
}
static int __init init_mqueue_fs(void)
@@ -1291,11 +1291,9 @@ static int __init init_mqueue_fs(void)
spin_lock_init(&mq_lock);
- init_ipc_ns.mq_mnt = kern_mount_data(&mqueue_fs_type, &init_ipc_ns);
- if (IS_ERR(init_ipc_ns.mq_mnt)) {
- error = PTR_ERR(init_ipc_ns.mq_mnt);
+ error = mq_init_ns(&init_ipc_ns);
+ if (error)
goto out_filesystem;
- }
return 0;
diff --git a/ipc/msgutil.c b/ipc/msgutil.c
index 8b5ce5d3f3e..5652101cdac 100644
--- a/ipc/msgutil.c
+++ b/ipc/msgutil.c
@@ -27,11 +27,6 @@ DEFINE_SPINLOCK(mq_lock);
*/
struct ipc_namespace init_ipc_ns = {
.count = ATOMIC_INIT(1),
-#ifdef CONFIG_POSIX_MQUEUE
- .mq_queues_max = DFLT_QUEUESMAX,
- .mq_msg_max = DFLT_MSGMAX,
- .mq_msgsize_max = DFLT_MSGSIZEMAX,
-#endif
.user_ns = &init_user_ns,
};
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index d9d5648f3cd..a184470cf9b 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2098,11 +2098,6 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
continue;
/* get old css_set pointer */
task_lock(tsk);
- if (tsk->flags & PF_EXITING) {
- /* ignore this task if it's going away */
- task_unlock(tsk);
- continue;
- }
oldcg = tsk->cgroups;
get_css_set(oldcg);
task_unlock(tsk);
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index e411a60cc2c..fcb93fca782 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -152,6 +152,13 @@ static void freezer_destroy(struct cgroup_subsys *ss,
kfree(freezer);
}
+/* task is frozen or will freeze immediately when next it gets woken */
+static bool is_task_frozen_enough(struct task_struct *task)
+{
+ return frozen(task) ||
+ (task_is_stopped_or_traced(task) && freezing(task));
+}
+
/*
* The call to cgroup_lock() in the freezer.state write method prevents
* a write to that file racing against an attach, and hence the
@@ -224,7 +231,7 @@ static void update_if_frozen(struct cgroup *cgroup,
cgroup_iter_start(cgroup, &it);
while ((task = cgroup_iter_next(cgroup, &it))) {
ntotal++;
- if (freezing(task) && frozen(task))
+ if (freezing(task) && is_task_frozen_enough(task))
nfrozen++;
}
@@ -276,7 +283,7 @@ static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
while ((task = cgroup_iter_next(cgroup, &it))) {
if (!freeze_task(task))
continue;
- if (frozen(task))
+ if (is_task_frozen_enough(task))
continue;
if (!freezing(task) && !freezer_should_skip(task))
num_cant_freeze_now++;
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 9fe58c46a42..0b1712dba58 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -123,6 +123,19 @@ static inline struct cpuset *task_cs(struct task_struct *task)
struct cpuset, css);
}
+#ifdef CONFIG_NUMA
+static inline bool task_has_mempolicy(struct task_struct *task)
+{
+ return task->mempolicy;
+}
+#else
+static inline bool task_has_mempolicy(struct task_struct *task)
+{
+ return false;
+}
+#endif
+
+
/* bits in struct cpuset flags field */
typedef enum {
CS_CPU_EXCLUSIVE,
@@ -949,7 +962,7 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
static void cpuset_change_task_nodemask(struct task_struct *tsk,
nodemask_t *newmems)
{
- bool masks_disjoint = !nodes_intersects(*newmems, tsk->mems_allowed);
+ bool need_loop;
repeat:
/*
@@ -962,6 +975,14 @@ repeat:
return;
task_lock(tsk);
+ /*
+ * Determine if a loop is necessary if another thread is doing
+ * get_mems_allowed(). If at least one node remains unchanged and
+ * tsk does not have a mempolicy, then an empty nodemask will not be
+ * possible when mems_allowed is larger than a word.
+ */
+ need_loop = task_has_mempolicy(tsk) ||
+ !nodes_intersects(*newmems, tsk->mems_allowed);
nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
@@ -981,11 +1002,9 @@ repeat:
/*
* Allocation of memory is very fast, we needn't sleep when waiting
- * for the read-side. No wait is necessary, however, if at least one
- * node remains unchanged.
+ * for the read-side.
*/
- while (masks_disjoint &&
- ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
+ while (need_loop && ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
task_unlock(tsk);
if (!task_curr(tsk))
yield();
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 0e8457da6f9..58690af323e 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -185,6 +185,9 @@ static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
static void update_context_time(struct perf_event_context *ctx);
static u64 perf_event_time(struct perf_event *event);
+static void ring_buffer_attach(struct perf_event *event,
+ struct ring_buffer *rb);
+
void __weak perf_event_print_debug(void) { }
extern __weak const char *perf_pmu_name(void)
@@ -2171,9 +2174,10 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
*/
cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
- perf_event_sched_in(cpuctx, ctx, task);
+ if (ctx->nr_events)
+ cpuctx->task_ctx = ctx;
- cpuctx->task_ctx = ctx;
+ perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);
perf_pmu_enable(ctx->pmu);
perf_ctx_unlock(cpuctx, ctx);
@@ -3190,12 +3194,33 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
struct ring_buffer *rb;
unsigned int events = POLL_HUP;
+ /*
+ * Race between perf_event_set_output() and perf_poll(): perf_poll()
+ * grabs the rb reference but perf_event_set_output() overrides it.
+ * Here is the timeline for two threads T1, T2:
+ * t0: T1, rb = rcu_dereference(event->rb)
+ * t1: T2, old_rb = event->rb
+ * t2: T2, event->rb = new rb
+ * t3: T2, ring_buffer_detach(old_rb)
+ * t4: T1, ring_buffer_attach(rb1)
+ * t5: T1, poll_wait(event->waitq)
+ *
+ * To avoid this problem, we grab mmap_mutex in perf_poll()
+ * thereby ensuring that the assignment of the new ring buffer
+ * and the detachment of the old buffer appear atomic to perf_poll()
+ */
+ mutex_lock(&event->mmap_mutex);
+
rcu_read_lock();
rb = rcu_dereference(event->rb);
- if (rb)
+ if (rb) {
+ ring_buffer_attach(event, rb);
events = atomic_xchg(&rb->poll, 0);
+ }
rcu_read_unlock();
+ mutex_unlock(&event->mmap_mutex);
+
poll_wait(file, &event->waitq, wait);
return events;
@@ -3496,6 +3521,53 @@ unlock:
return ret;
}
+static void ring_buffer_attach(struct perf_event *event,
+ struct ring_buffer *rb)
+{
+ unsigned long flags;
+
+ if (!list_empty(&event->rb_entry))
+ return;
+
+ spin_lock_irqsave(&rb->event_lock, flags);
+ if (!list_empty(&event->rb_entry))
+ goto unlock;
+
+ list_add(&event->rb_entry, &rb->event_list);
+unlock:
+ spin_unlock_irqrestore(&rb->event_lock, flags);
+}
+
+static void ring_buffer_detach(struct perf_event *event,
+ struct ring_buffer *rb)
+{
+ unsigned long flags;
+
+ if (list_empty(&event->rb_entry))
+ return;
+
+ spin_lock_irqsave(&rb->event_lock, flags);
+ list_del_init(&event->rb_entry);
+ wake_up_all(&event->waitq);
+ spin_unlock_irqrestore(&rb->event_lock, flags);
+}
+
+static void ring_buffer_wakeup(struct perf_event *event)
+{
+ struct ring_buffer *rb;
+
+ rcu_read_lock();
+ rb = rcu_dereference(event->rb);
+ if (!rb)
+ goto unlock;
+
+ list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
+ wake_up_all(&event->waitq);
+
+unlock:
+ rcu_read_unlock();
+}
+
static void rb_free_rcu(struct rcu_head *rcu_head)
{
struct ring_buffer *rb;
@@ -3521,9 +3593,19 @@ static struct ring_buffer *ring_buffer_get(struct perf_event *event)
static void ring_buffer_put(struct ring_buffer *rb)
{
+ struct perf_event *event, *n;
+ unsigned long flags;
+
if (!atomic_dec_and_test(&rb->refcount))
return;
+ spin_lock_irqsave(&rb->event_lock, flags);
+ list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) {
+ list_del_init(&event->rb_entry);
+ wake_up_all(&event->waitq);
+ }
+ spin_unlock_irqrestore(&rb->event_lock, flags);
+
call_rcu(&rb->rcu_head, rb_free_rcu);
}
@@ -3546,6 +3628,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
vma->vm_mm->pinned_vm -= event->mmap_locked;
rcu_assign_pointer(event->rb, NULL);
+ ring_buffer_detach(event, rb);
mutex_unlock(&event->mmap_mutex);
ring_buffer_put(rb);
@@ -3700,7 +3783,7 @@ static const struct file_operations perf_fops = {
void perf_event_wakeup(struct perf_event *event)
{
- wake_up_all(&event->waitq);
+ ring_buffer_wakeup(event);
if (event->pending_kill) {
kill_fasync(&event->fasync, SIGIO, event->pending_kill);
@@ -5822,6 +5905,8 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
INIT_LIST_HEAD(&event->group_entry);
INIT_LIST_HEAD(&event->event_entry);
INIT_LIST_HEAD(&event->sibling_list);
+ INIT_LIST_HEAD(&event->rb_entry);
+
init_waitqueue_head(&event->waitq);
init_irq_work(&event->pending, perf_pending_event);
@@ -6028,6 +6113,8 @@ set:
old_rb = event->rb;
rcu_assign_pointer(event->rb, rb);
+ if (old_rb)
+ ring_buffer_detach(event, old_rb);
ret = 0;
unlock:
mutex_unlock(&event->mmap_mutex);
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index 09097dd8116..64568a69937 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -22,6 +22,9 @@ struct ring_buffer {
local_t lost; /* nr records lost */
long watermark; /* wakeup watermark */
+ /* poll crap */
+ spinlock_t event_lock;
+ struct list_head event_list;
struct perf_event_mmap_page *user_page;
void *data_pages[0];
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index a2a29205cc0..7f3011c6b57 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -209,6 +209,9 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
rb->writable = 1;
atomic_set(&rb->refcount, 1);
+
+ INIT_LIST_HEAD(&rb->event_list);
+ spin_lock_init(&rb->event_lock);
}
#ifndef CONFIG_PERF_USE_VMALLOC
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 422e567eecf..ae34bf51682 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -885,10 +885,13 @@ static void __remove_hrtimer(struct hrtimer *timer,
struct hrtimer_clock_base *base,
unsigned long newstate, int reprogram)
{
+ struct timerqueue_node *next_timer;
if (!(timer->state & HRTIMER_STATE_ENQUEUED))
goto out;
- if (&timer->node == timerqueue_getnext(&base->active)) {
+ next_timer = timerqueue_getnext(&base->active);
+ timerqueue_del(&base->active, &timer->node);
+ if (&timer->node == next_timer) {
#ifdef CONFIG_HIGH_RES_TIMERS
/* Reprogram the clock event device. if enabled */
if (reprogram && hrtimer_hres_active()) {
@@ -901,7 +904,6 @@ static void __remove_hrtimer(struct hrtimer *timer,
}
#endif
}
- timerqueue_del(&base->active, &timer->node);
if (!timerqueue_getnext(&base->active))
base->cpu_base->active_bases &= ~(1 << base->index);
out:
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 67ce837ae52..1da999f5e74 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -623,8 +623,9 @@ static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
static int irq_wait_for_interrupt(struct irqaction *action)
{
+ set_current_state(TASK_INTERRUPTIBLE);
+
while (!kthread_should_stop()) {
- set_current_state(TASK_INTERRUPTIBLE);
if (test_and_clear_bit(IRQTF_RUNTHREAD,
&action->thread_flags)) {
@@ -632,7 +633,9 @@ static int irq_wait_for_interrupt(struct irqaction *action)
return 0;
}
schedule();
+ set_current_state(TASK_INTERRUPTIBLE);
}
+ __set_current_state(TASK_RUNNING);
return -1;
}
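For context, this reordering restores the canonical wait-loop idiom: mark the task TASK_INTERRUPTIBLE before testing the wakeup condition, so a wakeup that lands between the test and schedule() is not lost. A minimal, hedged sketch of the idiom (the flag word and function name are illustrative, not from this patch; assumes <linux/sched.h> and <linux/kthread.h>):

static unsigned long example_flags;	/* hypothetical wakeup flag word */

static int example_wait_for_work(void)
{
	/* Mark ourselves sleeping before looking at the condition. */
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		if (test_and_clear_bit(0, &example_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;		/* there is work to do */
		}
		schedule();			/* actually sleep */
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return -1;				/* kthread_stop() was called */
}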
@@ -1596,7 +1599,7 @@ int request_percpu_irq(unsigned int irq, irq_handler_t handler,
return -ENOMEM;
action->handler = handler;
- action->flags = IRQF_PERCPU;
+ action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
action->name = devname;
action->percpu_dev_id = dev_id;
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index b5f4742693c..dc813a948be 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -84,7 +84,9 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
*/
action = desc->action;
if (!action || !(action->flags & IRQF_SHARED) ||
- (action->flags & __IRQF_TIMER) || !action->next)
+ (action->flags & __IRQF_TIMER) ||
+ (action->handler(irq, action->dev_id) == IRQ_HANDLED) ||
+ !action->next)
goto out;
/* Already running on another processor */
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index bbdfe2a462a..66ff7109f69 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -66,8 +66,9 @@ void jump_label_inc(struct jump_label_key *key)
return;
jump_label_lock();
- if (atomic_add_return(1, &key->enabled) == 1)
+ if (atomic_read(&key->enabled) == 0)
jump_label_update(key, JUMP_LABEL_ENABLE);
+ atomic_inc(&key->enabled);
jump_label_unlock();
}
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index e69434b070d..b2e08c932d9 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -44,6 +44,7 @@
#include <linux/stringify.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
+#include <linux/kmemcheck.h>
#include <asm/sections.h>
@@ -2948,7 +2949,12 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
void lockdep_init_map(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, int subclass)
{
- memset(lock, 0, sizeof(*lock));
+ int i;
+
+ kmemcheck_mark_initialized(lock, sizeof(*lock));
+
+ for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
+ lock->class_cache[i] = NULL;
#ifdef CONFIG_LOCK_STAT
lock->cpu = raw_smp_processor_id();
diff --git a/kernel/printk.c b/kernel/printk.c
index 1455a0d4eed..7982a0a841e 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -1293,10 +1293,11 @@ again:
raw_spin_lock(&logbuf_lock);
if (con_start != log_end)
retry = 1;
+ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+
if (retry && console_trylock())
goto again;
- raw_spin_unlock_irqrestore(&logbuf_lock, flags);
if (wake_klogd)
wake_up_klogd();
}
diff --git a/kernel/sched.c b/kernel/sched.c
index 0e9344a71be..d6b149ccf92 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -71,6 +71,7 @@
#include <linux/ctype.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
+#include <linux/init_task.h>
#include <asm/tlb.h>
#include <asm/irq_regs.h>
@@ -4810,6 +4811,9 @@ EXPORT_SYMBOL(wait_for_completion);
* This waits for either a completion of a specific task to be signaled or for a
* specified timeout to expire. The timeout is in jiffies. It is not
* interruptible.
+ *
+ * The return value is 0 if timed out, and positive (at least 1, or number of
+ * jiffies left till timeout) if completed.
*/
unsigned long __sched
wait_for_completion_timeout(struct completion *x, unsigned long timeout)
@@ -4824,6 +4828,8 @@ EXPORT_SYMBOL(wait_for_completion_timeout);
*
* This waits for completion of a specific task to be signaled. It is
* interruptible.
+ *
+ * The return value is -ERESTARTSYS if interrupted, 0 if completed.
*/
int __sched wait_for_completion_interruptible(struct completion *x)
{
@@ -4841,6 +4847,9 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
*
* This waits for either a completion of a specific task to be signaled or for a
* specified timeout to expire. It is interruptible. The timeout is in jiffies.
+ *
+ * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
+ * positive (at least 1, or number of jiffies left till timeout) if completed.
*/
long __sched
wait_for_completion_interruptible_timeout(struct completion *x,
@@ -4856,6 +4865,8 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
*
* This waits to be signaled for completion of a specific task. It can be
* interrupted by a kill signal.
+ *
+ * The return value is -ERESTARTSYS if interrupted, 0 if completed.
*/
int __sched wait_for_completion_killable(struct completion *x)
{
@@ -4874,6 +4885,9 @@ EXPORT_SYMBOL(wait_for_completion_killable);
* This waits for either a completion of a specific task to be
* signaled or for a specified timeout to expire. It can be
* interrupted by a kill signal. The timeout is in jiffies.
+ *
+ * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
+ * positive (at least 1, or number of jiffies left till timeout) if completed.
*/
long __sched
wait_for_completion_killable_timeout(struct completion *x,
@@ -6099,6 +6113,9 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
*/
idle->sched_class = &idle_sched_class;
ftrace_graph_init_idle_task(idle, cpu);
+#if defined(CONFIG_SMP)
+ sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
+#endif
}
/*
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 5c9e67923b7..8a39fa3e3c6 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -772,19 +772,32 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
list_del_leaf_cfs_rq(cfs_rq);
}
+static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
+{
+ long tg_weight;
+
+ /*
+ * Use this CPU's actual weight instead of the last load_contribution
+ * to gain a more accurate current total weight. See
+ * update_cfs_rq_load_contribution().
+ */
+ tg_weight = atomic_read(&tg->load_weight);
+ tg_weight -= cfs_rq->load_contribution;
+ tg_weight += cfs_rq->load.weight;
+
+ return tg_weight;
+}
+
static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
{
- long load_weight, load, shares;
+ long tg_weight, load, shares;
+ tg_weight = calc_tg_weight(tg, cfs_rq);
load = cfs_rq->load.weight;
- load_weight = atomic_read(&tg->load_weight);
- load_weight += load;
- load_weight -= cfs_rq->load_contribution;
-
shares = (tg->shares * load);
- if (load_weight)
- shares /= load_weight;
+ if (tg_weight)
+ shares /= tg_weight;
if (shares < MIN_SHARES)
shares = MIN_SHARES;
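A rough numeric walk-through of the new helper may help; every value below is invented purely for illustration and does not come from the patch:

/* Hypothetical sketch of calc_tg_weight() feeding calc_cfs_shares(). */
static long example_cfs_shares(void)
{
	long tg_shares     = 1024;	/* tg->shares */
	long tg_load_sum   = 3072;	/* atomic_read(&tg->load_weight), possibly stale */
	long local_contrib = 1024;	/* cfs_rq->load_contribution (stale local part) */
	long local_weight  = 2048;	/* cfs_rq->load.weight (live local weight) */

	/* Swap the stale local contribution for the live weight: 4096. */
	long tg_weight = tg_load_sum - local_contrib + local_weight;

	/* This CPU's proportional slice of the group's shares: 512. */
	return tg_shares * local_weight / tg_weight;
}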
@@ -1743,7 +1756,7 @@ static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
{
- if (!cfs_rq->runtime_enabled || !cfs_rq->nr_running)
+ if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
return;
__return_cfs_rq_runtime(cfs_rq);
@@ -2036,36 +2049,100 @@ static void task_waking_fair(struct task_struct *p)
* Adding load to a group doesn't make a group heavier, but can cause movement
* of group shares between cpus. Assuming the shares were perfectly aligned one
* can calculate the shift in shares.
+ *
+ * Calculate the effective load difference if @wl is added (subtracted) to @tg
+ * on this @cpu and results in a total addition (subtraction) of @wg to the
+ * total group weight.
+ *
+ * Given a runqueue weight distribution (rw_i) we can compute a shares
+ * distribution (s_i) using:
+ *
+ * s_i = rw_i / \Sum rw_j (1)
+ *
+ * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
+ * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
+ * shares distribution (s_i):
+ *
+ * rw_i = { 2, 4, 1, 0 }
+ * s_i = { 2/7, 4/7, 1/7, 0 }
+ *
+ * As per wake_affine() we're interested in the load of two CPUs (the CPU the
+ * task used to run on and the CPU the waker is running on), we need to
+ * compute the effect of waking a task on either CPU and, in case of a sync
+ * wakeup, compute the effect of the current task going to sleep.
+ *
+ * So for a change of @wl to the local @cpu with an overall group weight change
+ * of @wg we can compute the new shares distribution (s'_i) using:
+ *
+ * s'_i = (rw_i + @wl) / (@wg + \Sum rw_j) (2)
+ *
+ * Suppose we're interested in CPUs 0 and 1, and want to compute the load
+ * differences in waking a task to CPU 0. The additional task changes the
+ * weight and shares distributions like:
+ *
+ * rw'_i = { 3, 4, 1, 0 }
+ * s'_i = { 3/8, 4/8, 1/8, 0 }
+ *
+ * We can then compute the difference in effective weight by using:
+ *
+ * dw_i = S * (s'_i - s_i) (3)
+ *
+ * Where 'S' is the group weight as seen by its parent.
+ *
+ * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
+ * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
+ * 4/7) times the weight of the group.
*/
static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
{
struct sched_entity *se = tg->se[cpu];
- if (!tg->parent)
+ if (!tg->parent) /* the trivial, non-cgroup case */
return wl;
for_each_sched_entity(se) {
- long lw, w;
+ long w, W;
tg = se->my_q->tg;
- w = se->my_q->load.weight;
- /* use this cpu's instantaneous contribution */
- lw = atomic_read(&tg->load_weight);
- lw -= se->my_q->load_contribution;
- lw += w + wg;
+ /*
+ * W = @wg + \Sum rw_j
+ */
+ W = wg + calc_tg_weight(tg, se->my_q);
- wl += w;
+ /*
+ * w = rw_i + @wl
+ */
+ w = se->my_q->load.weight + wl;
- if (lw > 0 && wl < lw)
- wl = (wl * tg->shares) / lw;
+ /*
+ * wl = S * s'_i; see (2)
+ */
+ if (W > 0 && w < W)
+ wl = (w * tg->shares) / W;
else
wl = tg->shares;
- /* zero point is MIN_SHARES */
+ /*
+ * Per the above, wl is the new se->load.weight value; since
+ * those are clipped to [MIN_SHARES, ...) do so now. See
+ * calc_cfs_shares().
+ */
if (wl < MIN_SHARES)
wl = MIN_SHARES;
+
+ /*
+ * wl = dw_i = S * (s'_i - s_i); see (3)
+ */
wl -= se->load.weight;
+
+ /*
+ * Recursively apply this logic to all parent groups to compute
+ * the final effective load change on the root group. Since
+ * only the @tg group gets extra weight, all parent groups can
+ * only redistribute existing shares. @wl is the shift in shares
+ * resulting from this level per the above.
+ */
wg = 0;
}
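Plugging the comment's own rw_i = { 2, 4, 1, 0 } example into (2) and (3) gives a feel for the result; the group weight S below is an arbitrary illustrative value, not something taken from the patch:

/* Worked instance of the example above, assuming S = 1024. */
static long example_effective_load(int cpu)
{
	long S = 1024;			/* group weight as seen by its parent */

	/* CPU 0: dw_0 = S * (3/8 - 2/7) = S * 5/56, roughly +91 */
	if (cpu == 0)
		return S * 5 / 56;

	/* CPU 1: dw_1 = S * (4/8 - 4/7) = -S * 4/56, roughly -73 */
	return -(S * 4) / 56;
}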
@@ -2249,7 +2326,8 @@ static int select_idle_sibling(struct task_struct *p, int target)
int cpu = smp_processor_id();
int prev_cpu = task_cpu(p);
struct sched_domain *sd;
- int i;
+ struct sched_group *sg;
+ int i, smt = 0;
/*
* If the task is going to be woken-up on this cpu and if it is
@@ -2269,25 +2347,40 @@ static int select_idle_sibling(struct task_struct *p, int target)
* Otherwise, iterate the domains and find an eligible idle cpu.
*/
rcu_read_lock();
+again:
for_each_domain(target, sd) {
+ if (!smt && (sd->flags & SD_SHARE_CPUPOWER))
+ continue;
+
+ if (smt && !(sd->flags & SD_SHARE_CPUPOWER))
+ break;
+
if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
break;
- for_each_cpu_and(i, sched_domain_span(sd), tsk_cpus_allowed(p)) {
- if (idle_cpu(i)) {
- target = i;
- break;
+ sg = sd->groups;
+ do {
+ if (!cpumask_intersects(sched_group_cpus(sg),
+ tsk_cpus_allowed(p)))
+ goto next;
+
+ for_each_cpu(i, sched_group_cpus(sg)) {
+ if (!idle_cpu(i))
+ goto next;
}
- }
- /*
- * Lets stop looking for an idle sibling when we reached
- * the domain that spans the current cpu and prev_cpu.
- */
- if (cpumask_test_cpu(cpu, sched_domain_span(sd)) &&
- cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
- break;
+ target = cpumask_first_and(sched_group_cpus(sg),
+ tsk_cpus_allowed(p));
+ goto done;
+next:
+ sg = sg->next;
+ } while (sg != sd->groups);
+ }
+ if (!smt) {
+ smt = 1;
+ goto again;
}
+done:
rcu_read_unlock();
return target;
@@ -3511,7 +3604,7 @@ static bool update_sd_pick_busiest(struct sched_domain *sd,
}
/**
- * update_sd_lb_stats - Update sched_group's statistics for load balancing.
+ * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
* @sd: sched_domain whose statistics are to be updated.
* @this_cpu: Cpu for which load balance is currently performed.
* @idle: Idle status of this_cpu
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index efa0a7b75dd..84802245abd 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -67,3 +67,4 @@ SCHED_FEAT(NONTASK_POWER, 1)
SCHED_FEAT(TTWU_QUEUE, 1)
SCHED_FEAT(FORCE_SD_OVERLAP, 0)
+SCHED_FEAT(RT_RUNTIME_SHARE, 1)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 056cbd2e2a2..583a1368afe 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -560,6 +560,9 @@ static int balance_runtime(struct rt_rq *rt_rq)
{
int more = 0;
+ if (!sched_feat(RT_RUNTIME_SHARE))
+ return more;
+
if (rt_rq->rt_time > rt_rq->rt_runtime) {
raw_spin_unlock(&rt_rq->rt_runtime_lock);
more = do_balance_runtime(rt_rq);
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index 6318b511afa..a650694883a 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -1354,7 +1354,7 @@ static ssize_t binary_sysctl(const int *name, int nlen,
fput(file);
out_putname:
- putname(pathname);
+ __putname(pathname);
out:
return result;
}
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index c436e790b21..8a46f5d6450 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -195,7 +195,7 @@ static enum hrtimer_restart alarmtimer_fired(struct hrtimer *timer)
struct alarm *alarm;
ktime_t expired = next->expires;
- if (expired.tv64 >= now.tv64)
+ if (expired.tv64 > now.tv64)
break;
alarm = container_of(next, struct alarm, node);
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 1ecd6ba36d6..c4eb71c8b2e 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -387,6 +387,7 @@ void clockevents_exchange_device(struct clock_event_device *old,
* released list and do a notify add later.
*/
if (old) {
+ old->event_handler = clockevents_handle_noop;
clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
list_del(&old->list);
list_add(&old->list, &clockevents_released);
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index cf52fda2e09..d3ad022136e 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -492,6 +492,22 @@ void clocksource_touch_watchdog(void)
}
/**
+ * clocksource_max_adjustment - Returns max adjustment amount
+ * @cs: Pointer to clocksource
+ *
+ */
+static u32 clocksource_max_adjustment(struct clocksource *cs)
+{
+ u64 ret;
+ /*
+ * We won't try to correct for more than 11% adjustments (110,000 ppm).
+ */
+ ret = (u64)cs->mult * 11;
+ do_div(ret, 100);
+ return (u32)ret;
+}
+
+/**
* clocksource_max_deferment - Returns max time the clocksource can be deferred
* @cs: Pointer to clocksource
*
@@ -503,25 +519,28 @@ static u64 clocksource_max_deferment(struct clocksource *cs)
/*
* Calculate the maximum number of cycles that we can pass to the
* cyc2ns function without overflowing a 64-bit signed result. The
- * maximum number of cycles is equal to ULLONG_MAX/cs->mult which
- * is equivalent to the below.
- * max_cycles < (2^63)/cs->mult
- * max_cycles < 2^(log2((2^63)/cs->mult))
- * max_cycles < 2^(log2(2^63) - log2(cs->mult))
- * max_cycles < 2^(63 - log2(cs->mult))
- * max_cycles < 1 << (63 - log2(cs->mult))
+ * maximum number of cycles is equal to ULLONG_MAX/(cs->mult+cs->maxadj)
+ * which is equivalent to the below.
+ * max_cycles < (2^63)/(cs->mult + cs->maxadj)
+ * max_cycles < 2^(log2((2^63)/(cs->mult + cs->maxadj)))
+ * max_cycles < 2^(log2(2^63) - log2(cs->mult + cs->maxadj))
+ * max_cycles < 2^(63 - log2(cs->mult + cs->maxadj))
+ * max_cycles < 1 << (63 - log2(cs->mult + cs->maxadj))
* Please note that we add 1 to the result of the log2 to account for
* any rounding errors, ensure the above inequality is satisfied and
* no overflow will occur.
*/
- max_cycles = 1ULL << (63 - (ilog2(cs->mult) + 1));
+ max_cycles = 1ULL << (63 - (ilog2(cs->mult + cs->maxadj) + 1));
/*
* The actual maximum number of cycles we can defer the clocksource is
* determined by the minimum of max_cycles and cs->mask.
+ * Note: Here we subtract the maxadj to make sure we don't sleep for
+ * too long if there's a large negative adjustment.
*/
max_cycles = min_t(u64, max_cycles, (u64) cs->mask);
- max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult, cs->shift);
+ max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult - cs->maxadj,
+ cs->shift);
/*
* To ensure that the clocksource does not wrap whilst we are idle,
@@ -529,7 +548,7 @@ static u64 clocksource_max_deferment(struct clocksource *cs)
* note a margin of 12.5% is used because this can be computed with
* a shift, versus say 10% which would require division.
*/
- return max_nsecs - (max_nsecs >> 5);
+ return max_nsecs - (max_nsecs >> 3);
}
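To make the new bound concrete, here is a rough worked example with invented values (mult = 1 << 24 and shift = 24, i.e. roughly a 1 GHz counter, behind a 32-bit mask); it only illustrates the arithmetic, not any real clocksource:

/* Illustrative only; mult, shift and mask are made-up sample values. */
static unsigned long long example_max_idle_ns(void)
{
	unsigned long long mult = 1 << 24, shift = 24, mask = 0xffffffffULL;
	unsigned long long maxadj = mult * 11 / 100;		/* ~1845493 */

	/* 1 << (63 - (ilog2(mult + maxadj) + 1)) = 1 << 38 cycles at most... */
	unsigned long long max_cycles = 1ULL << 38;

	/* ...but the 32-bit counter mask is the tighter bound here. */
	if (max_cycles > mask)
		max_cycles = mask;

	/* cyc2ns with the worst-case negative adjustment: ~3.8 seconds. */
	unsigned long long max_nsecs = (max_cycles * (mult - maxadj)) >> shift;

	return max_nsecs - (max_nsecs >> 3);	/* ~3.3s after the 12.5% margin */
}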
#ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
@@ -628,7 +647,7 @@ static void clocksource_enqueue(struct clocksource *cs)
/**
* __clocksource_updatefreq_scale - Used update clocksource with new freq
- * @t: clocksource to be registered
+ * @cs: clocksource to be registered
* @scale: Scale factor multiplied against freq to get clocksource hz
* @freq: clocksource frequency (cycles per second) divided by scale
*
@@ -640,7 +659,6 @@ static void clocksource_enqueue(struct clocksource *cs)
void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
{
u64 sec;
-
/*
* Calc the maximum number of seconds which we can run before
* wrapping around. For clocksources which have a mask > 32bit
@@ -651,7 +669,7 @@ void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
* ~ 0.06ppm granularity for NTP. We apply the same 12.5%
* margin as we do in clocksource_max_deferment()
*/
- sec = (cs->mask - (cs->mask >> 5));
+ sec = (cs->mask - (cs->mask >> 3));
do_div(sec, freq);
do_div(sec, scale);
if (!sec)
@@ -661,13 +679,27 @@ void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
NSEC_PER_SEC / scale, sec * scale);
+
+ /*
+ * For clocksources that have large mults, reduce mult/shift to avoid
+ * overflow. Since mult may be adjusted by ntp, add an extra safety margin.
+ *
+ */
+ cs->maxadj = clocksource_max_adjustment(cs);
+ while ((cs->mult + cs->maxadj < cs->mult)
+ || (cs->mult - cs->maxadj > cs->mult)) {
+ cs->mult >>= 1;
+ cs->shift--;
+ cs->maxadj = clocksource_max_adjustment(cs);
+ }
+
cs->max_idle_ns = clocksource_max_deferment(cs);
}
EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale);
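The halving loop above keeps the mult/2^shift ratio constant while shrinking mult until mult +/- maxadj fits a u32 again. A hedged sketch of the same check with an invented starting value:

/* Illustrative overflow clamp mirroring the loop above; values are made up. */
static unsigned int example_clamp_mult(unsigned int mult, unsigned int *shift)
{
	unsigned int maxadj = (unsigned long long)mult * 11 / 100;

	/* e.g. mult = 0xF0000000: mult + maxadj wraps past U32_MAX... */
	while (mult + maxadj < mult || mult - maxadj > mult) {
		mult >>= 1;	/* ...so halve mult and drop shift by one, */
		(*shift)--;	/* keeping mult/2^shift (ns per cycle) unchanged */
		maxadj = (unsigned long long)mult * 11 / 100;
	}
	return mult;		/* 0xF0000000 settles at 0x78000000 */
}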
/**
* __clocksource_register_scale - Used to install new clocksources
- * @t: clocksource to be registered
+ * @cs: clocksource to be registered
* @scale: Scale factor multiplied against freq to get clocksource hz
* @freq: clocksource frequency (cycles per second) divided by scale
*
@@ -695,12 +727,18 @@ EXPORT_SYMBOL_GPL(__clocksource_register_scale);
/**
* clocksource_register - Used to install new clocksources
- * @t: clocksource to be registered
+ * @cs: clocksource to be registered
*
* Returns -EBUSY if registration fails, zero otherwise.
*/
int clocksource_register(struct clocksource *cs)
{
+ /* calculate max adjustment for given mult/shift */
+ cs->maxadj = clocksource_max_adjustment(cs);
+ WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
+ "Clocksource %s might overflow on 11%% adjustment\n",
+ cs->name);
+
/* calculate max idle time permitted for this clocksource */
cs->max_idle_ns = clocksource_max_deferment(cs);
@@ -723,6 +761,8 @@ static void __clocksource_change_rating(struct clocksource *cs, int rating)
/**
* clocksource_change_rating - Change the rating of a registered clocksource
+ * @cs: clocksource to be changed
+ * @rating: new rating
*/
void clocksource_change_rating(struct clocksource *cs, int rating)
{
@@ -734,6 +774,7 @@ EXPORT_SYMBOL(clocksource_change_rating);
/**
* clocksource_unregister - remove a registered clocksource
+ * @cs: clocksource to be unregistered
*/
void clocksource_unregister(struct clocksource *cs)
{
@@ -749,6 +790,7 @@ EXPORT_SYMBOL(clocksource_unregister);
/**
* sysfs_show_current_clocksources - sysfs interface for current clocksource
* @dev: unused
+ * @attr: unused
* @buf: char buffer to be filled with clocksource list
*
* Provides sysfs interface for listing current clocksource.
@@ -769,6 +811,7 @@ sysfs_show_current_clocksources(struct sys_device *dev,
/**
* sysfs_override_clocksource - interface for manually overriding clocksource
* @dev: unused
+ * @attr: unused
* @buf: name of override clocksource
* @count: length of buffer
*
@@ -804,6 +847,7 @@ static ssize_t sysfs_override_clocksource(struct sys_device *dev,
/**
* sysfs_show_available_clocksources - sysfs interface for listing clocksource
* @dev: unused
+ * @attr: unused
* @buf: char buffer to be filled with clocksource list
*
* Provides sysfs interface for listing registered clocksources
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index f954282d9a8..fd4a7b1625a 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -71,7 +71,7 @@ int tick_check_broadcast_device(struct clock_event_device *dev)
(dev->features & CLOCK_EVT_FEAT_C3STOP))
return 0;
- clockevents_exchange_device(NULL, dev);
+ clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
tick_broadcast_device.evtdev = dev;
if (!cpumask_empty(tick_get_broadcast_mask()))
tick_broadcast_start_periodic(dev);
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 2b021b0e850..237841378c0 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -249,6 +249,8 @@ ktime_t ktime_get(void)
secs = xtime.tv_sec + wall_to_monotonic.tv_sec;
nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec;
nsecs += timekeeping_get_ns();
+ /* If arch requires, add in gettimeoffset() */
+ nsecs += arch_gettimeoffset();
} while (read_seqretry(&xtime_lock, seq));
/*
@@ -280,6 +282,8 @@ void ktime_get_ts(struct timespec *ts)
*ts = xtime;
tomono = wall_to_monotonic;
nsecs = timekeeping_get_ns();
+ /* If arch requires, add in gettimeoffset() */
+ nsecs += arch_gettimeoffset();
} while (read_seqretry(&xtime_lock, seq));
@@ -802,14 +806,44 @@ static void timekeeping_adjust(s64 offset)
s64 error, interval = timekeeper.cycle_interval;
int adj;
+ /*
+ * The point of this is to check if the error is greater than half
+ * an interval.
+ *
+ * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs.
+ *
+ * Note we subtract one in the shift, so that error is really error*2.
+ * This "saves" dividing (shifting) interval twice, but keeps the
+ * (error > interval) comparison as still measuring if error is
+ * larger than half an interval.
+ *
+ * Note: It does not "save" on aggravation when reading the code.
+ */
error = timekeeper.ntp_error >> (timekeeper.ntp_error_shift - 1);
if (error > interval) {
+ /*
+ * We now divide error by 4 (via shift), which checks if
+ * the error is greater than twice the interval.
+ * If it is greater, we need a bigadjust; if it's smaller,
+ * we can adjust by 1.
+ */
error >>= 2;
+ /*
+ * XXX - In update_wall_time, we round up to the next
+ * nanosecond, and store the amount rounded up into
+ * the error. This causes the likely below to be unlikely.
+ *
+ * The proper fix is to avoid rounding up by using
+ * the high precision timekeeper.xtime_nsec instead of
+ * xtime.tv_nsec everywhere. Fixing this will take some
+ * time.
+ */
if (likely(error <= interval))
adj = 1;
else
adj = timekeeping_bigadjust(error, &interval, &offset);
} else if (error < -interval) {
+ /* See comment above, this is just switched for the negative */
error >>= 2;
if (likely(error >= -interval)) {
adj = -1;
@@ -817,9 +851,65 @@ static void timekeeping_adjust(s64 offset)
offset = -offset;
} else
adj = timekeeping_bigadjust(error, &interval, &offset);
- } else
+ } else /* No adjustment needed */
return;
+ WARN_ONCE(timekeeper.clock->maxadj &&
+ (timekeeper.mult + adj > timekeeper.clock->mult +
+ timekeeper.clock->maxadj),
+ "Adjusting %s more than 11%% (%ld vs %ld)\n",
+ timekeeper.clock->name, (long)timekeeper.mult + adj,
+ (long)timekeeper.clock->mult +
+ timekeeper.clock->maxadj);
+ /*
+ * So the following can be confusing.
+ *
+ * To keep things simple, let's assume adj == 1 for now.
+ *
+ * When adj != 1, remember that the interval and offset values
+ * have been appropriately scaled so the math is the same.
+ *
+ * The basic idea here is that we're increasing the multiplier
+ * by one, this causes the xtime_interval to be incremented by
+ * one cycle_interval. This is because:
+ * xtime_interval = cycle_interval * mult
+ * So if mult is being incremented by one:
+ * xtime_interval = cycle_interval * (mult + 1)
+ * It's the same as:
+ * xtime_interval = (cycle_interval * mult) + cycle_interval
+ * Which can be shortened to:
+ * xtime_interval += cycle_interval
+ *
+ * So offset stores the non-accumulated cycles. Thus the current
+ * time (in shifted nanoseconds) is:
+ * now = (offset * adj) + xtime_nsec
+ * Now, even though we're adjusting the clock frequency, we have
+ * to keep time consistent. In other words, we can't jump back
+ * in time, and we also want to avoid jumping forward in time.
+ *
+ * So given the same offset value, we need the time to be the same
+ * both before and after the freq adjustment.
+ * now = (offset * adj_1) + xtime_nsec_1
+ * now = (offset * adj_2) + xtime_nsec_2
+ * So:
+ * (offset * adj_1) + xtime_nsec_1 =
+ * (offset * adj_2) + xtime_nsec_2
+ * And we know:
+ * adj_2 = adj_1 + 1
+ * So:
+ * (offset * adj_1) + xtime_nsec_1 =
+ * (offset * (adj_1+1)) + xtime_nsec_2
+ * (offset * adj_1) + xtime_nsec_1 =
+ * (offset * adj_1) + offset + xtime_nsec_2
+ * Canceling the sides:
+ * xtime_nsec_1 = offset + xtime_nsec_2
+ * Which gives us:
+ * xtime_nsec_2 = xtime_nsec_1 - offset
+ * Which simplifies to:
+ * xtime_nsec -= offset
+ *
+ * XXX - TODO: Doc ntp_error calculation.
+ */
timekeeper.mult += adj;
timekeeper.xtime_interval += interval;
timekeeper.xtime_nsec -= offset;
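The invariant behind the derivation above, namely that now = (offset * mult) + xtime_nsec reads the same before and after bumping mult, can be spot-checked with a few lines of throwaway arithmetic; all numbers are invented:

/* Illustrative check of the adjustment invariant; values are made up. */
static int example_adjust_keeps_now_constant(void)
{
	long long offset = 5000;		/* un-accumulated cycles */
	long long mult = 100, xtime_nsec = 123456;
	long long now_before = offset * mult + xtime_nsec;

	mult += 1;				/* timekeeper.mult += adj (adj == 1) */
	xtime_nsec -= offset;			/* timekeeper.xtime_nsec -= offset */

	return now_before == offset * mult + xtime_nsec;	/* always 1 */
}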
diff --git a/kernel/timer.c b/kernel/timer.c
index dbaa62422b1..9c3c62b0c4b 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1368,7 +1368,7 @@ SYSCALL_DEFINE0(getppid)
int pid;
rcu_read_lock();
- pid = task_tgid_vnr(current->real_parent);
+ pid = task_tgid_vnr(rcu_dereference(current->real_parent));
rcu_read_unlock();
return pid;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 900b409543d..b1e8943fed1 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -152,7 +152,6 @@ void clear_ftrace_function(void)
ftrace_pid_function = ftrace_stub;
}
-#undef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
* For those archs that do not test ftrace_trace_stop in their
@@ -1212,7 +1211,9 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
if (!src->count) {
free_ftrace_hash_rcu(*dst);
rcu_assign_pointer(*dst, EMPTY_HASH);
- return 0;
+ /* still need to update the function records */
+ ret = 0;
+ goto out;
}
/*
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 581876f9f38..c212a7f934e 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1078,7 +1078,6 @@ event_subsystem_dir(const char *name, struct dentry *d_events)
/* First see if we did not already create this dir */
list_for_each_entry(system, &event_subsystems, list) {
if (strcmp(system->name, name) == 0) {
- __get_system(system);
system->nr_events++;
return system->entry;
}
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 816d3d07497..95dc31efd6d 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -1649,7 +1649,9 @@ static int replace_system_preds(struct event_subsystem *system,
*/
err = replace_preds(call, NULL, ps, filter_string, true);
if (err)
- goto fail;
+ call->flags |= TRACE_EVENT_FL_NO_SET_FILTER;
+ else
+ call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER;
}
list_for_each_entry(call, &ftrace_events, list) {
@@ -1658,6 +1660,9 @@ static int replace_system_preds(struct event_subsystem *system,
if (strcmp(call->class->system, system->name) != 0)
continue;
+ if (call->flags & TRACE_EVENT_FL_NO_SET_FILTER)
+ continue;
+
filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL);
if (!filter_item)
goto fail_mem;
@@ -1686,7 +1691,7 @@ static int replace_system_preds(struct event_subsystem *system,
* replace the filter for the call.
*/
filter = call->filter;
- call->filter = filter_item->filter;
+ rcu_assign_pointer(call->filter, filter_item->filter);
filter_item->filter = filter;
fail = false;
@@ -1741,7 +1746,7 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
filter = call->filter;
if (!filter)
goto out_unlock;
- call->filter = NULL;
+ RCU_INIT_POINTER(call->filter, NULL);
/* Make sure the filter is not being used */
synchronize_sched();
__free_filter(filter);
@@ -1782,7 +1787,7 @@ out:
* string
*/
tmp = call->filter;
- call->filter = filter;
+ rcu_assign_pointer(call->filter, filter);
if (tmp) {
/* Make sure the call is done with the filter */
synchronize_sched();
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 74c6c7fce74..fea790a2b17 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -245,7 +245,7 @@ static void put_hash_bucket(struct hash_bucket *bucket,
static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
{
- return ((a->dev_addr == a->dev_addr) &&
+ return ((a->dev_addr == b->dev_addr) &&
(a->dev == b->dev)) ? true : false;
}
diff --git a/mm/filemap.c b/mm/filemap.c
index c0018f2d50e..c106d3b3cc6 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2407,7 +2407,6 @@ static ssize_t generic_perform_write(struct file *file,
iov_iter_count(i));
again:
-
/*
* Bring in the user page that we will copy from _first_.
* Otherwise there's a nasty deadlock on copying from the
@@ -2463,7 +2462,10 @@ again:
written += copied;
balance_dirty_pages_ratelimited(mapping);
-
+ if (fatal_signal_pending(current)) {
+ status = -EINTR;
+ break;
+ }
} while (iov_iter_count(i));
return written ? written : status;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 4298abaae15..36b3d988b4e 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2259,12 +2259,8 @@ static void khugepaged_do_scan(struct page **hpage)
static void khugepaged_alloc_sleep(void)
{
- DEFINE_WAIT(wait);
- add_wait_queue(&khugepaged_wait, &wait);
- schedule_timeout_interruptible(
- msecs_to_jiffies(
- khugepaged_alloc_sleep_millisecs));
- remove_wait_queue(&khugepaged_wait, &wait);
+ wait_event_freezable_timeout(khugepaged_wait, false,
+ msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
}
#ifndef CONFIG_NUMA
@@ -2313,14 +2309,10 @@ static void khugepaged_loop(void)
if (unlikely(kthread_should_stop()))
break;
if (khugepaged_has_work()) {
- DEFINE_WAIT(wait);
if (!khugepaged_scan_sleep_millisecs)
continue;
- add_wait_queue(&khugepaged_wait, &wait);
- schedule_timeout_interruptible(
- msecs_to_jiffies(
- khugepaged_scan_sleep_millisecs));
- remove_wait_queue(&khugepaged_wait, &wait);
+ wait_event_freezable_timeout(khugepaged_wait, false,
+ msecs_to_jiffies(khugepaged_scan_sleep_millisecs));
} else if (khugepaged_enabled())
wait_event_freezable(khugepaged_wait,
khugepaged_wait_event());
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index bb28a5f9db8..73f17c0293c 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -576,6 +576,7 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order)
__SetPageHead(page);
for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
__SetPageTail(p);
+ set_page_count(p, 0);
p->first_page = page;
}
}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 6aff93c98ac..b63f5f7dfa0 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4907,9 +4907,9 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
int cpu;
enable_swap_cgroup();
parent = NULL;
- root_mem_cgroup = memcg;
if (mem_cgroup_soft_limit_tree_init())
goto free_out;
+ root_mem_cgroup = memcg;
for_each_possible_cpu(cpu) {
struct memcg_stock_pcp *stock =
&per_cpu(memcg_stock, cpu);
@@ -4948,7 +4948,6 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
return &memcg->css;
free_out:
__mem_cgroup_free(memcg);
- root_mem_cgroup = NULL;
return ERR_PTR(error);
}
diff --git a/mm/migrate.c b/mm/migrate.c
index 578e29174fa..177aca424a0 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -871,9 +871,9 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
if (anon_vma)
put_anon_vma(anon_vma);
-out:
unlock_page(hpage);
+out:
if (rc != -EAGAIN) {
list_del(&hpage->lru);
put_page(hpage);
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 3134ee2fb2e..eeb27e27dce 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -176,7 +176,7 @@ static bool oom_unkillable_task(struct task_struct *p,
unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
const nodemask_t *nodemask, unsigned long totalpages)
{
- int points;
+ long points;
if (oom_unkillable_task(p, mem, nodemask))
return 0;
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 71252486bc6..50f08241f98 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -411,8 +411,13 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
*
* Returns @bdi's dirty limit in pages. The term "dirty" in the context of
* dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages.
- * And the "limit" in the name is not seriously taken as hard limit in
- * balance_dirty_pages().
+ *
+ * Note that balance_dirty_pages() will only seriously take it as a hard limit
+ * when sleeping max_pause per page is not enough to keep the dirty pages under
+ * control. For example, when the device is completely stalled due to some error
+ * conditions, or when there are 1000 dd tasks writing to a slow 10MB/s USB key.
+ * In other, normal situations it acts more gently, throttling the tasks
+ * more (rather than blocking them completely) when the bdi dirty pages go high.
*
* It allocates high/low dirty limits to fast/slow devices, in order to prevent
* - starving fast devices
@@ -594,6 +599,13 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
*/
if (unlikely(bdi_thresh > thresh))
bdi_thresh = thresh;
+ /*
+ * It's very possible that bdi_thresh is close to 0, not because the
+ * device is slow, but because it has remained inactive for a long time.
+ * Grant such devices a reasonably good (hopefully IO efficient)
+ * threshold, so that occasional writes won't be blocked and active
+ * writes can ramp up the threshold quickly.
+ */
bdi_thresh = max(bdi_thresh, (limit - dirty) / 8);
/*
* scale global setpoint to bdi's:
@@ -977,8 +989,7 @@ static unsigned long bdi_max_pause(struct backing_dev_info *bdi,
*
* 8 serves as the safety ratio.
*/
- if (bdi_dirty)
- t = min(t, bdi_dirty * HZ / (8 * bw + 1));
+ t = min(t, bdi_dirty * HZ / (8 * bw + 1));
/*
* The pause time will be settled within range (max_pause/4, max_pause).
@@ -1136,6 +1147,19 @@ pause:
if (task_ratelimit)
break;
+ /*
+ * In the case of an unresponsive NFS server whose dirty
+ * pages exceed dirty_thresh, give the other good bdi's a pipe
+ * to go through, so that tasks on them still remain responsive.
+ *
+ * In theory 1 page is enough to keep the consumer-producer
+ * pipe going: the flusher cleans 1 page => the task dirties 1
+ * more page. However bdi_dirty has accounting errors. So use
+ * the larger and more IO friendly bdi_stat_error.
+ */
+ if (bdi_dirty <= bdi_stat_error(bdi))
+ break;
+
if (fatal_signal_pending(current))
break;
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9dd443d89d8..2b8ba3aebf6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -356,8 +356,8 @@ void prep_compound_page(struct page *page, unsigned long order)
__SetPageHead(page);
for (i = 1; i < nr_pages; i++) {
struct page *p = page + i;
-
__SetPageTail(p);
+ set_page_count(p, 0);
p->first_page = page;
}
}
@@ -3377,9 +3377,15 @@ static void setup_zone_migrate_reserve(struct zone *zone)
unsigned long block_migratetype;
int reserve;
- /* Get the start pfn, end pfn and the number of blocks to reserve */
+ /*
+ * Get the start pfn, end pfn and the number of blocks to reserve
+ * We have to be careful to be aligned to pageblock_nr_pages to
+ * make sure that we always check pfn_valid for the first page in
+ * the block.
+ */
start_pfn = zone->zone_start_pfn;
end_pfn = start_pfn + zone->spanned_pages;
+ start_pfn = roundup(start_pfn, pageblock_nr_pages);
reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
pageblock_order;
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index ea534960a04..12a48a88c0d 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -50,14 +50,13 @@ static struct page **pcpu_get_pages_and_bitmap(struct pcpu_chunk *chunk,
if (!pages || !bitmap) {
if (may_alloc && !pages)
- pages = pcpu_mem_alloc(pages_size);
+ pages = pcpu_mem_zalloc(pages_size);
if (may_alloc && !bitmap)
- bitmap = pcpu_mem_alloc(bitmap_size);
+ bitmap = pcpu_mem_zalloc(bitmap_size);
if (!pages || !bitmap)
return NULL;
}
- memset(pages, 0, pages_size);
bitmap_copy(bitmap, chunk->populated, pcpu_unit_pages);
*bitmapp = bitmap;
@@ -143,8 +142,8 @@ static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
int page_start, int page_end)
{
flush_cache_vunmap(
- pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
- pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
+ pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
+ pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}
static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
@@ -206,8 +205,8 @@ static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
int page_start, int page_end)
{
flush_tlb_kernel_range(
- pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
- pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
+ pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
+ pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}
static int __pcpu_map_pages(unsigned long addr, struct page **pages,
@@ -284,8 +283,8 @@ static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
int page_start, int page_end)
{
flush_cache_vmap(
- pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
- pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
+ pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
+ pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}
/**
diff --git a/mm/percpu.c b/mm/percpu.c
index bf80e55dbed..716eb4acf2f 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -116,9 +116,9 @@ static int pcpu_atom_size __read_mostly;
static int pcpu_nr_slots __read_mostly;
static size_t pcpu_chunk_struct_size __read_mostly;
-/* cpus with the lowest and highest unit numbers */
-static unsigned int pcpu_first_unit_cpu __read_mostly;
-static unsigned int pcpu_last_unit_cpu __read_mostly;
+/* cpus with the lowest and highest unit addresses */
+static unsigned int pcpu_low_unit_cpu __read_mostly;
+static unsigned int pcpu_high_unit_cpu __read_mostly;
/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __read_mostly;
@@ -273,11 +273,11 @@ static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
(rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
/**
- * pcpu_mem_alloc - allocate memory
+ * pcpu_mem_zalloc - allocate memory
* @size: bytes to allocate
*
* Allocate @size bytes. If @size is smaller than PAGE_SIZE,
- * kzalloc() is used; otherwise, vmalloc() is used. The returned
+ * kzalloc() is used; otherwise, vzalloc() is used. The returned
* memory is always zeroed.
*
* CONTEXT:
@@ -286,7 +286,7 @@ static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
* RETURNS:
* Pointer to the allocated area on success, NULL on failure.
*/
-static void *pcpu_mem_alloc(size_t size)
+static void *pcpu_mem_zalloc(size_t size)
{
if (WARN_ON_ONCE(!slab_is_available()))
return NULL;
@@ -302,7 +302,7 @@ static void *pcpu_mem_alloc(size_t size)
* @ptr: memory to free
* @size: size of the area
*
- * Free @ptr. @ptr should have been allocated using pcpu_mem_alloc().
+ * Free @ptr. @ptr should have been allocated using pcpu_mem_zalloc().
*/
static void pcpu_mem_free(void *ptr, size_t size)
{
@@ -384,7 +384,7 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
unsigned long flags;
- new = pcpu_mem_alloc(new_size);
+ new = pcpu_mem_zalloc(new_size);
if (!new)
return -ENOMEM;
@@ -604,11 +604,12 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
{
struct pcpu_chunk *chunk;
- chunk = pcpu_mem_alloc(pcpu_chunk_struct_size);
+ chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size);
if (!chunk)
return NULL;
- chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
+ chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
+ sizeof(chunk->map[0]));
if (!chunk->map) {
kfree(chunk);
return NULL;
@@ -977,6 +978,17 @@ bool is_kernel_percpu_address(unsigned long addr)
* address. The caller is responsible for ensuring @addr stays valid
* until this function finishes.
*
+ * The percpu allocator has a special setup for the first chunk, which currently
+ * supports either embedding in linear address space or vmalloc mapping,
+ * and, from the second one, the backing allocator (currently either vm or
+ * km) provides translation.
+ *
+ * The addr can be translated simply without checking if it falls into the
+ * first chunk. But the current code reflects better how percpu allocator
+ * actually works, and the verification can discover both bugs in percpu
+ * allocator itself and per_cpu_ptr_to_phys() callers. So we keep current
+ * code.
+ *
* RETURNS:
* The physical address for @addr.
*/
@@ -984,19 +996,19 @@ phys_addr_t per_cpu_ptr_to_phys(void *addr)
{
void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
bool in_first_chunk = false;
- unsigned long first_start, first_end;
+ unsigned long first_low, first_high;
unsigned int cpu;
/*
- * The following test on first_start/end isn't strictly
+ * The following test on unit_low/high isn't strictly
* necessary but will speed up lookups of addresses which
* aren't in the first chunk.
*/
- first_start = pcpu_chunk_addr(pcpu_first_chunk, pcpu_first_unit_cpu, 0);
- first_end = pcpu_chunk_addr(pcpu_first_chunk, pcpu_last_unit_cpu,
- pcpu_unit_pages);
- if ((unsigned long)addr >= first_start &&
- (unsigned long)addr < first_end) {
+ first_low = pcpu_chunk_addr(pcpu_first_chunk, pcpu_low_unit_cpu, 0);
+ first_high = pcpu_chunk_addr(pcpu_first_chunk, pcpu_high_unit_cpu,
+ pcpu_unit_pages);
+ if ((unsigned long)addr >= first_low &&
+ (unsigned long)addr < first_high) {
for_each_possible_cpu(cpu) {
void *start = per_cpu_ptr(base, cpu);
@@ -1011,9 +1023,11 @@ phys_addr_t per_cpu_ptr_to_phys(void *addr)
if (!is_vmalloc_addr(addr))
return __pa(addr);
else
- return page_to_phys(vmalloc_to_page(addr));
+ return page_to_phys(vmalloc_to_page(addr)) +
+ offset_in_page(addr);
} else
- return page_to_phys(pcpu_addr_to_page(addr));
+ return page_to_phys(pcpu_addr_to_page(addr)) +
+ offset_in_page(addr);
}
/**
@@ -1233,7 +1247,9 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
for (cpu = 0; cpu < nr_cpu_ids; cpu++)
unit_map[cpu] = UINT_MAX;
- pcpu_first_unit_cpu = NR_CPUS;
+
+ pcpu_low_unit_cpu = NR_CPUS;
+ pcpu_high_unit_cpu = NR_CPUS;
for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
const struct pcpu_group_info *gi = &ai->groups[group];
@@ -1253,9 +1269,13 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
unit_map[cpu] = unit + i;
unit_off[cpu] = gi->base_offset + i * ai->unit_size;
- if (pcpu_first_unit_cpu == NR_CPUS)
- pcpu_first_unit_cpu = cpu;
- pcpu_last_unit_cpu = cpu;
+ /* determine low/high unit_cpu */
+ if (pcpu_low_unit_cpu == NR_CPUS ||
+ unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
+ pcpu_low_unit_cpu = cpu;
+ if (pcpu_high_unit_cpu == NR_CPUS ||
+ unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
+ pcpu_high_unit_cpu = cpu;
}
}
pcpu_nr_units = unit;
@@ -1889,7 +1909,7 @@ void __init percpu_init_late(void)
BUILD_BUG_ON(size > PAGE_SIZE);
- map = pcpu_mem_alloc(size);
+ map = pcpu_mem_zalloc(size);
BUG_ON(!map);
spin_lock_irqsave(&pcpu_lock, flags);
diff --git a/mm/slab.c b/mm/slab.c
index 708efe88615..83311c9aaf9 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -595,6 +595,7 @@ static enum {
PARTIAL_AC,
PARTIAL_L3,
EARLY,
+ LATE,
FULL
} g_cpucache_up;
@@ -671,7 +672,7 @@ static void init_node_lock_keys(int q)
{
struct cache_sizes *s = malloc_sizes;
- if (g_cpucache_up != FULL)
+ if (g_cpucache_up < LATE)
return;
for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
@@ -1666,6 +1667,8 @@ void __init kmem_cache_init_late(void)
{
struct kmem_cache *cachep;
+ g_cpucache_up = LATE;
+
/* Annotate slab for lockdep -- annotate the malloc caches */
init_lock_keys();
diff --git a/mm/slub.c b/mm/slub.c
index 7d2a996c307..ed3334d9b6d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1862,7 +1862,7 @@ static void unfreeze_partials(struct kmem_cache *s)
{
struct kmem_cache_node *n = NULL;
struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);
- struct page *page;
+ struct page *page, *discard_page = NULL;
while ((page = c->partial)) {
enum slab_modes { M_PARTIAL, M_FREE };
@@ -1904,7 +1904,8 @@ static void unfreeze_partials(struct kmem_cache *s)
if (l == M_PARTIAL)
remove_partial(n, page);
else
- add_partial(n, page, 1);
+ add_partial(n, page,
+ DEACTIVATE_TO_TAIL);
l = m;
}
@@ -1915,14 +1916,22 @@ static void unfreeze_partials(struct kmem_cache *s)
"unfreezing slab"));
if (m == M_FREE) {
- stat(s, DEACTIVATE_EMPTY);
- discard_slab(s, page);
- stat(s, FREE_SLAB);
+ page->next = discard_page;
+ discard_page = page;
}
}
if (n)
spin_unlock(&n->list_lock);
+
+ while (discard_page) {
+ page = discard_page;
+ discard_page = discard_page->next;
+
+ stat(s, DEACTIVATE_EMPTY);
+ discard_slab(s, page);
+ stat(s, FREE_SLAB);
+ }
}
/*
@@ -1969,7 +1978,7 @@ int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
page->pobjects = pobjects;
page->next = oldpage;
- } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
+ } while (irqsafe_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
stat(s, CPU_PARTIAL_FREE);
return pobjects;
}
@@ -4435,30 +4444,31 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
for_each_possible_cpu(cpu) {
struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
+ int node = ACCESS_ONCE(c->node);
struct page *page;
- if (!c || c->node < 0)
+ if (node < 0)
continue;
-
- if (c->page) {
- if (flags & SO_TOTAL)
- x = c->page->objects;
+ page = ACCESS_ONCE(c->page);
+ if (page) {
+ if (flags & SO_TOTAL)
+ x = page->objects;
else if (flags & SO_OBJECTS)
- x = c->page->inuse;
+ x = page->inuse;
else
x = 1;
total += x;
- nodes[c->node] += x;
+ nodes[node] += x;
}
page = c->partial;
if (page) {
x = page->pobjects;
- total += x;
- nodes[c->node] += x;
+ total += x;
+ nodes[node] += x;
}
- per_cpu[c->node]++;
+ per_cpu[node]++;
}
}
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 3231bf33287..27be2f0d4cb 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1290,7 +1290,7 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
unsigned long align, unsigned long flags, unsigned long start,
unsigned long end, int node, gfp_t gfp_mask, void *caller)
{
- static struct vmap_area *va;
+ struct vmap_area *va;
struct vm_struct *area;
BUG_ON(in_interrupt());
@@ -1633,6 +1633,8 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
goto fail;
addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);
+ if (!addr)
+ return NULL;
/*
* In this function, newly allocated vm_struct is not added
diff --git a/mm/vmscan.c b/mm/vmscan.c
index a1893c05079..f54a05b7a61 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -183,7 +183,7 @@ static unsigned long zone_nr_lru_pages(struct zone *zone,
*/
void register_shrinker(struct shrinker *shrinker)
{
- shrinker->nr = 0;
+ atomic_long_set(&shrinker->nr_in_batch, 0);
down_write(&shrinker_rwsem);
list_add_tail(&shrinker->list, &shrinker_list);
up_write(&shrinker_rwsem);
@@ -247,25 +247,26 @@ unsigned long shrink_slab(struct shrink_control *shrink,
list_for_each_entry(shrinker, &shrinker_list, list) {
unsigned long long delta;
- unsigned long total_scan;
- unsigned long max_pass;
+ long total_scan;
+ long max_pass;
int shrink_ret = 0;
long nr;
long new_nr;
long batch_size = shrinker->batch ? shrinker->batch
: SHRINK_BATCH;
+ max_pass = do_shrinker_shrink(shrinker, shrink, 0);
+ if (max_pass <= 0)
+ continue;
+
/*
* copy the current shrinker scan count into a local variable
* and zero it so that other concurrent shrinker invocations
* don't also do this scanning work.
*/
- do {
- nr = shrinker->nr;
- } while (cmpxchg(&shrinker->nr, nr, 0) != nr);
+ nr = atomic_long_xchg(&shrinker->nr_in_batch, 0);
total_scan = nr;
- max_pass = do_shrinker_shrink(shrinker, shrink, 0);
delta = (4 * nr_pages_scanned) / shrinker->seeks;
delta *= max_pass;
do_div(delta, lru_pages + 1);
@@ -325,12 +326,11 @@ unsigned long shrink_slab(struct shrink_control *shrink,
* manner that handles concurrent updates. If we exhausted the
* scan, there is no need to do an update.
*/
- do {
- nr = shrinker->nr;
- new_nr = total_scan + nr;
- if (total_scan <= 0)
- break;
- } while (cmpxchg(&shrinker->nr, nr, new_nr) != nr);
+ if (total_scan > 0)
+ new_nr = atomic_long_add_return(total_scan,
+ &shrinker->nr_in_batch);
+ else
+ new_nr = atomic_long_read(&shrinker->nr_in_batch);
trace_mm_shrink_slab_end(shrinker, shrink_ret, nr, new_nr);
}
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index c7aafc7c5ed..5f09a578d49 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -245,9 +245,11 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
if (tt_global_entry) {
/* This node is probably going to update its tt table */
tt_global_entry->orig_node->tt_poss_change = true;
- /* The global entry has to be marked as PENDING and has to be
+ /* The global entry has to be marked as ROAMING and has to be
* kept for consistency purpose */
- tt_global_entry->flags |= TT_CLIENT_PENDING;
+ tt_global_entry->flags |= TT_CLIENT_ROAM;
+ tt_global_entry->roam_at = jiffies;
+
send_roam_adv(bat_priv, tt_global_entry->addr,
tt_global_entry->orig_node);
}
@@ -694,6 +696,7 @@ void tt_global_del(struct bat_priv *bat_priv,
const char *message, bool roaming)
{
struct tt_global_entry *tt_global_entry = NULL;
+ struct tt_local_entry *tt_local_entry = NULL;
tt_global_entry = tt_global_hash_find(bat_priv, addr);
if (!tt_global_entry)
@@ -701,15 +704,29 @@ void tt_global_del(struct bat_priv *bat_priv,
if (tt_global_entry->orig_node == orig_node) {
if (roaming) {
- tt_global_entry->flags |= TT_CLIENT_ROAM;
- tt_global_entry->roam_at = jiffies;
- goto out;
+ /* if we are deleting a global entry due to a roam
+ * event, there are two possibilities:
+ * 1) the client roamed from node A to node B => we mark
+ * it with TT_CLIENT_ROAM, we start a timer and we
+ * wait for node B to claim it. In case of timeout
+ * the entry is purged.
+ * 2) the client roamed to us => we can directly delete
+ * the global entry, since it is useless now. */
+ tt_local_entry = tt_local_hash_find(bat_priv,
+ tt_global_entry->addr);
+ if (!tt_local_entry) {
+ tt_global_entry->flags |= TT_CLIENT_ROAM;
+ tt_global_entry->roam_at = jiffies;
+ goto out;
+ }
}
_tt_global_del(bat_priv, tt_global_entry, message);
}
out:
if (tt_global_entry)
tt_global_entry_free_ref(tt_global_entry);
+ if (tt_local_entry)
+ tt_local_entry_free_ref(tt_local_entry);
}
void tt_global_del_orig(struct bat_priv *bat_priv,
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 91bcd3a961e..1eea8208b2c 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -79,17 +79,12 @@ static struct bnep_session *__bnep_get_session(u8 *dst)
static void __bnep_link_session(struct bnep_session *s)
{
- /* It's safe to call __module_get() here because sessions are added
- by the socket layer which has to hold the reference to this module.
- */
- __module_get(THIS_MODULE);
list_add(&s->list, &bnep_session_list);
}
static void __bnep_unlink_session(struct bnep_session *s)
{
list_del(&s->list);
- module_put(THIS_MODULE);
}
static int bnep_send(struct bnep_session *s, void *data, size_t len)
@@ -530,6 +525,7 @@ static int bnep_session(void *arg)
up_write(&bnep_session_sem);
free_netdev(dev);
+ module_put_and_exit(0);
return 0;
}
@@ -616,9 +612,11 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
__bnep_link_session(s);
+ __module_get(THIS_MODULE);
s->task = kthread_run(bnep_session, s, "kbnepd %s", dev->name);
if (IS_ERR(s->task)) {
/* Session thread start failed, gotta cleanup. */
+ module_put(THIS_MODULE);
unregister_netdev(dev);
__bnep_unlink_session(s);
err = PTR_ERR(s->task);
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 7d00ddf9e9d..5a6e634f7fc 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -67,14 +67,12 @@ static struct cmtp_session *__cmtp_get_session(bdaddr_t *bdaddr)
static void __cmtp_link_session(struct cmtp_session *session)
{
- __module_get(THIS_MODULE);
list_add(&session->list, &cmtp_session_list);
}
static void __cmtp_unlink_session(struct cmtp_session *session)
{
list_del(&session->list);
- module_put(THIS_MODULE);
}
static void __cmtp_copy_session(struct cmtp_session *session, struct cmtp_conninfo *ci)
@@ -327,6 +325,7 @@ static int cmtp_session(void *arg)
up_write(&cmtp_session_sem);
kfree(session);
+ module_put_and_exit(0);
return 0;
}
@@ -376,9 +375,11 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
__cmtp_link_session(session);
+ __module_get(THIS_MODULE);
session->task = kthread_run(cmtp_session, session, "kcmtpd_ctr_%d",
session->num);
if (IS_ERR(session->task)) {
+ module_put(THIS_MODULE);
err = PTR_ERR(session->task);
goto unlink;
}
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index d7d96b6b1f0..643a41b76e2 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -545,7 +545,7 @@ static void hci_setup(struct hci_dev *hdev)
{
hci_setup_event_mask(hdev);
- if (hdev->lmp_ver > 1)
+ if (hdev->hci_ver > 1)
hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
if (hdev->features[6] & LMP_SIMPLE_PAIR) {
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index e5f9ece3c9a..a1daf8227ed 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -18,6 +18,7 @@
#include <net/sock.h>
#include "br_private.h"
+#include "br_private_stp.h"
static inline size_t br_nlmsg_size(void)
{
@@ -188,6 +189,11 @@ static int br_rtm_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
p->state = new_state;
br_log_state(p);
+
+ spin_lock_bh(&p->br->lock);
+ br_port_state_selection(p->br);
+ spin_unlock_bh(&p->br->lock);
+
br_ifinfo_notify(RTM_NEWLINK, p);
return 0;
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index ad0a3f7cf6c..dd147d78a58 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -399,25 +399,24 @@ void br_port_state_selection(struct net_bridge *br)
struct net_bridge_port *p;
unsigned int liveports = 0;
- /* Don't change port states if userspace is handling STP */
- if (br->stp_enabled == BR_USER_STP)
- return;
-
list_for_each_entry(p, &br->port_list, list) {
if (p->state == BR_STATE_DISABLED)
continue;
- if (p->port_no == br->root_port) {
- p->config_pending = 0;
- p->topology_change_ack = 0;
- br_make_forwarding(p);
- } else if (br_is_designated_port(p)) {
- del_timer(&p->message_age_timer);
- br_make_forwarding(p);
- } else {
- p->config_pending = 0;
- p->topology_change_ack = 0;
- br_make_blocking(p);
+ /* Don't change port states if userspace is handling STP */
+ if (br->stp_enabled != BR_USER_STP) {
+ if (p->port_no == br->root_port) {
+ p->config_pending = 0;
+ p->topology_change_ack = 0;
+ br_make_forwarding(p);
+ } else if (br_is_designated_port(p)) {
+ del_timer(&p->message_age_timer);
+ br_make_forwarding(p);
+ } else {
+ p->config_pending = 0;
+ p->topology_change_ack = 0;
+ br_make_blocking(p);
+ }
}
if (p->state == BR_STATE_FORWARDING)
diff --git a/net/caif/cffrml.c b/net/caif/cffrml.c
index f39921171d0..d3ca87bf23b 100644
--- a/net/caif/cffrml.c
+++ b/net/caif/cffrml.c
@@ -136,20 +136,21 @@ static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt)
static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt)
{
- int tmp;
u16 chks;
u16 len;
+ __le16 data;
+
struct cffrml *this = container_obj(layr);
if (this->dofcs) {
chks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff);
- tmp = cpu_to_le16(chks);
- cfpkt_add_trail(pkt, &tmp, 2);
+ data = cpu_to_le16(chks);
+ cfpkt_add_trail(pkt, &data, 2);
} else {
cfpkt_pad_trail(pkt, 2);
}
len = cfpkt_getlen(pkt);
- tmp = cpu_to_le16(len);
- cfpkt_add_head(pkt, &tmp, 2);
+ data = cpu_to_le16(len);
+ cfpkt_add_head(pkt, &data, 2);
cfpkt_info(pkt)->hdr_len += 2;
if (cfpkt_erroneous(pkt)) {
pr_err("Packet is erroneous!\n");
diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c
index 42599e31dca..3a94eae7abe 100644
--- a/net/ceph/crush/mapper.c
+++ b/net/ceph/crush/mapper.c
@@ -477,7 +477,6 @@ int crush_do_rule(struct crush_map *map,
int i, j;
int numrep;
int firstn;
- int rc = -1;
BUG_ON(ruleno >= map->max_rules);
@@ -491,23 +490,18 @@ int crush_do_rule(struct crush_map *map,
* that this may or may not correspond to the specific types
* referenced by the crush rule.
*/
- if (force >= 0) {
- if (force >= map->max_devices ||
- map->device_parents[force] == 0) {
- /*dprintk("CRUSH: forcefed device dne\n");*/
- rc = -1; /* force fed device dne */
- goto out;
- }
- if (!is_out(map, weight, force, x)) {
- while (1) {
- force_context[++force_pos] = force;
- if (force >= 0)
- force = map->device_parents[force];
- else
- force = map->bucket_parents[-1-force];
- if (force == 0)
- break;
- }
+ if (force >= 0 &&
+ force < map->max_devices &&
+ map->device_parents[force] != 0 &&
+ !is_out(map, weight, force, x)) {
+ while (1) {
+ force_context[++force_pos] = force;
+ if (force >= 0)
+ force = map->device_parents[force];
+ else
+ force = map->bucket_parents[-1-force];
+ if (force == 0)
+ break;
}
}
@@ -600,10 +594,7 @@ int crush_do_rule(struct crush_map *map,
BUG_ON(1);
}
}
- rc = result_len;
-
-out:
- return rc;
+ return result_len;
}
diff --git a/net/core/dev.c b/net/core/dev.c
index 6ba50a1e404..5a13edfc9f7 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1396,7 +1396,7 @@ rollback:
for_each_net(net) {
for_each_netdev(net, dev) {
if (dev == last)
- break;
+ goto outroll;
if (dev->flags & IFF_UP) {
nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
@@ -1407,6 +1407,7 @@ rollback:
}
}
+outroll:
raw_notifier_chain_unregister(&netdev_chain, nb);
goto unlock;
}
@@ -4282,6 +4283,12 @@ static int dev_seq_open(struct inode *inode, struct file *file)
sizeof(struct dev_iter_state));
}
+int dev_seq_open_ops(struct inode *inode, struct file *file,
+ const struct seq_operations *ops)
+{
+ return seq_open_net(inode, file, ops, sizeof(struct dev_iter_state));
+}
+
static const struct file_operations dev_seq_fops = {
.owner = THIS_MODULE,
.open = dev_seq_open,
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index 277faef9148..febba516db6 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -696,8 +696,7 @@ static const struct seq_operations dev_mc_seq_ops = {
static int dev_mc_seq_open(struct inode *inode, struct file *file)
{
- return seq_open_net(inode, file, &dev_mc_seq_ops,
- sizeof(struct seq_net_private));
+ return dev_seq_open_ops(inode, file, &dev_mc_seq_ops);
}
static const struct file_operations dev_mc_seq_fops = {
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 039d51e6c28..5ac07d31fbc 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2397,7 +2397,10 @@ static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
struct net *net = seq_file_net(seq);
struct neigh_table *tbl = state->tbl;
- pn = pn->next;
+ do {
+ pn = pn->next;
+ } while (pn && !net_eq(pneigh_net(pn), net));
+
while (!pn) {
if (++state->bucket > PNEIGH_HASHMASK)
break;
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index 182236b2510..9b570a6a33c 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -26,10 +26,11 @@
* but then some measure against one socket starving all other sockets
* would be needed.
*
- * It was 128 by default. Experiments with real servers show, that
+ * Its minimum value is 128. Experiments with real servers show that
* it is absolutely not enough even at 100conn/sec. 256 cures most
- * of problems. This value is adjusted to 128 for very small machines
- * (<=32Mb of memory) and to 1024 on normal or better ones (>=256Mb).
+ * of the problems.
+ * This value is set to 128 for low-memory machines,
+ * and it increases in proportion to the machine's memory.
* Note : Dont forget somaxconn that may limit backlog too.
*/
int sysctl_max_syn_backlog = 256;
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
index 025233de25f..925991ae6f5 100644
--- a/net/core/secure_seq.c
+++ b/net/core/secure_seq.c
@@ -19,6 +19,7 @@ static int __init net_secret_init(void)
}
late_initcall(net_secret_init);
+#ifdef CONFIG_INET
static u32 seq_scale(u32 seq)
{
/*
@@ -33,6 +34,7 @@ static u32 seq_scale(u32 seq)
*/
return seq + (ktime_to_ns(ktime_get_real()) >> 6);
}
+#endif
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
__u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 18a3cebb753..3c30ee4a571 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2230,7 +2230,7 @@ static int skb_prepare_for_shift(struct sk_buff *skb)
* @shiftlen: shift up to this many bytes
*
* Attempts to shift up to shiftlen worth of bytes, which may be less than
- * the length of the skb, from tgt to skb. Returns number bytes shifted.
+ * the length of the skb, from skb to tgt. Returns the number of bytes shifted.
* It's up to caller to free skb if everything was shifted.
*
* If @tgt runs out of frags, the whole operation is aborted.
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 90a919afbed..3f4e5414c8e 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -111,6 +111,7 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
inet->inet_sport, inet->inet_dport, sk);
if (IS_ERR(rt)) {
+ err = PTR_ERR(rt);
rt = NULL;
goto failure;
}
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index a77d16158eb..94f4ec03666 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -112,7 +112,7 @@ static unsigned long dn_rt_deadline;
static int dn_dst_gc(struct dst_ops *ops);
static struct dst_entry *dn_dst_check(struct dst_entry *, __u32);
static unsigned int dn_dst_default_advmss(const struct dst_entry *dst);
-static unsigned int dn_dst_default_mtu(const struct dst_entry *dst);
+static unsigned int dn_dst_mtu(const struct dst_entry *dst);
static void dn_dst_destroy(struct dst_entry *);
static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
static void dn_dst_link_failure(struct sk_buff *);
@@ -135,7 +135,7 @@ static struct dst_ops dn_dst_ops = {
.gc = dn_dst_gc,
.check = dn_dst_check,
.default_advmss = dn_dst_default_advmss,
- .default_mtu = dn_dst_default_mtu,
+ .mtu = dn_dst_mtu,
.cow_metrics = dst_cow_metrics_generic,
.destroy = dn_dst_destroy,
.negative_advice = dn_dst_negative_advice,
@@ -825,9 +825,11 @@ static unsigned int dn_dst_default_advmss(const struct dst_entry *dst)
return dn_mss_from_pmtu(dst->dev, dst_mtu(dst));
}
-static unsigned int dn_dst_default_mtu(const struct dst_entry *dst)
+static unsigned int dn_dst_mtu(const struct dst_entry *dst)
{
- return dst->dev->mtu;
+ unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+ return mtu ? : dst->dev->mtu;
}
static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst, const void *daddr)
diff --git a/net/decnet/dn_timer.c b/net/decnet/dn_timer.c
index 67f691bd4ac..d9c150cc59a 100644
--- a/net/decnet/dn_timer.c
+++ b/net/decnet/dn_timer.c
@@ -36,16 +36,13 @@ static void dn_slow_timer(unsigned long arg);
void dn_start_slow_timer(struct sock *sk)
{
- sk->sk_timer.expires = jiffies + SLOW_INTERVAL;
- sk->sk_timer.function = dn_slow_timer;
- sk->sk_timer.data = (unsigned long)sk;
-
- add_timer(&sk->sk_timer);
+ setup_timer(&sk->sk_timer, dn_slow_timer, (unsigned long)sk);
+ sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL);
}
void dn_stop_slow_timer(struct sock *sk)
{
- del_timer(&sk->sk_timer);
+ sk_stop_timer(sk, &sk->sk_timer);
}
static void dn_slow_timer(unsigned long arg)
@@ -53,12 +50,10 @@ static void dn_slow_timer(unsigned long arg)
struct sock *sk = (struct sock *)arg;
struct dn_scp *scp = DN_SK(sk);
- sock_hold(sk);
bh_lock_sock(sk);
if (sock_owned_by_user(sk)) {
- sk->sk_timer.expires = jiffies + HZ / 10;
- add_timer(&sk->sk_timer);
+ sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 10);
goto out;
}
@@ -100,9 +95,7 @@ static void dn_slow_timer(unsigned long arg)
scp->keepalive_fxn(sk);
}
- sk->sk_timer.expires = jiffies + SLOW_INTERVAL;
-
- add_timer(&sk->sk_timer);
+ sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL);
out:
bh_unlock_sock(sk);
sock_put(sk);
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index c6b5092f29a..65f01dc4756 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1490,7 +1490,9 @@ static int devinet_conf_proc(ctl_table *ctl, int write,
void __user *buffer,
size_t *lenp, loff_t *ppos)
{
+ int old_value = *(int *)ctl->data;
int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+ int new_value = *(int *)ctl->data;
if (write) {
struct ipv4_devconf *cnf = ctl->extra1;
@@ -1501,6 +1503,9 @@ static int devinet_conf_proc(ctl_table *ctl, int write,
if (cnf == net->ipv4.devconf_dflt)
devinet_copy_dflt_conf(net, i);
+ if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1)
+ if ((new_value == 0) && (old_value != 0))
+ rt_cache_flush(net, 0);
}
return ret;
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index c7472eff2d5..b2ca095cb9d 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1716,7 +1716,8 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
if (err) {
int j;
- pmc->sfcount[sfmode]--;
+ if (!delta)
+ pmc->sfcount[sfmode]--;
for (j=0; j<i; j++)
(void) ip_mc_del1_src(pmc, sfmode, &psfsrc[j]);
} else if (isexclude != (pmc->sfcount[MCAST_EXCLUDE] != 0)) {
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 68e8ac51438..ccee270a9b6 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -108,9 +108,6 @@ static int inet_csk_diag_fill(struct sock *sk,
icsk->icsk_ca_ops->name);
}
- if ((ext & (1 << (INET_DIAG_TOS - 1))) && (sk->sk_family != AF_INET6))
- RTA_PUT_U8(skb, INET_DIAG_TOS, inet->tos);
-
r->idiag_family = sk->sk_family;
r->idiag_state = sk->sk_state;
r->idiag_timer = 0;
@@ -125,16 +122,23 @@ static int inet_csk_diag_fill(struct sock *sk,
r->id.idiag_src[0] = inet->inet_rcv_saddr;
r->id.idiag_dst[0] = inet->inet_daddr;
+ /* IPv6 dual-stack sockets use inet->tos for IPv4 connections,
+ * hence this needs to be included regardless of socket family.
+ */
+ if (ext & (1 << (INET_DIAG_TOS - 1)))
+ RTA_PUT_U8(skb, INET_DIAG_TOS, inet->tos);
+
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
if (r->idiag_family == AF_INET6) {
const struct ipv6_pinfo *np = inet6_sk(sk);
+ if (ext & (1 << (INET_DIAG_TCLASS - 1)))
+ RTA_PUT_U8(skb, INET_DIAG_TCLASS, np->tclass);
+
ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
&np->rcv_saddr);
ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
&np->daddr);
- if (ext & (1 << (INET_DIAG_TCLASS - 1)))
- RTA_PUT_U8(skb, INET_DIAG_TCLASS, np->tclass);
}
#endif
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 3b34d1c8627..29a07b6c716 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -84,7 +84,7 @@ int ip_forward(struct sk_buff *skb)
rt = skb_rtable(skb);
- if (opt->is_strictroute && ip_hdr(skb)->daddr != rt->rt_gateway)
+ if (opt->is_strictroute && opt->nexthop != rt->rt_gateway)
goto sr_failed;
if (unlikely(skb->len > dst_mtu(&rt->dst) && !skb_is_gso(skb) &&
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 05d20cca9d6..1e60f767907 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -568,12 +568,13 @@ void ip_forward_options(struct sk_buff *skb)
) {
if (srrptr + 3 > srrspace)
break;
- if (memcmp(&ip_hdr(skb)->daddr, &optptr[srrptr-1], 4) == 0)
+ if (memcmp(&opt->nexthop, &optptr[srrptr-1], 4) == 0)
break;
}
if (srrptr + 3 <= srrspace) {
opt->is_changed = 1;
ip_rt_get_source(&optptr[srrptr-1], skb, rt);
+ ip_hdr(skb)->daddr = opt->nexthop;
optptr[2] = srrptr+4;
} else if (net_ratelimit())
printk(KERN_CRIT "ip_forward(): Argh! Destination lost!\n");
@@ -640,7 +641,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
}
if (srrptr <= srrspace) {
opt->srr_is_hit = 1;
- iph->daddr = nexthop;
+ opt->nexthop = nexthop;
opt->is_changed = 1;
}
return 0;
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 065effd8349..0b2e7329abd 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -285,6 +285,8 @@ static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
if (register_netdevice(dev) < 0)
goto failed_free;
+ strcpy(nt->parms.name, dev->name);
+
dev_hold(dev);
ipip_tunnel_link(ipn, nt);
return nt;
@@ -759,7 +761,6 @@ static int ipip_tunnel_init(struct net_device *dev)
struct ip_tunnel *tunnel = netdev_priv(dev);
tunnel->dev = dev;
- strcpy(tunnel->parms.name, dev->name);
memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
@@ -825,6 +826,7 @@ static void ipip_destroy_tunnels(struct ipip_net *ipn, struct list_head *head)
static int __net_init ipip_init_net(struct net *net)
{
struct ipip_net *ipn = net_generic(net, ipip_net_id);
+ struct ip_tunnel *t;
int err;
ipn->tunnels[0] = ipn->tunnels_wc;
@@ -848,6 +850,9 @@ static int __net_init ipip_init_net(struct net *net)
if ((err = register_netdev(ipn->fb_tunnel_dev)))
goto err_reg_dev;
+ t = netdev_priv(ipn->fb_tunnel_dev);
+
+ strcpy(t->parms.name, ipn->fb_tunnel_dev->name);
return 0;
err_reg_dev:
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index 9899619ab9b..4f47e064e26 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -64,7 +64,8 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
/* Change in oif may mean change in hh_len. */
hh_len = skb_dst(skb)->dev->hard_header_len;
if (skb_headroom(skb) < hh_len &&
- pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC))
+ pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)),
+ 0, GFP_ATOMIC))
return -1;
return 0;
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 1dfc18a03fd..f19f2182894 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -325,7 +325,6 @@ config IP_NF_TARGET_TTL
# raw + specific targets
config IP_NF_RAW
tristate 'raw table support (required for NOTRACK/TRACE)'
- depends on NETFILTER_ADVANCED
help
This option adds a `raw' table to iptables. This table is the very
first in the netfilter framework and hooks in at the PREROUTING
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 0c74da8a047..46af62363b8 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -112,7 +112,7 @@
#include <net/secure_seq.h>
#define RT_FL_TOS(oldflp4) \
- ((u32)(oldflp4->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))
+ ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
#define IP_MAX_MTU 0xFFF0
@@ -131,6 +131,7 @@ static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly = 256;
static int rt_chain_length_max __read_mostly = 20;
+static int redirect_genid;
/*
* Interface to generic destination cache.
@@ -138,7 +139,7 @@ static int rt_chain_length_max __read_mostly = 20;
static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ipv4_default_advmss(const struct dst_entry *dst);
-static unsigned int ipv4_default_mtu(const struct dst_entry *dst);
+static unsigned int ipv4_mtu(const struct dst_entry *dst);
static void ipv4_dst_destroy(struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void ipv4_link_failure(struct sk_buff *skb);
@@ -193,7 +194,7 @@ static struct dst_ops ipv4_dst_ops = {
.gc = rt_garbage_collect,
.check = ipv4_dst_check,
.default_advmss = ipv4_default_advmss,
- .default_mtu = ipv4_default_mtu,
+ .mtu = ipv4_mtu,
.cow_metrics = ipv4_cow_metrics,
.destroy = ipv4_dst_destroy,
.ifdown = ipv4_dst_ifdown,
@@ -416,9 +417,13 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
else {
struct rtable *r = v;
struct neighbour *n;
- int len;
+ int len, HHUptod;
+ rcu_read_lock();
n = dst_get_neighbour(&r->dst);
+ HHUptod = (n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0;
+ rcu_read_unlock();
+
seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
"%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
r->dst.dev ? r->dst.dev->name : "*",
@@ -432,7 +437,7 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
dst_metric(&r->dst, RTAX_RTTVAR)),
r->rt_key_tos,
-1,
- (n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0,
+ HHUptod,
r->rt_spec_dst, &len);
seq_printf(seq, "%*s\n", 127 - len, "");
@@ -837,6 +842,7 @@ static void rt_cache_invalidate(struct net *net)
get_random_bytes(&shuffle, sizeof(shuffle));
atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
+ redirect_genid++;
}
/*
@@ -1304,7 +1310,7 @@ static void rt_del(unsigned hash, struct rtable *rt)
spin_unlock_bh(rt_hash_lock_addr(hash));
}
-static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
+static void check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
{
struct rtable *rt = (struct rtable *) dst;
__be32 orig_gw = rt->rt_gateway;
@@ -1315,21 +1321,19 @@ static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
rt->rt_gateway = peer->redirect_learned.a4;
n = ipv4_neigh_lookup(&rt->dst, &rt->rt_gateway);
- if (IS_ERR(n))
- return PTR_ERR(n);
+ if (IS_ERR(n)) {
+ rt->rt_gateway = orig_gw;
+ return;
+ }
old_n = xchg(&rt->dst._neighbour, n);
if (old_n)
neigh_release(old_n);
- if (!n || !(n->nud_state & NUD_VALID)) {
- if (n)
- neigh_event_send(n, NULL);
- rt->rt_gateway = orig_gw;
- return -EAGAIN;
+ if (!(n->nud_state & NUD_VALID)) {
+ neigh_event_send(n, NULL);
} else {
rt->rt_flags |= RTCF_REDIRECTED;
call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
}
- return 0;
}
/* called in rcu_read_lock() section */
@@ -1391,8 +1395,10 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
peer = rt->peer;
if (peer) {
- if (peer->redirect_learned.a4 != new_gw) {
+ if (peer->redirect_learned.a4 != new_gw ||
+ peer->redirect_genid != redirect_genid) {
peer->redirect_learned.a4 = new_gw;
+ peer->redirect_genid = redirect_genid;
atomic_inc(&__rt_peer_genid);
}
check_peer_redir(&rt->dst, peer);
@@ -1685,12 +1691,8 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
}
-static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
+static void ipv4_validate_peer(struct rtable *rt)
{
- struct rtable *rt = (struct rtable *) dst;
-
- if (rt_is_expired(rt))
- return NULL;
if (rt->rt_peer_genid != rt_peer_genid()) {
struct inet_peer *peer;
@@ -1699,17 +1701,26 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
peer = rt->peer;
if (peer) {
- check_peer_pmtu(dst, peer);
+ check_peer_pmtu(&rt->dst, peer);
+ if (peer->redirect_genid != redirect_genid)
+ peer->redirect_learned.a4 = 0;
if (peer->redirect_learned.a4 &&
- peer->redirect_learned.a4 != rt->rt_gateway) {
- if (check_peer_redir(dst, peer))
- return NULL;
- }
+ peer->redirect_learned.a4 != rt->rt_gateway)
+ check_peer_redir(&rt->dst, peer);
}
rt->rt_peer_genid = rt_peer_genid();
}
+}
+
+static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
+{
+ struct rtable *rt = (struct rtable *) dst;
+
+ if (rt_is_expired(rt))
+ return NULL;
+ ipv4_validate_peer(rt);
return dst;
}
@@ -1814,12 +1825,17 @@ static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
return advmss;
}
-static unsigned int ipv4_default_mtu(const struct dst_entry *dst)
+static unsigned int ipv4_mtu(const struct dst_entry *dst)
{
- unsigned int mtu = dst->dev->mtu;
+ const struct rtable *rt = (const struct rtable *) dst;
+ unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+ if (mtu && rt_is_output_route(rt))
+ return mtu;
+
+ mtu = dst->dev->mtu;
if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
- const struct rtable *rt = (const struct rtable *) dst;
if (rt->rt_gateway != rt->rt_dst && mtu > 576)
mtu = 576;
@@ -1852,6 +1868,8 @@ static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
dst_init_metrics(&rt->dst, peer->metrics, false);
check_peer_pmtu(&rt->dst, peer);
+ if (peer->redirect_genid != redirect_genid)
+ peer->redirect_learned.a4 = 0;
if (peer->redirect_learned.a4 &&
peer->redirect_learned.a4 != rt->rt_gateway) {
rt->rt_gateway = peer->redirect_learned.a4;
@@ -2357,6 +2375,7 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
rth->rt_mark == skb->mark &&
net_eq(dev_net(rth->dst.dev), net) &&
!rt_is_expired(rth)) {
+ ipv4_validate_peer(rth);
if (noref) {
dst_use_noref(&rth->dst, jiffies);
skb_dst_set_noref(skb, &rth->dst);
@@ -2415,11 +2434,11 @@ EXPORT_SYMBOL(ip_route_input_common);
static struct rtable *__mkroute_output(const struct fib_result *res,
const struct flowi4 *fl4,
__be32 orig_daddr, __be32 orig_saddr,
- int orig_oif, struct net_device *dev_out,
+ int orig_oif, __u8 orig_rtos,
+ struct net_device *dev_out,
unsigned int flags)
{
struct fib_info *fi = res->fi;
- u32 tos = RT_FL_TOS(fl4);
struct in_device *in_dev;
u16 type = res->type;
struct rtable *rth;
@@ -2470,7 +2489,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
rth->rt_genid = rt_genid(dev_net(dev_out));
rth->rt_flags = flags;
rth->rt_type = type;
- rth->rt_key_tos = tos;
+ rth->rt_key_tos = orig_rtos;
rth->rt_dst = fl4->daddr;
rth->rt_src = fl4->saddr;
rth->rt_route_iif = 0;
@@ -2520,7 +2539,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
{
struct net_device *dev_out = NULL;
- u32 tos = RT_FL_TOS(fl4);
+ __u8 tos = RT_FL_TOS(fl4);
unsigned int flags = 0;
struct fib_result res;
struct rtable *rth;
@@ -2696,7 +2715,7 @@ static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
make_route:
rth = __mkroute_output(&res, fl4, orig_daddr, orig_saddr, orig_oif,
- dev_out, flags);
+ tos, dev_out, flags);
if (!IS_ERR(rth)) {
unsigned int hash;
@@ -2732,6 +2751,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *flp4)
(IPTOS_RT_MASK | RTO_ONLINK)) &&
net_eq(dev_net(rth->dst.dev), net) &&
!rt_is_expired(rth)) {
+ ipv4_validate_peer(rth);
dst_use(&rth->dst, jiffies);
RT_CACHE_STAT_INC(out_hit);
rcu_read_unlock_bh();
@@ -2755,9 +2775,11 @@ static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 coo
return NULL;
}
-static unsigned int ipv4_blackhole_default_mtu(const struct dst_entry *dst)
+static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
{
- return 0;
+ unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+ return mtu ? : dst->dev->mtu;
}
static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
@@ -2775,7 +2797,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
.protocol = cpu_to_be16(ETH_P_IP),
.destroy = ipv4_dst_destroy,
.check = ipv4_blackhole_dst_check,
- .default_mtu = ipv4_blackhole_default_mtu,
+ .mtu = ipv4_blackhole_mtu,
.default_advmss = ipv4_default_advmss,
.update_pmtu = ipv4_rt_blackhole_update_pmtu,
.cow_metrics = ipv4_rt_blackhole_cow_metrics,
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index ab0966df1e2..5a65eeac1d2 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1164,7 +1164,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
struct inet_sock *inet = inet_sk(sk);
struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
struct sk_buff *skb;
- unsigned int ulen;
+ unsigned int ulen, copied;
int peeked;
int err;
int is_udplite = IS_UDPLITE(sk);
@@ -1186,9 +1186,10 @@ try_again:
goto out;
ulen = skb->len - sizeof(struct udphdr);
- if (len > ulen)
- len = ulen;
- else if (len < ulen)
+ copied = len;
+ if (copied > ulen)
+ copied = ulen;
+ else if (copied < ulen)
msg->msg_flags |= MSG_TRUNC;
/*
@@ -1197,14 +1198,14 @@ try_again:
* coverage checksum (UDP-Lite), do it before the copy.
*/
- if (len < ulen || UDP_SKB_CB(skb)->partial_cov) {
+ if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
if (udp_lib_checksum_complete(skb))
goto csum_copy_err;
}
if (skb_csum_unnecessary(skb))
err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
- msg->msg_iov, len);
+ msg->msg_iov, copied);
else {
err = skb_copy_and_csum_datagram_iovec(skb,
sizeof(struct udphdr),
@@ -1233,7 +1234,7 @@ try_again:
if (inet->cmsg_flags)
ip_cmsg_recv(msg, skb);
- err = len;
+ err = copied;
if (flags & MSG_TRUNC)
err = ulen;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index cf88df82e2c..36806def8cf 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1805,7 +1805,8 @@ static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
return ERR_PTR(-EACCES);
/* Add default multicast route */
- addrconf_add_mroute(dev);
+ if (!(dev->flags & IFF_LOOPBACK))
+ addrconf_add_mroute(dev);
/* Add link local route */
addrconf_add_lroute(dev);
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index fee46d5a2f1..1567fb12039 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -85,7 +85,7 @@ struct dst_entry *inet6_csk_route_req(struct sock *sk,
* request_sock (formerly open request) hash tables.
*/
static u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport,
- const u32 rnd, const u16 synq_hsize)
+ const u32 rnd, const u32 synq_hsize)
{
u32 c;
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index c99e3ee9781..26cb08c84b7 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -503,7 +503,7 @@ done:
goto e_inval;
if (val > 255 || val < -1)
goto e_inval;
- np->mcast_hops = val;
+ np->mcast_hops = (val == -1 ? IPV6_DEFAULT_MCASTHOPS : val);
retv = 0;
break;
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 44e5b7f2a6c..0cb78d7ddaf 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1571,7 +1571,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
}
if (!rt->rt6i_peer)
rt6_bind_peer(rt, 1);
- if (inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
+ if (!inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
goto release;
if (dev->addr_len) {
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 448464844a2..f792b34cbe9 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -186,7 +186,6 @@ config IP6_NF_MANGLE
config IP6_NF_RAW
tristate 'raw table support (required for TRACE)'
- depends on NETFILTER_ADVANCED
help
This option adds a `raw' table to ip6tables. This table is the very
first in the netfilter framework and hooks in at the PREROUTING
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 8473016bba4..b582a0a0f1c 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -77,7 +77,7 @@ static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
const struct in6_addr *dest);
static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ip6_default_advmss(const struct dst_entry *dst);
-static unsigned int ip6_default_mtu(const struct dst_entry *dst);
+static unsigned int ip6_mtu(const struct dst_entry *dst);
static struct dst_entry *ip6_negative_advice(struct dst_entry *);
static void ip6_dst_destroy(struct dst_entry *);
static void ip6_dst_ifdown(struct dst_entry *,
@@ -144,7 +144,7 @@ static struct dst_ops ip6_dst_ops_template = {
.gc_thresh = 1024,
.check = ip6_dst_check,
.default_advmss = ip6_default_advmss,
- .default_mtu = ip6_default_mtu,
+ .mtu = ip6_mtu,
.cow_metrics = ipv6_cow_metrics,
.destroy = ip6_dst_destroy,
.ifdown = ip6_dst_ifdown,
@@ -155,9 +155,11 @@ static struct dst_ops ip6_dst_ops_template = {
.neigh_lookup = ip6_neigh_lookup,
};
-static unsigned int ip6_blackhole_default_mtu(const struct dst_entry *dst)
+static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
{
- return 0;
+ unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+ return mtu ? : dst->dev->mtu;
}
static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
@@ -175,7 +177,7 @@ static struct dst_ops ip6_dst_blackhole_ops = {
.protocol = cpu_to_be16(ETH_P_IPV6),
.destroy = ip6_dst_destroy,
.check = ip6_dst_check,
- .default_mtu = ip6_blackhole_default_mtu,
+ .mtu = ip6_blackhole_mtu,
.default_advmss = ip6_default_advmss,
.update_pmtu = ip6_rt_blackhole_update_pmtu,
.cow_metrics = ip6_rt_blackhole_cow_metrics,
@@ -726,7 +728,7 @@ static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort,
int attempts = !in_softirq();
if (!(rt->rt6i_flags&RTF_GATEWAY)) {
- if (rt->rt6i_dst.plen != 128 &&
+ if (ort->rt6i_dst.plen != 128 &&
ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
rt->rt6i_flags |= RTF_ANYCAST;
ipv6_addr_copy(&rt->rt6i_gateway, daddr);
@@ -1041,10 +1043,15 @@ static unsigned int ip6_default_advmss(const struct dst_entry *dst)
return mtu;
}
-static unsigned int ip6_default_mtu(const struct dst_entry *dst)
+static unsigned int ip6_mtu(const struct dst_entry *dst)
{
- unsigned int mtu = IPV6_MIN_MTU;
struct inet6_dev *idev;
+ unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+ if (mtu)
+ return mtu;
+
+ mtu = IPV6_MIN_MTU;
rcu_read_lock();
idev = __in6_dev_get(dst->dev);
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index a7a18602a04..96f3623618e 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -263,6 +263,8 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
if (register_netdevice(dev) < 0)
goto failed_free;
+ strcpy(nt->parms.name, dev->name);
+
dev_hold(dev);
ipip6_tunnel_link(sitn, nt);
@@ -1144,7 +1146,6 @@ static int ipip6_tunnel_init(struct net_device *dev)
struct ip_tunnel *tunnel = netdev_priv(dev);
tunnel->dev = dev;
- strcpy(tunnel->parms.name, dev->name);
memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
@@ -1207,6 +1208,7 @@ static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_hea
static int __net_init sit_init_net(struct net *net)
{
struct sit_net *sitn = net_generic(net, sit_net_id);
+ struct ip_tunnel *t;
int err;
sitn->tunnels[0] = sitn->tunnels_wc;
@@ -1231,6 +1233,9 @@ static int __net_init sit_init_net(struct net *net)
if ((err = register_netdev(sitn->fb_tunnel_dev)))
goto err_reg_dev;
+ t = netdev_priv(sitn->fb_tunnel_dev);
+
+ strcpy(t->parms.name, sitn->fb_tunnel_dev->name);
return 0;
err_reg_dev:
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 36131d122a6..2dea4bb7b54 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1255,6 +1255,13 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
if (!want_cookie || tmp_opt.tstamp_ok)
TCP_ECN_create_request(req, tcp_hdr(skb));
+ treq->iif = sk->sk_bound_dev_if;
+
+ /* So that link locals have meaning */
+ if (!sk->sk_bound_dev_if &&
+ ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
+ treq->iif = inet6_iif(skb);
+
if (!isn) {
struct inet_peer *peer = NULL;
@@ -1264,12 +1271,6 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
atomic_inc(&skb->users);
treq->pktopts = skb;
}
- treq->iif = sk->sk_bound_dev_if;
-
- /* So that link locals have meaning */
- if (!sk->sk_bound_dev_if &&
- ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
- treq->iif = inet6_iif(skb);
if (want_cookie) {
isn = cookie_v6_init_sequence(sk, skb, &req->mss);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 846f4757eb8..8c254191518 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -340,7 +340,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
struct ipv6_pinfo *np = inet6_sk(sk);
struct inet_sock *inet = inet_sk(sk);
struct sk_buff *skb;
- unsigned int ulen;
+ unsigned int ulen, copied;
int peeked;
int err;
int is_udplite = IS_UDPLITE(sk);
@@ -363,9 +363,10 @@ try_again:
goto out;
ulen = skb->len - sizeof(struct udphdr);
- if (len > ulen)
- len = ulen;
- else if (len < ulen)
+ copied = len;
+ if (copied > ulen)
+ copied = ulen;
+ else if (copied < ulen)
msg->msg_flags |= MSG_TRUNC;
is_udp4 = (skb->protocol == htons(ETH_P_IP));
@@ -376,14 +377,14 @@ try_again:
* coverage checksum (UDP-Lite), do it before the copy.
*/
- if (len < ulen || UDP_SKB_CB(skb)->partial_cov) {
+ if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
if (udp_lib_checksum_complete(skb))
goto csum_copy_err;
}
if (skb_csum_unnecessary(skb))
err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
- msg->msg_iov,len);
+ msg->msg_iov, copied);
else {
err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
if (err == -EINVAL)
@@ -432,7 +433,7 @@ try_again:
datagram_recv_ctl(sk, msg, skb);
}
- err = len;
+ err = copied;
if (flags & MSG_TRUNC)
err = ulen;
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index cf0f308abf5..89ff8c67943 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1072,7 +1072,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
/* Get routing info from the tunnel socket */
skb_dst_drop(skb);
- skb_dst_set(skb, dst_clone(__sk_dst_get(sk)));
+ skb_dst_set(skb, dst_clone(__sk_dst_check(sk, 0)));
inet = inet_sk(sk);
fl = &inet->cork.fl;
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index b3f65520e7a..2e4b961648d 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -161,6 +161,12 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
return -ENOENT;
}
+ /* if we're already stopping, ignore any new requests to stop */
+ if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
+ spin_unlock_bh(&sta->lock);
+ return -EALREADY;
+ }
+
if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
/* not even started yet! */
ieee80211_assign_tid_tx(sta, tid, NULL);
@@ -169,6 +175,8 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
return 0;
}
+ set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);
+
spin_unlock_bh(&sta->lock);
#ifdef CONFIG_MAC80211_HT_DEBUG
@@ -176,8 +184,6 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
sta->sta.addr, tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
- set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);
-
del_timer_sync(&tid_tx->addba_resp_timer);
/*
@@ -187,6 +193,20 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
*/
clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
+ /*
+ * There might be a few packets being processed right now (on
+ * another CPU) that have already gotten past the aggregation
+ * check when it was still OPERATIONAL and consequently have
+ * IEEE80211_TX_CTL_AMPDU set. In that case, this code might
+ * call into the driver at the same time or even before the
+ * TX paths calls into it, which could confuse the driver.
+ *
+ * Wait for all currently running TX paths to finish before
+ * telling the driver. New packets will not go through since
+ * the aggregation session is no longer OPERATIONAL.
+ */
+ synchronize_net();
+
tid_tx->stop_initiator = initiator;
tid_tx->tx_stop = tx;
@@ -283,6 +303,38 @@ ieee80211_wake_queue_agg(struct ieee80211_local *local, int tid)
__release(agg_queue);
}
+/*
+ * splice packets from the STA's pending to the local pending,
+ * requires a call to ieee80211_agg_splice_finish later
+ */
+static void __acquires(agg_queue)
+ieee80211_agg_splice_packets(struct ieee80211_local *local,
+ struct tid_ampdu_tx *tid_tx, u16 tid)
+{
+ int queue = ieee80211_ac_from_tid(tid);
+ unsigned long flags;
+
+ ieee80211_stop_queue_agg(local, tid);
+
+ if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates"
+ " from the pending queue\n", tid))
+ return;
+
+ if (!skb_queue_empty(&tid_tx->pending)) {
+ spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+ /* copy over remaining packets */
+ skb_queue_splice_tail_init(&tid_tx->pending,
+ &local->pending[queue]);
+ spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+ }
+}
+
+static void __releases(agg_queue)
+ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid)
+{
+ ieee80211_wake_queue_agg(local, tid);
+}
+
void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
{
struct tid_ampdu_tx *tid_tx;
@@ -294,19 +346,17 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
/*
- * While we're asking the driver about the aggregation,
- * stop the AC queue so that we don't have to worry
- * about frames that came in while we were doing that,
- * which would require us to put them to the AC pending
- * afterwards which just makes the code more complex.
+ * Start queuing up packets for this aggregation session.
+ * We're going to release them once the driver is OK with
+ * that.
*/
- ieee80211_stop_queue_agg(local, tid);
-
clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);
/*
- * make sure no packets are being processed to get
- * valid starting sequence number
+ * Make sure no packets are being processed. This ensures that
+ * we have a valid starting sequence number and that in-flight
+ * packets have been flushed out and no packets for this TID
+ * will go into the driver during the ampdu_action call.
*/
synchronize_net();
@@ -320,17 +370,15 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
" tid %d\n", tid);
#endif
spin_lock_bh(&sta->lock);
+ ieee80211_agg_splice_packets(local, tid_tx, tid);
ieee80211_assign_tid_tx(sta, tid, NULL);
+ ieee80211_agg_splice_finish(local, tid);
spin_unlock_bh(&sta->lock);
- ieee80211_wake_queue_agg(local, tid);
kfree_rcu(tid_tx, rcu_head);
return;
}
- /* we can take packets again now */
- ieee80211_wake_queue_agg(local, tid);
-
/* activate the timer for the recipient's addBA response */
mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
#ifdef CONFIG_MAC80211_HT_DEBUG
@@ -446,38 +494,6 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
-/*
- * splice packets from the STA's pending to the local pending,
- * requires a call to ieee80211_agg_splice_finish later
- */
-static void __acquires(agg_queue)
-ieee80211_agg_splice_packets(struct ieee80211_local *local,
- struct tid_ampdu_tx *tid_tx, u16 tid)
-{
- int queue = ieee80211_ac_from_tid(tid);
- unsigned long flags;
-
- ieee80211_stop_queue_agg(local, tid);
-
- if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates"
- " from the pending queue\n", tid))
- return;
-
- if (!skb_queue_empty(&tid_tx->pending)) {
- spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
- /* copy over remaining packets */
- skb_queue_splice_tail_init(&tid_tx->pending,
- &local->pending[queue]);
- spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
- }
-}
-
-static void __releases(agg_queue)
-ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid)
-{
- ieee80211_wake_queue_agg(local, tid);
-}
-
static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
struct sta_info *sta, u16 tid)
{
@@ -757,11 +773,27 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
goto out;
}
- del_timer(&tid_tx->addba_resp_timer);
+ del_timer_sync(&tid_tx->addba_resp_timer);
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid);
#endif
+
+ /*
+ * addba_resp_timer may have fired before we got here and
+ * caused WANT_STOP to be set. If that stop request has already
+ * been processed further, STOPPING might be set.
+ */
+ if (test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state) ||
+ test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
+#ifdef CONFIG_MAC80211_HT_DEBUG
+ printk(KERN_DEBUG
+ "got addBA resp for tid %d but we already gave up\n",
+ tid);
+#endif
+ goto out;
+ }
+
/*
* IEEE 802.11-2007 7.3.1.14:
* In an ADDBA Response frame, when the Status Code field
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index c5f341798c1..3110cbdc501 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -274,9 +274,9 @@ static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
PRINT_HT_CAP((htc->cap & BIT(10)), "HT Delayed Block Ack");
- PRINT_HT_CAP((htc->cap & BIT(11)), "Max AMSDU length: "
- "3839 bytes");
PRINT_HT_CAP(!(htc->cap & BIT(11)), "Max AMSDU length: "
+ "3839 bytes");
+ PRINT_HT_CAP((htc->cap & BIT(11)), "Max AMSDU length: "
"7935 bytes");
/*
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index d999bf3b84e..cae443563ec 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -757,6 +757,12 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
if (!local->int_scan_req)
return -ENOMEM;
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ if (!local->hw.wiphy->bands[band])
+ continue;
+ local->int_scan_req->rates[band] = (u32) -1;
+ }
+
/* if low-level driver supports AP, we also support VLAN */
if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) {
hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN);
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 80de436eae2..16518f38611 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -260,7 +260,7 @@ static void ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct ieee80211_radiotap_header *rthdr;
unsigned char *pos;
- __le16 txflags;
+ u16 txflags;
rthdr = (struct ieee80211_radiotap_header *) skb_push(skb, rtap_len);
@@ -290,13 +290,13 @@ static void ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band
txflags = 0;
if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
!is_multicast_ether_addr(hdr->addr1))
- txflags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_FAIL);
+ txflags |= IEEE80211_RADIOTAP_F_TX_FAIL;
if ((info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
(info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
- txflags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_CTS);
+ txflags |= IEEE80211_RADIOTAP_F_TX_CTS;
else if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
- txflags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_RTS);
+ txflags |= IEEE80211_RADIOTAP_F_TX_RTS;
put_unaligned_le16(txflags, pos);
pos += 2;
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index eca0fad0970..d5230ecc784 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1039,7 +1039,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
struct ieee80211_sub_if_data,
u.ap);
- memset(&sta->sta.drv_priv, 0, hw->sta_data_size);
WARN_ON(drv_sta_add(local, sdata, &sta->sta));
}
}
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 8260b13d93c..d5597b759ba 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -201,7 +201,6 @@ config NF_CONNTRACK_BROADCAST
config NF_CONNTRACK_NETBIOS_NS
tristate "NetBIOS name service protocol support"
- depends on NETFILTER_ADVANCED
select NF_CONNTRACK_BROADCAST
help
NetBIOS name service requests are sent as broadcast messages from an
@@ -542,7 +541,6 @@ config NETFILTER_XT_TARGET_NOTRACK
tristate '"NOTRACK" target support'
depends on IP_NF_RAW || IP6_NF_RAW
depends on NF_CONNTRACK
- depends on NETFILTER_ADVANCED
help
The NOTRACK target allows a select rule to specify
which packets *not* to enter the conntrack/NAT
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
index 6ee10f5d59b..37d667e3f6f 100644
--- a/net/netfilter/ipset/ip_set_hash_ipport.c
+++ b/net/netfilter/ipset/ip_set_hash_ipport.c
@@ -158,7 +158,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipport4_elem data = { };
- u32 ip, ip_to, p = 0, port, port_to;
+ u32 ip, ip_to = 0, p = 0, port, port_to;
u32 timeout = h->timeout;
bool with_ports = false;
int ret;
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
index fb90e344e90..e69e2718fbe 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportip.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -162,7 +162,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportip4_elem data = { };
- u32 ip, ip_to, p = 0, port, port_to;
+ u32 ip, ip_to = 0, p = 0, port, port_to;
u32 timeout = h->timeout;
bool with_ports = false;
int ret;
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index deb3e3dfa5f..64199b4e93c 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -184,7 +184,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportnet4_elem data = { .cidr = HOST_MASK };
- u32 ip, ip_to, p = 0, port, port_to;
+ u32 ip, ip_to = 0, p = 0, port, port_to;
u32 ip2_from = 0, ip2_to, ip2_last, ip2;
u32 timeout = h->timeout;
bool with_ports = false;
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index 6b368be937c..b62c4148b92 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -27,22 +27,17 @@
static DEFINE_MUTEX(nf_ct_ecache_mutex);
-struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb __read_mostly;
-EXPORT_SYMBOL_GPL(nf_conntrack_event_cb);
-
-struct nf_exp_event_notifier __rcu *nf_expect_event_cb __read_mostly;
-EXPORT_SYMBOL_GPL(nf_expect_event_cb);
-
/* deliver cached events and clear cache entry - must be called with locally
* disabled softirqs */
void nf_ct_deliver_cached_events(struct nf_conn *ct)
{
+ struct net *net = nf_ct_net(ct);
unsigned long events;
struct nf_ct_event_notifier *notify;
struct nf_conntrack_ecache *e;
rcu_read_lock();
- notify = rcu_dereference(nf_conntrack_event_cb);
+ notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
if (notify == NULL)
goto out_unlock;
@@ -83,19 +78,20 @@ out_unlock:
}
EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);
-int nf_conntrack_register_notifier(struct nf_ct_event_notifier *new)
+int nf_conntrack_register_notifier(struct net *net,
+ struct nf_ct_event_notifier *new)
{
int ret = 0;
struct nf_ct_event_notifier *notify;
mutex_lock(&nf_ct_ecache_mutex);
- notify = rcu_dereference_protected(nf_conntrack_event_cb,
+ notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb,
lockdep_is_held(&nf_ct_ecache_mutex));
if (notify != NULL) {
ret = -EBUSY;
goto out_unlock;
}
- RCU_INIT_POINTER(nf_conntrack_event_cb, new);
+ RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, new);
mutex_unlock(&nf_ct_ecache_mutex);
return ret;
@@ -105,32 +101,34 @@ out_unlock:
}
EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier);
-void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *new)
+void nf_conntrack_unregister_notifier(struct net *net,
+ struct nf_ct_event_notifier *new)
{
struct nf_ct_event_notifier *notify;
mutex_lock(&nf_ct_ecache_mutex);
- notify = rcu_dereference_protected(nf_conntrack_event_cb,
+ notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb,
lockdep_is_held(&nf_ct_ecache_mutex));
BUG_ON(notify != new);
- RCU_INIT_POINTER(nf_conntrack_event_cb, NULL);
+ RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL);
mutex_unlock(&nf_ct_ecache_mutex);
}
EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
-int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *new)
+int nf_ct_expect_register_notifier(struct net *net,
+ struct nf_exp_event_notifier *new)
{
int ret = 0;
struct nf_exp_event_notifier *notify;
mutex_lock(&nf_ct_ecache_mutex);
- notify = rcu_dereference_protected(nf_expect_event_cb,
+ notify = rcu_dereference_protected(net->ct.nf_expect_event_cb,
lockdep_is_held(&nf_ct_ecache_mutex));
if (notify != NULL) {
ret = -EBUSY;
goto out_unlock;
}
- RCU_INIT_POINTER(nf_expect_event_cb, new);
+ RCU_INIT_POINTER(net->ct.nf_expect_event_cb, new);
mutex_unlock(&nf_ct_ecache_mutex);
return ret;
@@ -140,15 +138,16 @@ out_unlock:
}
EXPORT_SYMBOL_GPL(nf_ct_expect_register_notifier);
-void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *new)
+void nf_ct_expect_unregister_notifier(struct net *net,
+ struct nf_exp_event_notifier *new)
{
struct nf_exp_event_notifier *notify;
mutex_lock(&nf_ct_ecache_mutex);
- notify = rcu_dereference_protected(nf_expect_event_cb,
+ notify = rcu_dereference_protected(net->ct.nf_expect_event_cb,
lockdep_is_held(&nf_ct_ecache_mutex));
BUG_ON(notify != new);
- RCU_INIT_POINTER(nf_expect_event_cb, NULL);
+ RCU_INIT_POINTER(net->ct.nf_expect_event_cb, NULL);
mutex_unlock(&nf_ct_ecache_mutex);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index e58aa9b1fe8..ef21b221f03 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -4,7 +4,7 @@
* (C) 2001 by Jay Schulist <jschlst@samba.org>
* (C) 2002-2006 by Harald Welte <laforge@gnumonks.org>
* (C) 2003 by Patrick Mchardy <kaber@trash.net>
- * (C) 2005-2008 by Pablo Neira Ayuso <pablo@netfilter.org>
+ * (C) 2005-2011 by Pablo Neira Ayuso <pablo@netfilter.org>
*
* Initial connection tracking via netlink development funded and
* generally made possible by Network Robots, Inc. (www.networkrobots.com)
@@ -2163,6 +2163,54 @@ MODULE_ALIAS("ip_conntrack_netlink");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK);
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP);
+static int __net_init ctnetlink_net_init(struct net *net)
+{
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+ int ret;
+
+ ret = nf_conntrack_register_notifier(net, &ctnl_notifier);
+ if (ret < 0) {
+ pr_err("ctnetlink_init: cannot register notifier.\n");
+ goto err_out;
+ }
+
+ ret = nf_ct_expect_register_notifier(net, &ctnl_notifier_exp);
+ if (ret < 0) {
+ pr_err("ctnetlink_init: cannot expect register notifier.\n");
+ goto err_unreg_notifier;
+ }
+#endif
+ return 0;
+
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+err_unreg_notifier:
+ nf_conntrack_unregister_notifier(net, &ctnl_notifier);
+err_out:
+ return ret;
+#endif
+}
+
+static void ctnetlink_net_exit(struct net *net)
+{
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+ nf_ct_expect_unregister_notifier(net, &ctnl_notifier_exp);
+ nf_conntrack_unregister_notifier(net, &ctnl_notifier);
+#endif
+}
+
+static void __net_exit ctnetlink_net_exit_batch(struct list_head *net_exit_list)
+{
+ struct net *net;
+
+ list_for_each_entry(net, net_exit_list, exit_list)
+ ctnetlink_net_exit(net);
+}
+
+static struct pernet_operations ctnetlink_net_ops = {
+ .init = ctnetlink_net_init,
+ .exit_batch = ctnetlink_net_exit_batch,
+};
+
static int __init ctnetlink_init(void)
{
int ret;
@@ -2180,28 +2228,15 @@ static int __init ctnetlink_init(void)
goto err_unreg_subsys;
}
-#ifdef CONFIG_NF_CONNTRACK_EVENTS
- ret = nf_conntrack_register_notifier(&ctnl_notifier);
- if (ret < 0) {
- pr_err("ctnetlink_init: cannot register notifier.\n");
+ if (register_pernet_subsys(&ctnetlink_net_ops)) {
+ pr_err("ctnetlink_init: cannot register pernet operations\n");
goto err_unreg_exp_subsys;
}
- ret = nf_ct_expect_register_notifier(&ctnl_notifier_exp);
- if (ret < 0) {
- pr_err("ctnetlink_init: cannot expect register notifier.\n");
- goto err_unreg_notifier;
- }
-#endif
-
return 0;
-#ifdef CONFIG_NF_CONNTRACK_EVENTS
-err_unreg_notifier:
- nf_conntrack_unregister_notifier(&ctnl_notifier);
err_unreg_exp_subsys:
nfnetlink_subsys_unregister(&ctnl_exp_subsys);
-#endif
err_unreg_subsys:
nfnetlink_subsys_unregister(&ctnl_subsys);
err_out:
@@ -2213,11 +2248,7 @@ static void __exit ctnetlink_exit(void)
pr_info("ctnetlink: unregistering from nfnetlink.\n");
nf_ct_remove_userspace_expectations();
-#ifdef CONFIG_NF_CONNTRACK_EVENTS
- nf_ct_expect_unregister_notifier(&ctnl_notifier_exp);
- nf_conntrack_unregister_notifier(&ctnl_notifier);
-#endif
-
+ unregister_pernet_subsys(&ctnetlink_net_ops);
nfnetlink_subsys_unregister(&ctnl_exp_subsys);
nfnetlink_subsys_unregister(&ctnl_subsys);
}
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index 9c24de10a65..824f184f7a9 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -111,8 +111,6 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
struct netlbl_domaddr_map *addrmap = NULL;
struct netlbl_domaddr4_map *map4 = NULL;
struct netlbl_domaddr6_map *map6 = NULL;
- const struct in_addr *addr4, *mask4;
- const struct in6_addr *addr6, *mask6;
entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
if (entry == NULL)
@@ -133,9 +131,9 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
INIT_LIST_HEAD(&addrmap->list6);
switch (family) {
- case AF_INET:
- addr4 = addr;
- mask4 = mask;
+ case AF_INET: {
+ const struct in_addr *addr4 = addr;
+ const struct in_addr *mask4 = mask;
map4 = kzalloc(sizeof(*map4), GFP_ATOMIC);
if (map4 == NULL)
goto cfg_unlbl_map_add_failure;
@@ -148,9 +146,11 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
if (ret_val != 0)
goto cfg_unlbl_map_add_failure;
break;
- case AF_INET6:
- addr6 = addr;
- mask6 = mask;
+ }
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ case AF_INET6: {
+ const struct in6_addr *addr6 = addr;
+ const struct in6_addr *mask6 = mask;
map6 = kzalloc(sizeof(*map6), GFP_ATOMIC);
if (map6 == NULL)
goto cfg_unlbl_map_add_failure;
@@ -162,11 +162,13 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
map6->list.addr.s6_addr32[3] &= mask6->s6_addr32[3];
ipv6_addr_copy(&map6->list.mask, mask6);
map6->list.valid = 1;
- ret_val = netlbl_af4list_add(&map4->list,
- &addrmap->list4);
+ ret_val = netlbl_af6list_add(&map6->list,
+ &addrmap->list6);
if (ret_val != 0)
goto cfg_unlbl_map_add_failure;
break;
+ }
+#endif /* IPv6 */
default:
goto cfg_unlbl_map_add_failure;
break;
@@ -225,9 +227,11 @@ int netlbl_cfg_unlbl_static_add(struct net *net,
case AF_INET:
addr_len = sizeof(struct in_addr);
break;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
case AF_INET6:
addr_len = sizeof(struct in6_addr);
break;
+#endif /* IPv6 */
default:
return -EPFNOSUPPORT;
}
@@ -266,9 +270,11 @@ int netlbl_cfg_unlbl_static_del(struct net *net,
case AF_INET:
addr_len = sizeof(struct in_addr);
break;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
case AF_INET6:
addr_len = sizeof(struct in6_addr);
break;
+#endif /* IPv6 */
default:
return -EPFNOSUPPORT;
}
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index b9493a09a87..6cd8ddfb512 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -385,7 +385,7 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
struct gred_sched_data *q;
if (table->tab[dp] == NULL) {
- table->tab[dp] = kzalloc(sizeof(*q), GFP_KERNEL);
+ table->tab[dp] = kzalloc(sizeof(*q), GFP_ATOMIC);
if (table->tab[dp] == NULL)
return -ENOMEM;
}
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 6649463da1b..d617161f8dd 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -209,8 +209,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
ctl->Plog, ctl->Scell_log,
nla_data(tb[TCA_RED_STAB]));
- if (skb_queue_empty(&sch->q))
- red_end_of_idle_period(&q->parms);
+ if (!q->qdisc->q.qlen)
+ red_start_of_idle_period(&q->parms);
sch_tree_unlock(sch);
return 0;
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index a3b7120fcc7..4f4c52c0eeb 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -225,11 +225,11 @@ static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
static int
-__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev)
+__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res,
+ struct net_device *dev, struct netdev_queue *txq,
+ struct neighbour *mn)
{
- struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0);
- struct teql_sched_data *q = qdisc_priv(dev_queue->qdisc);
- struct neighbour *mn = dst_get_neighbour(skb_dst(skb));
+ struct teql_sched_data *q = qdisc_priv(txq->qdisc);
struct neighbour *n = q->ncache;
if (mn->tbl == NULL)
@@ -262,17 +262,26 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *
}
static inline int teql_resolve(struct sk_buff *skb,
- struct sk_buff *skb_res, struct net_device *dev)
+ struct sk_buff *skb_res,
+ struct net_device *dev,
+ struct netdev_queue *txq)
{
- struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
+ struct dst_entry *dst = skb_dst(skb);
+ struct neighbour *mn;
+ int res;
+
if (txq->qdisc == &noop_qdisc)
return -ENODEV;
- if (dev->header_ops == NULL ||
- skb_dst(skb) == NULL ||
- dst_get_neighbour(skb_dst(skb)) == NULL)
+ if (!dev->header_ops || !dst)
return 0;
- return __teql_resolve(skb, skb_res, dev);
+
+ rcu_read_lock();
+ mn = dst_get_neighbour(dst);
+ res = mn ? __teql_resolve(skb, skb_res, dev, txq, mn) : 0;
+ rcu_read_unlock();
+
+ return res;
}
static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -307,7 +316,7 @@ restart:
continue;
}
- switch (teql_resolve(skb, skb_res, slave)) {
+ switch (teql_resolve(skb, skb_res, slave, slave_txq)) {
case 0:
if (__netif_tx_trylock(slave_txq)) {
unsigned int length = qdisc_pkt_len(skb);
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 865e68fef21..bf812048cf6 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -82,7 +82,7 @@ static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp)
struct sctp_auth_bytes *key;
/* Verify that we are not going to overflow INT_MAX */
- if ((INT_MAX - key_len) < sizeof(struct sctp_auth_bytes))
+ if (key_len > (INT_MAX - sizeof(struct sctp_auth_bytes)))
return NULL;
/* Allocate the shared key */
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 5317b9341b5..3341d896278 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -591,6 +591,27 @@ void rpc_prepare_task(struct rpc_task *task)
task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}
+static void
+rpc_init_task_statistics(struct rpc_task *task)
+{
+ /* Initialize retry counters */
+ task->tk_garb_retry = 2;
+ task->tk_cred_retry = 2;
+ task->tk_rebind_retry = 2;
+
+ /* starting timestamp */
+ task->tk_start = ktime_get();
+}
+
+static void
+rpc_reset_task_statistics(struct rpc_task *task)
+{
+ task->tk_timeouts = 0;
+ task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_KILLED|RPC_TASK_SENT);
+
+ rpc_init_task_statistics(task);
+}
+
/*
* Helper that calls task->tk_ops->rpc_call_done if it exists
*/
@@ -603,6 +624,7 @@ void rpc_exit_task(struct rpc_task *task)
WARN_ON(RPC_ASSASSINATED(task));
/* Always release the RPC slot and buffer memory */
xprt_release(task);
+ rpc_reset_task_statistics(task);
}
}
}
@@ -805,11 +827,6 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
task->tk_calldata = task_setup_data->callback_data;
INIT_LIST_HEAD(&task->tk_task);
- /* Initialize retry counters */
- task->tk_garb_retry = 2;
- task->tk_cred_retry = 2;
- task->tk_rebind_retry = 2;
-
task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
task->tk_owner = current->tgid;
@@ -819,8 +836,7 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
if (task->tk_ops->rpc_call_prepare != NULL)
task->tk_action = rpc_prepare_task;
- /* starting timestamp */
- task->tk_start = ktime_get();
+ rpc_init_task_statistics(task);
dprintk("RPC: new task initialized, procpid %u\n",
task_pid_nr(current));
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index f4385e45a5f..c64c0ef519b 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -995,13 +995,11 @@ out_init_req:
static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
- if (xprt_dynamic_free_slot(xprt, req))
- return;
-
- memset(req, 0, sizeof(*req)); /* mark unused */
-
spin_lock(&xprt->reserve_lock);
- list_add(&req->rq_list, &xprt->free);
+ if (!xprt_dynamic_free_slot(xprt, req)) {
+ memset(req, 0, sizeof(*req)); /* mark unused */
+ list_add(&req->rq_list, &xprt->free);
+ }
rpc_wake_up_next(&xprt->backlog);
spin_unlock(&xprt->reserve_lock);
}
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 2d78d95955a..55472c48825 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -496,7 +496,7 @@ static int xs_nospace(struct rpc_task *task)
struct rpc_rqst *req = task->tk_rqstp;
struct rpc_xprt *xprt = req->rq_xprt;
struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
- int ret = 0;
+ int ret = -EAGAIN;
dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
task->tk_pid, req->rq_slen - req->rq_bytes_sent,
@@ -508,7 +508,6 @@ static int xs_nospace(struct rpc_task *task)
/* Don't race with disconnect */
if (xprt_connected(xprt)) {
if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) {
- ret = -EAGAIN;
/*
* Notify TCP that we're limited by the application
* window size
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 466fbcc5cf7..b595a3d8679 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1957,6 +1957,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
if ((UNIXCB(skb).pid != siocb->scm->pid) ||
(UNIXCB(skb).cred != siocb->scm->cred)) {
skb_queue_head(&sk->sk_receive_queue, skb);
+ sk->sk_data_ready(sk, skb->len);
break;
}
} else {
@@ -1974,6 +1975,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
chunk = min_t(unsigned int, skb->len, size);
if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
skb_queue_head(&sk->sk_receive_queue, skb);
+ sk->sk_data_ready(sk, skb->len);
if (copied == 0)
copied = -EFAULT;
break;
@@ -1991,6 +1993,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
/* put the skb back if we didn't use it up.. */
if (skb->len) {
skb_queue_head(&sk->sk_receive_queue, skb);
+ sk->sk_data_ready(sk, skb->len);
break;
}
@@ -2006,6 +2009,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
/* put message back and return */
skb_queue_head(&sk->sk_receive_queue, skb);
+ sk->sk_data_ready(sk, skb->len);
break;
}
} while (size);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index b3a476fe827..ffafda5022c 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -89,8 +89,8 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
[NL80211_ATTR_IFINDEX] = { .type = NLA_U32 },
[NL80211_ATTR_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ-1 },
- [NL80211_ATTR_MAC] = { .type = NLA_BINARY, .len = ETH_ALEN },
- [NL80211_ATTR_PREV_BSSID] = { .type = NLA_BINARY, .len = ETH_ALEN },
+ [NL80211_ATTR_MAC] = { .len = ETH_ALEN },
+ [NL80211_ATTR_PREV_BSSID] = { .len = ETH_ALEN },
[NL80211_ATTR_KEY] = { .type = NLA_NESTED, },
[NL80211_ATTR_KEY_DATA] = { .type = NLA_BINARY,
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index e71f5a66574..3302c56f60d 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -57,8 +57,17 @@
#define REG_DBG_PRINT(args...)
#endif
+static struct regulatory_request core_request_world = {
+ .initiator = NL80211_REGDOM_SET_BY_CORE,
+ .alpha2[0] = '0',
+ .alpha2[1] = '0',
+ .intersect = false,
+ .processed = true,
+ .country_ie_env = ENVIRON_ANY,
+};
+
/* Receipt of information from last regulatory request */
-static struct regulatory_request *last_request;
+static struct regulatory_request *last_request = &core_request_world;
/* To trigger userspace events */
static struct platform_device *reg_pdev;
@@ -150,7 +159,7 @@ static char user_alpha2[2];
module_param(ieee80211_regdom, charp, 0444);
MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code");
-static void reset_regdomains(void)
+static void reset_regdomains(bool full_reset)
{
/* avoid freeing static information or freeing something twice */
if (cfg80211_regdomain == cfg80211_world_regdom)
@@ -165,6 +174,13 @@ static void reset_regdomains(void)
cfg80211_world_regdom = &world_regdom;
cfg80211_regdomain = NULL;
+
+ if (!full_reset)
+ return;
+
+ if (last_request != &core_request_world)
+ kfree(last_request);
+ last_request = &core_request_world;
}
/*
@@ -175,7 +191,7 @@ static void update_world_regdomain(const struct ieee80211_regdomain *rd)
{
BUG_ON(!last_request);
- reset_regdomains();
+ reset_regdomains(false);
cfg80211_world_regdom = rd;
cfg80211_regdomain = rd;
@@ -1407,7 +1423,8 @@ static int __regulatory_hint(struct wiphy *wiphy,
}
new_request:
- kfree(last_request);
+ if (last_request != &core_request_world)
+ kfree(last_request);
last_request = pending_request;
last_request->intersect = intersect;
@@ -1577,9 +1594,6 @@ static int regulatory_hint_core(const char *alpha2)
{
struct regulatory_request *request;
- kfree(last_request);
- last_request = NULL;
-
request = kzalloc(sizeof(struct regulatory_request),
GFP_KERNEL);
if (!request)
@@ -1777,7 +1791,7 @@ static void restore_regulatory_settings(bool reset_user)
mutex_lock(&cfg80211_mutex);
mutex_lock(&reg_mutex);
- reset_regdomains();
+ reset_regdomains(true);
restore_alpha2(alpha2, reset_user);
/*
@@ -2037,12 +2051,18 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
}
request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);
+ if (!request_wiphy &&
+ (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER ||
+ last_request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE)) {
+ schedule_delayed_work(&reg_timeout, 0);
+ return -ENODEV;
+ }
if (!last_request->intersect) {
int r;
if (last_request->initiator != NL80211_REGDOM_SET_BY_DRIVER) {
- reset_regdomains();
+ reset_regdomains(false);
cfg80211_regdomain = rd;
return 0;
}
@@ -2063,7 +2083,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
if (r)
return r;
- reset_regdomains();
+ reset_regdomains(false);
cfg80211_regdomain = rd;
return 0;
}
@@ -2088,7 +2108,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
rd = NULL;
- reset_regdomains();
+ reset_regdomains(false);
cfg80211_regdomain = intersected_rd;
return 0;
@@ -2108,7 +2128,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
kfree(rd);
rd = NULL;
- reset_regdomains();
+ reset_regdomains(false);
cfg80211_regdomain = intersected_rd;
return 0;
@@ -2261,11 +2281,8 @@ void /* __init_or_exit */ regulatory_exit(void)
mutex_lock(&cfg80211_mutex);
mutex_lock(&reg_mutex);
- reset_regdomains();
-
- kfree(last_request);
+ reset_regdomains(true);
- last_request = NULL;
dev_set_uevent_suppress(&reg_pdev->dev, true);
platform_device_unregister(reg_pdev);
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 552df27dcf5..2118d644663 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2382,9 +2382,11 @@ static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
return dst_metric_advmss(dst->path);
}
-static unsigned int xfrm_default_mtu(const struct dst_entry *dst)
+static unsigned int xfrm_mtu(const struct dst_entry *dst)
{
- return dst_mtu(dst->path);
+ unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+ return mtu ? : dst_mtu(dst->path);
}
static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst, const void *daddr)
@@ -2411,8 +2413,8 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
dst_ops->check = xfrm_dst_check;
if (likely(dst_ops->default_advmss == NULL))
dst_ops->default_advmss = xfrm_default_advmss;
- if (likely(dst_ops->default_mtu == NULL))
- dst_ops->default_mtu = xfrm_default_mtu;
+ if (likely(dst_ops->mtu == NULL))
+ dst_ops->mtu = xfrm_mtu;
if (likely(dst_ops->negative_advice == NULL))
dst_ops->negative_advice = xfrm_negative_advice;
if (likely(dst_ops->link_failure == NULL))
diff --git a/security/apparmor/path.c b/security/apparmor/path.c
index 36cc0cc39e7..b566eba4a65 100644
--- a/security/apparmor/path.c
+++ b/security/apparmor/path.c
@@ -57,23 +57,44 @@ static int prepend(char **buffer, int buflen, const char *str, int namelen)
static int d_namespace_path(struct path *path, char *buf, int buflen,
char **name, int flags)
{
- struct path root, tmp;
char *res;
- int connected, error = 0;
+ int error = 0;
+ int connected = 1;
+
+ if (path->mnt->mnt_flags & MNT_INTERNAL) {
+ /* it's not mounted anywhere */
+ res = dentry_path(path->dentry, buf, buflen);
+ *name = res;
+ if (IS_ERR(res)) {
+ *name = buf;
+ return PTR_ERR(res);
+ }
+ if (path->dentry->d_sb->s_magic == PROC_SUPER_MAGIC &&
+ strncmp(*name, "/sys/", 5) == 0) {
+ /* TODO: convert over to using a per namespace
+ * control instead of hard coded /proc
+ */
+ return prepend(name, *name - buf, "/proc", 5);
+ }
+ return 0;
+ }
- /* Get the root we want to resolve too, released below */
+ /* resolve paths relative to chroot?*/
if (flags & PATH_CHROOT_REL) {
- /* resolve paths relative to chroot */
+ struct path root;
get_fs_root(current->fs, &root);
- } else {
- /* resolve paths relative to namespace */
- root.mnt = current->nsproxy->mnt_ns->root;
- root.dentry = root.mnt->mnt_root;
- path_get(&root);
+ res = __d_path(path, &root, buf, buflen);
+ if (res && !IS_ERR(res)) {
+ /* everything's fine */
+ *name = res;
+ path_put(&root);
+ goto ok;
+ }
+ path_put(&root);
+ connected = 0;
}
- tmp = root;
- res = __d_path(path, &tmp, buf, buflen);
+ res = d_absolute_path(path, buf, buflen);
*name = res;
/* handle error conditions - and still allow a partial path to
@@ -84,7 +105,10 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
*name = buf;
goto out;
}
+ if (!our_mnt(path->mnt))
+ connected = 0;
+ok:
/* Handle two cases:
* 1. A deleted dentry && profile is not allowing mediation of deleted
* 2. On some filesystems, newly allocated dentries appear to the
@@ -97,10 +121,7 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
goto out;
}
- /* Determine if the path is connected to the expected root */
- connected = tmp.dentry == root.dentry && tmp.mnt == root.mnt;
-
- /* If the path is not connected,
+ /* If the path is not connected to the expected root,
* check if it is a sysctl and handle specially else remove any
* leading / that __d_path may have returned.
* Unless
@@ -112,17 +133,9 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
* namespace root.
*/
if (!connected) {
- /* is the disconnect path a sysctl? */
- if (tmp.dentry->d_sb->s_magic == PROC_SUPER_MAGIC &&
- strncmp(*name, "/sys/", 5) == 0) {
- /* TODO: convert over to using a per namespace
- * control instead of hard coded /proc
- */
- error = prepend(name, *name - buf, "/proc", 5);
- } else if (!(flags & PATH_CONNECT_PATH) &&
+ if (!(flags & PATH_CONNECT_PATH) &&
!(((flags & CHROOT_NSCONNECT) == CHROOT_NSCONNECT) &&
- (tmp.mnt == current->nsproxy->mnt_ns->root &&
- tmp.dentry == tmp.mnt->mnt_root))) {
+ our_mnt(path->mnt))) {
/* disconnected path, don't return pathname starting
* with '/'
*/
@@ -133,8 +146,6 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
}
out:
- path_put(&root);
-
return error;
}
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
index 5dd5b140242..8738deff26f 100644
--- a/security/integrity/evm/evm_crypto.c
+++ b/security/integrity/evm/evm_crypto.c
@@ -27,20 +27,35 @@ static int evmkey_len = MAX_KEY_SIZE;
struct crypto_shash *hmac_tfm;
+static DEFINE_MUTEX(mutex);
+
static struct shash_desc *init_desc(void)
{
int rc;
struct shash_desc *desc;
if (hmac_tfm == NULL) {
+ mutex_lock(&mutex);
+ if (hmac_tfm)
+ goto out;
hmac_tfm = crypto_alloc_shash(evm_hmac, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(hmac_tfm)) {
pr_err("Can not allocate %s (reason: %ld)\n",
evm_hmac, PTR_ERR(hmac_tfm));
rc = PTR_ERR(hmac_tfm);
hmac_tfm = NULL;
+ mutex_unlock(&mutex);
+ return ERR_PTR(rc);
+ }
+ rc = crypto_shash_setkey(hmac_tfm, evmkey, evmkey_len);
+ if (rc) {
+ crypto_free_shash(hmac_tfm);
+ hmac_tfm = NULL;
+ mutex_unlock(&mutex);
return ERR_PTR(rc);
}
+out:
+ mutex_unlock(&mutex);
}
desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac_tfm),
@@ -51,11 +66,7 @@ static struct shash_desc *init_desc(void)
desc->tfm = hmac_tfm;
desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
- rc = crypto_shash_setkey(hmac_tfm, evmkey, evmkey_len);
- if (rc)
- goto out;
rc = crypto_shash_init(desc);
-out:
if (rc) {
kfree(desc);
return ERR_PTR(rc);
diff --git a/security/selinux/netport.c b/security/selinux/netport.c
index 0b62bd11246..7b9eb1faf68 100644
--- a/security/selinux/netport.c
+++ b/security/selinux/netport.c
@@ -123,7 +123,9 @@ static void sel_netport_insert(struct sel_netport *port)
if (sel_netport_hash[idx].size == SEL_NETPORT_HASH_BKT_LIMIT) {
struct sel_netport *tail;
tail = list_entry(
- rcu_dereference(sel_netport_hash[idx].list.prev),
+ rcu_dereference_protected(
+ sel_netport_hash[idx].list.prev,
+ lockdep_is_held(&sel_netport_lock)),
struct sel_netport, list);
list_del_rcu(&tail->list);
kfree_rcu(tail, rcu);
diff --git a/security/tomoyo/realpath.c b/security/tomoyo/realpath.c
index 738bbdf8d4c..d9f3ced8756 100644
--- a/security/tomoyo/realpath.c
+++ b/security/tomoyo/realpath.c
@@ -101,9 +101,8 @@ static char *tomoyo_get_absolute_path(struct path *path, char * const buffer,
{
char *pos = ERR_PTR(-ENOMEM);
if (buflen >= 256) {
- struct path ns_root = { };
/* go to whatever namespace root we are under */
- pos = __d_path(path, &ns_root, buffer, buflen - 1);
+ pos = d_absolute_path(path, buffer, buflen - 1);
if (!IS_ERR(pos) && *pos == '/' && pos[1]) {
struct inode *inode = path->dentry->d_inode;
if (inode && S_ISDIR(inode->i_mode)) {
@@ -294,8 +293,16 @@ char *tomoyo_realpath_from_path(struct path *path)
pos = tomoyo_get_local_path(path->dentry, buf,
buf_len - 1);
/* Get absolute name for the rest. */
- else
+ else {
pos = tomoyo_get_absolute_path(path, buf, buf_len - 1);
+ /*
+ * Fall back to local name if absolute name is not
+ * available.
+ */
+ if (pos == ERR_PTR(-EINVAL))
+ pos = tomoyo_get_local_path(path->dentry, buf,
+ buf_len - 1);
+ }
encode:
if (IS_ERR(pos))
continue;
diff --git a/sound/pci/cs5535audio/cs5535audio_pcm.c b/sound/pci/cs5535audio/cs5535audio_pcm.c
index e083122ca55..dbf94b189e7 100644
--- a/sound/pci/cs5535audio/cs5535audio_pcm.c
+++ b/sound/pci/cs5535audio/cs5535audio_pcm.c
@@ -148,7 +148,7 @@ static int cs5535audio_build_dma_packets(struct cs5535audio *cs5535au,
struct cs5535audio_dma_desc *desc =
&((struct cs5535audio_dma_desc *) dma->desc_buf.area)[i];
desc->addr = cpu_to_le32(addr);
- desc->size = cpu_to_le32(period_bytes);
+ desc->size = cpu_to_le16(period_bytes);
desc->ctlreserved = cpu_to_le16(PRD_EOP);
desc_addr += sizeof(struct cs5535audio_dma_desc);
addr += period_bytes;
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index e44b107fdc7..4562e9de6a1 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -4046,9 +4046,9 @@ int snd_hda_check_board_codec_sid_config(struct hda_codec *codec,
/* Search for codec ID */
for (q = tbl; q->subvendor; q++) {
- unsigned long vendorid = (q->subdevice) | (q->subvendor << 16);
-
- if (vendorid == codec->subsystem_id)
+ unsigned int mask = 0xffff0000 | q->subdevice_mask;
+ unsigned int id = (q->subdevice | (q->subvendor << 16)) & mask;
+ if ((codec->subsystem_id & mask) == id)
break;
}
diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c
index 7ae7578bdcc..c1da422e085 100644
--- a/sound/pci/hda/hda_eld.c
+++ b/sound/pci/hda/hda_eld.c
@@ -347,18 +347,28 @@ int snd_hdmi_get_eld(struct hdmi_eld *eld,
for (i = 0; i < size; i++) {
unsigned int val = hdmi_get_eld_data(codec, nid, i);
+ /*
+ * Graphics driver might be writing to ELD buffer right now.
+ * Just abort. The caller will repoll after a while.
+ */
if (!(val & AC_ELDD_ELD_VALID)) {
- if (!i) {
- snd_printd(KERN_INFO
- "HDMI: invalid ELD data\n");
- ret = -EINVAL;
- goto error;
- }
snd_printd(KERN_INFO
"HDMI: invalid ELD data byte %d\n", i);
- val = 0;
- } else
- val &= AC_ELDD_ELD_DATA;
+ ret = -EINVAL;
+ goto error;
+ }
+ val &= AC_ELDD_ELD_DATA;
+ /*
+ * The first byte cannot be zero. This can happen on some DVI
+ * connections. Some Intel chips may also need some 250ms delay
+ * to return non-zero ELD data, even when the graphics driver
+ * correctly writes ELD content before setting ELD_valid bit.
+ */
+ if (!val && !i) {
+ snd_printdd(KERN_INFO "HDMI: 0 ELD data\n");
+ ret = -EINVAL;
+ goto error;
+ }
buf[i] = val;
}
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 096507d2ca9..c2f79e63124 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2507,8 +2507,8 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = {
SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS 1101HA", POS_FIX_LPIB),
SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB),
- SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1297, 0x3166, "Shuttle", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
@@ -2971,7 +2971,8 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
/* SCH */
{ PCI_DEVICE(0x8086, 0x811b),
.driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP |
- AZX_DCAPS_BUFSIZE},
+ AZX_DCAPS_BUFSIZE | AZX_DCAPS_POSFIX_LPIB }, /* Poulsbo */
+ /* ICH */
{ PCI_DEVICE(0x8086, 0x2668),
.driver_data = AZX_DRIVER_ICH | AZX_DCAPS_OLD_SSYNC |
AZX_DCAPS_BUFSIZE }, /* ICH6 */
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index 2fbab8e2957..70a7abda7e2 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -58,6 +58,8 @@ struct cs_spec {
unsigned int gpio_mask;
unsigned int gpio_dir;
unsigned int gpio_data;
+ unsigned int gpio_eapd_hp; /* EAPD GPIO bit for headphones */
+ unsigned int gpio_eapd_speaker; /* EAPD GPIO bit for speakers */
struct hda_pcm pcm_rec[2]; /* PCM information */
@@ -76,6 +78,7 @@ enum {
CS420X_MBP53,
CS420X_MBP55,
CS420X_IMAC27,
+ CS420X_APPLE,
CS420X_AUTO,
CS420X_MODELS
};
@@ -928,10 +931,9 @@ static void cs_automute(struct hda_codec *codec)
spdif_present ? 0 : PIN_OUT);
}
}
- if (spec->board_config == CS420X_MBP53 ||
- spec->board_config == CS420X_MBP55 ||
- spec->board_config == CS420X_IMAC27) {
- unsigned int gpio = hp_present ? 0x02 : 0x08;
+ if (spec->gpio_eapd_hp) {
+ unsigned int gpio = hp_present ?
+ spec->gpio_eapd_hp : spec->gpio_eapd_speaker;
snd_hda_codec_write(codec, 0x01, 0,
AC_VERB_SET_GPIO_DATA, gpio);
}
@@ -1276,6 +1278,7 @@ static const char * const cs420x_models[CS420X_MODELS] = {
[CS420X_MBP53] = "mbp53",
[CS420X_MBP55] = "mbp55",
[CS420X_IMAC27] = "imac27",
+ [CS420X_APPLE] = "apple",
[CS420X_AUTO] = "auto",
};
@@ -1285,7 +1288,13 @@ static const struct snd_pci_quirk cs420x_cfg_tbl[] = {
SND_PCI_QUIRK(0x10de, 0x0d94, "MacBookAir 3,1(2)", CS420X_MBP55),
SND_PCI_QUIRK(0x10de, 0xcb79, "MacBookPro 5,5", CS420X_MBP55),
SND_PCI_QUIRK(0x10de, 0xcb89, "MacBookPro 7,1", CS420X_MBP55),
- SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27),
+ /* this conflicts with too many other models */
+ /*SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27),*/
+ {} /* terminator */
+};
+
+static const struct snd_pci_quirk cs420x_codec_cfg_tbl[] = {
+ SND_PCI_QUIRK_VENDOR(0x106b, "Apple", CS420X_APPLE),
{} /* terminator */
};
@@ -1367,6 +1376,10 @@ static int patch_cs420x(struct hda_codec *codec)
spec->board_config =
snd_hda_check_board_config(codec, CS420X_MODELS,
cs420x_models, cs420x_cfg_tbl);
+ if (spec->board_config < 0)
+ spec->board_config =
+ snd_hda_check_board_codec_sid_config(codec,
+ CS420X_MODELS, NULL, cs420x_codec_cfg_tbl);
if (spec->board_config >= 0)
fix_pincfg(codec, spec->board_config, cs_pincfgs);
@@ -1374,10 +1387,11 @@ static int patch_cs420x(struct hda_codec *codec)
case CS420X_IMAC27:
case CS420X_MBP53:
case CS420X_MBP55:
- /* GPIO1 = headphones */
- /* GPIO3 = speakers */
- spec->gpio_mask = 0x0a;
- spec->gpio_dir = 0x0a;
+ case CS420X_APPLE:
+ spec->gpio_eapd_hp = 2; /* GPIO1 = headphones */
+ spec->gpio_eapd_speaker = 8; /* GPIO3 = speakers */
+ spec->gpio_mask = spec->gpio_dir =
+ spec->gpio_eapd_hp | spec->gpio_eapd_speaker;
break;
}
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 9850c5b481e..c505fd5d338 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -69,6 +69,7 @@ struct hdmi_spec_per_pin {
struct hda_codec *codec;
struct hdmi_eld sink_eld;
struct delayed_work work;
+ int repoll_count;
};
struct hdmi_spec {
@@ -748,7 +749,7 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec, int pin_idx,
* Unsolicited events
*/
-static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, bool retry);
+static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll);
static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
{
@@ -766,7 +767,7 @@ static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
if (pin_idx < 0)
return;
- hdmi_present_sense(&spec->pins[pin_idx], true);
+ hdmi_present_sense(&spec->pins[pin_idx], 1);
}
static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res)
@@ -960,7 +961,7 @@ static int hdmi_read_pin_conn(struct hda_codec *codec, int pin_idx)
return 0;
}
-static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, bool retry)
+static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
{
struct hda_codec *codec = per_pin->codec;
struct hdmi_eld *eld = &per_pin->sink_eld;
@@ -989,7 +990,7 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, bool retry)
if (eld_valid) {
if (!snd_hdmi_get_eld(eld, codec, pin_nid))
snd_hdmi_show_eld(eld);
- else if (retry) {
+ else if (repoll) {
queue_delayed_work(codec->bus->workq,
&per_pin->work,
msecs_to_jiffies(300));
@@ -1004,7 +1005,10 @@ static void hdmi_repoll_eld(struct work_struct *work)
struct hdmi_spec_per_pin *per_pin =
container_of(to_delayed_work(work), struct hdmi_spec_per_pin, work);
- hdmi_present_sense(per_pin, false);
+ if (per_pin->repoll_count++ > 6)
+ per_pin->repoll_count = 0;
+
+ hdmi_present_sense(per_pin, per_pin->repoll_count);
}
static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid)
@@ -1235,7 +1239,7 @@ static int generic_hdmi_build_jack(struct hda_codec *codec, int pin_idx)
if (err < 0)
return err;
- hdmi_present_sense(per_pin, false);
+ hdmi_present_sense(per_pin, 0);
return 0;
}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 336d14eb72a..1d07e8fa243 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -277,6 +277,12 @@ static bool alc_dyn_adc_pcm_resetup(struct hda_codec *codec, int cur)
return false;
}
+static inline hda_nid_t get_capsrc(struct alc_spec *spec, int idx)
+{
+ return spec->capsrc_nids ?
+ spec->capsrc_nids[idx] : spec->adc_nids[idx];
+}
+
/* select the given imux item; either unmute exclusively or select the route */
static int alc_mux_select(struct hda_codec *codec, unsigned int adc_idx,
unsigned int idx, bool force)
@@ -291,6 +297,8 @@ static int alc_mux_select(struct hda_codec *codec, unsigned int adc_idx,
imux = &spec->input_mux[mux_idx];
if (!imux->num_items && mux_idx > 0)
imux = &spec->input_mux[0];
+ if (!imux->num_items)
+ return 0;
if (idx >= imux->num_items)
idx = imux->num_items - 1;
@@ -303,8 +311,7 @@ static int alc_mux_select(struct hda_codec *codec, unsigned int adc_idx,
adc_idx = spec->dyn_adc_idx[idx];
}
- nid = spec->capsrc_nids ?
- spec->capsrc_nids[adc_idx] : spec->adc_nids[adc_idx];
+ nid = get_capsrc(spec, adc_idx);
/* no selection? */
num_conns = snd_hda_get_conn_list(codec, nid, NULL);
@@ -1054,8 +1061,19 @@ static bool alc_rebuild_imux_for_auto_mic(struct hda_codec *codec)
spec->imux_pins[2] = spec->dock_mic_pin;
for (i = 0; i < 3; i++) {
strcpy(imux->items[i].label, texts[i]);
- if (spec->imux_pins[i])
+ if (spec->imux_pins[i]) {
+ hda_nid_t pin = spec->imux_pins[i];
+ int c;
+ for (c = 0; c < spec->num_adc_nids; c++) {
+ hda_nid_t cap = get_capsrc(spec, c);
+ int idx = get_connection_index(codec, cap, pin);
+ if (idx >= 0) {
+ imux->items[i].index = idx;
+ break;
+ }
+ }
imux->num_items = i + 1;
+ }
}
spec->num_mux_defs = 1;
spec->input_mux = imux;
@@ -1957,10 +1975,8 @@ static int alc_build_controls(struct hda_codec *codec)
if (!kctl)
kctl = snd_hda_find_mixer_ctl(codec, "Input Source");
for (i = 0; kctl && i < kctl->count; i++) {
- const hda_nid_t *nids = spec->capsrc_nids;
- if (!nids)
- nids = spec->adc_nids;
- err = snd_hda_add_nid(codec, kctl, i, nids[i]);
+ err = snd_hda_add_nid(codec, kctl, i,
+ get_capsrc(spec, i));
if (err < 0)
return err;
}
@@ -2615,6 +2631,8 @@ static const char *alc_get_line_out_pfx(struct alc_spec *spec, int ch,
case AUTO_PIN_SPEAKER_OUT:
if (cfg->line_outs == 1)
return "Speaker";
+ if (cfg->line_outs == 2)
+ return ch ? "Bass Speaker" : "Speaker";
break;
case AUTO_PIN_HP_OUT:
/* for multi-io case, only the primary out */
@@ -2747,8 +2765,7 @@ static int alc_auto_create_input_ctls(struct hda_codec *codec)
}
for (c = 0; c < num_adcs; c++) {
- hda_nid_t cap = spec->capsrc_nids ?
- spec->capsrc_nids[c] : spec->adc_nids[c];
+ hda_nid_t cap = get_capsrc(spec, c);
idx = get_connection_index(codec, cap, pin);
if (idx >= 0) {
spec->imux_pins[imux->num_items] = pin;
@@ -2889,7 +2906,7 @@ static hda_nid_t alc_auto_look_for_dac(struct hda_codec *codec, hda_nid_t pin)
if (!nid)
continue;
if (found_in_nid_list(nid, spec->multiout.dac_nids,
- spec->multiout.num_dacs))
+ ARRAY_SIZE(spec->private_dac_nids)))
continue;
if (found_in_nid_list(nid, spec->multiout.hp_out_nid,
ARRAY_SIZE(spec->multiout.hp_out_nid)))
@@ -2910,6 +2927,7 @@ static hda_nid_t get_dac_if_single(struct hda_codec *codec, hda_nid_t pin)
return 0;
}
+/* return 0 if no possible DAC is found, 1 if one or more found */
static int alc_auto_fill_extra_dacs(struct hda_codec *codec, int num_outs,
const hda_nid_t *pins, hda_nid_t *dacs)
{
@@ -2927,7 +2945,7 @@ static int alc_auto_fill_extra_dacs(struct hda_codec *codec, int num_outs,
if (!dacs[i])
dacs[i] = alc_auto_look_for_dac(codec, pins[i]);
}
- return 0;
+ return 1;
}
static int alc_auto_fill_multi_ios(struct hda_codec *codec,
@@ -2937,7 +2955,7 @@ static int alc_auto_fill_multi_ios(struct hda_codec *codec,
static int alc_auto_fill_dac_nids(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- const struct auto_pin_cfg *cfg = &spec->autocfg;
+ struct auto_pin_cfg *cfg = &spec->autocfg;
bool redone = false;
int i;
@@ -2948,6 +2966,7 @@ static int alc_auto_fill_dac_nids(struct hda_codec *codec)
spec->multiout.extra_out_nid[0] = 0;
memset(spec->private_dac_nids, 0, sizeof(spec->private_dac_nids));
spec->multiout.dac_nids = spec->private_dac_nids;
+ spec->multi_ios = 0;
/* fill hard-wired DACs first */
if (!redone) {
@@ -2981,10 +3000,12 @@ static int alc_auto_fill_dac_nids(struct hda_codec *codec)
for (i = 0; i < cfg->line_outs; i++) {
if (spec->private_dac_nids[i])
spec->multiout.num_dacs++;
- else
+ else {
memmove(spec->private_dac_nids + i,
spec->private_dac_nids + i + 1,
sizeof(hda_nid_t) * (cfg->line_outs - i - 1));
+ spec->private_dac_nids[cfg->line_outs - 1] = 0;
+ }
}
if (cfg->line_outs == 1 && cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
@@ -3006,9 +3027,28 @@ static int alc_auto_fill_dac_nids(struct hda_codec *codec)
if (cfg->line_out_type != AUTO_PIN_HP_OUT)
alc_auto_fill_extra_dacs(codec, cfg->hp_outs, cfg->hp_pins,
spec->multiout.hp_out_nid);
- if (cfg->line_out_type != AUTO_PIN_SPEAKER_OUT)
- alc_auto_fill_extra_dacs(codec, cfg->speaker_outs, cfg->speaker_pins,
- spec->multiout.extra_out_nid);
+ if (cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
+ int err = alc_auto_fill_extra_dacs(codec, cfg->speaker_outs,
+ cfg->speaker_pins,
+ spec->multiout.extra_out_nid);
+ /* if no speaker volume is assigned, try again as the primary
+ * output
+ */
+ if (!err && cfg->speaker_outs > 0 &&
+ cfg->line_out_type == AUTO_PIN_HP_OUT) {
+ cfg->hp_outs = cfg->line_outs;
+ memcpy(cfg->hp_pins, cfg->line_out_pins,
+ sizeof(cfg->hp_pins));
+ cfg->line_outs = cfg->speaker_outs;
+ memcpy(cfg->line_out_pins, cfg->speaker_pins,
+ sizeof(cfg->speaker_pins));
+ cfg->speaker_outs = 0;
+ memset(cfg->speaker_pins, 0, sizeof(cfg->speaker_pins));
+ cfg->line_out_type = AUTO_PIN_SPEAKER_OUT;
+ redone = false;
+ goto again;
+ }
+ }
return 0;
}
@@ -3158,7 +3198,8 @@ static int alc_auto_create_multi_out_ctls(struct hda_codec *codec,
}
static int alc_auto_create_extra_out(struct hda_codec *codec, hda_nid_t pin,
- hda_nid_t dac, const char *pfx)
+ hda_nid_t dac, const char *pfx,
+ int cidx)
{
struct alc_spec *spec = codec->spec;
hda_nid_t sw, vol;
@@ -3174,15 +3215,15 @@ static int alc_auto_create_extra_out(struct hda_codec *codec, hda_nid_t pin,
if (is_ctl_used(spec->sw_ctls, val))
return 0; /* already created */
mark_ctl_usage(spec->sw_ctls, val);
- return add_pb_sw_ctrl(spec, ALC_CTL_WIDGET_MUTE, pfx, val);
+ return __add_pb_sw_ctrl(spec, ALC_CTL_WIDGET_MUTE, pfx, cidx, val);
}
sw = alc_look_for_out_mute_nid(codec, pin, dac);
vol = alc_look_for_out_vol_nid(codec, pin, dac);
- err = alc_auto_add_stereo_vol(codec, pfx, 0, vol);
+ err = alc_auto_add_stereo_vol(codec, pfx, cidx, vol);
if (err < 0)
return err;
- err = alc_auto_add_stereo_sw(codec, pfx, 0, sw);
+ err = alc_auto_add_stereo_sw(codec, pfx, cidx, sw);
if (err < 0)
return err;
return 0;
@@ -3223,16 +3264,21 @@ static int alc_auto_create_extra_outs(struct hda_codec *codec, int num_pins,
hda_nid_t dac = *dacs;
if (!dac)
dac = spec->multiout.dac_nids[0];
- return alc_auto_create_extra_out(codec, *pins, dac, pfx);
+ return alc_auto_create_extra_out(codec, *pins, dac, pfx, 0);
}
if (dacs[num_pins - 1]) {
/* OK, we have a multi-output system with individual volumes */
for (i = 0; i < num_pins; i++) {
- snprintf(name, sizeof(name), "%s %s",
- pfx, channel_name[i]);
- err = alc_auto_create_extra_out(codec, pins[i], dacs[i],
- name);
+ if (num_pins >= 3) {
+ snprintf(name, sizeof(name), "%s %s",
+ pfx, channel_name[i]);
+ err = alc_auto_create_extra_out(codec, pins[i], dacs[i],
+ name, 0);
+ } else {
+ err = alc_auto_create_extra_out(codec, pins[i], dacs[i],
+ pfx, i);
+ }
if (err < 0)
return err;
}
@@ -3694,8 +3740,7 @@ static int init_capsrc_for_pin(struct hda_codec *codec, hda_nid_t pin)
if (!pin)
return 0;
for (i = 0; i < spec->num_adc_nids; i++) {
- hda_nid_t cap = spec->capsrc_nids ?
- spec->capsrc_nids[i] : spec->adc_nids[i];
+ hda_nid_t cap = get_capsrc(spec, i);
int idx;
idx = get_connection_index(codec, cap, pin);
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 470f6f286e8..616678fde48 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -215,6 +215,7 @@ struct sigmatel_spec {
unsigned int gpio_mute;
unsigned int gpio_led;
unsigned int gpio_led_polarity;
+ unsigned int vref_mute_led_nid; /* pin NID for mute-LED vref control */
unsigned int vref_led;
/* stream */
@@ -1641,6 +1642,8 @@ static const struct snd_pci_quirk stac92hd73xx_codec_id_cfg_tbl[] = {
"Alienware M17x", STAC_ALIENWARE_M17X),
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x043a,
"Alienware M17x", STAC_ALIENWARE_M17X),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0490,
+ "Alienware M17x", STAC_ALIENWARE_M17X),
{} /* terminator */
};
@@ -4316,12 +4319,10 @@ static void stac_store_hints(struct hda_codec *codec)
spec->eapd_switch = val;
get_int_hint(codec, "gpio_led_polarity", &spec->gpio_led_polarity);
if (get_int_hint(codec, "gpio_led", &spec->gpio_led)) {
- if (spec->gpio_led <= 8) {
- spec->gpio_mask |= spec->gpio_led;
- spec->gpio_dir |= spec->gpio_led;
- if (spec->gpio_led_polarity)
- spec->gpio_data |= spec->gpio_led;
- }
+ spec->gpio_mask |= spec->gpio_led;
+ spec->gpio_dir |= spec->gpio_led;
+ if (spec->gpio_led_polarity)
+ spec->gpio_data |= spec->gpio_led;
}
}
@@ -4439,7 +4440,9 @@ static int stac92xx_init(struct hda_codec *codec)
int pinctl, def_conf;
/* power on when no jack detection is available */
- if (!spec->hp_detect) {
+ /* or when the VREF is used for controlling LED */
+ if (!spec->hp_detect ||
+ spec->vref_mute_led_nid == nid) {
stac_toggle_power_map(codec, nid, 1);
continue;
}
@@ -4911,8 +4914,14 @@ static int find_mute_led_gpio(struct hda_codec *codec, int default_polarity)
if (sscanf(dev->name, "HP_Mute_LED_%d_%x",
&spec->gpio_led_polarity,
&spec->gpio_led) == 2) {
- if (spec->gpio_led < 4)
+ unsigned int max_gpio;
+ max_gpio = snd_hda_param_read(codec, codec->afg,
+ AC_PAR_GPIO_CAP);
+ max_gpio &= AC_GPIO_IO_COUNT;
+ if (spec->gpio_led < max_gpio)
spec->gpio_led = 1 << spec->gpio_led;
+ else
+ spec->vref_mute_led_nid = spec->gpio_led;
return 1;
}
if (sscanf(dev->name, "HP_Mute_LED_%d",
@@ -4920,6 +4929,12 @@ static int find_mute_led_gpio(struct hda_codec *codec, int default_polarity)
set_hp_led_gpio(codec);
return 1;
}
+ /* BIOS bug: unfilled OEM string */
+ if (strstr(dev->name, "HP_Mute_LED_P_G")) {
+ set_hp_led_gpio(codec);
+ spec->gpio_led_polarity = 1;
+ return 1;
+ }
}
/*
@@ -5041,29 +5056,12 @@ static int stac92xx_pre_resume(struct hda_codec *codec)
struct sigmatel_spec *spec = codec->spec;
/* sync mute LED */
- if (spec->gpio_led) {
- if (spec->gpio_led <= 8) {
- stac_gpio_set(codec, spec->gpio_mask,
- spec->gpio_dir, spec->gpio_data);
- } else {
- stac_vrefout_set(codec,
- spec->gpio_led, spec->vref_led);
- }
- }
- return 0;
-}
-
-static int stac92xx_post_suspend(struct hda_codec *codec)
-{
- struct sigmatel_spec *spec = codec->spec;
- if (spec->gpio_led > 8) {
- /* with vref-out pin used for mute led control
- * codec AFG is prevented from D3 state, but on
- * system suspend it can (and should) be used
- */
- snd_hda_codec_read(codec, codec->afg, 0,
- AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
- }
+ if (spec->vref_mute_led_nid)
+ stac_vrefout_set(codec, spec->vref_mute_led_nid,
+ spec->vref_led);
+ else if (spec->gpio_led)
+ stac_gpio_set(codec, spec->gpio_mask,
+ spec->gpio_dir, spec->gpio_data);
return 0;
}
@@ -5074,7 +5072,7 @@ static void stac92xx_set_power_state(struct hda_codec *codec, hda_nid_t fg,
struct sigmatel_spec *spec = codec->spec;
if (power_state == AC_PWRST_D3) {
- if (spec->gpio_led > 8) {
+ if (spec->vref_mute_led_nid) {
/* with vref-out pin used for mute led control
* codec AFG is prevented from D3 state
*/
@@ -5127,7 +5125,7 @@ static int stac92xx_update_led_status(struct hda_codec *codec)
}
}
/*polarity defines *not* muted state level*/
- if (spec->gpio_led <= 8) {
+ if (!spec->vref_mute_led_nid) {
if (muted)
spec->gpio_data &= ~spec->gpio_led; /* orange */
else
@@ -5145,7 +5143,8 @@ static int stac92xx_update_led_status(struct hda_codec *codec)
muted_lvl = spec->gpio_led_polarity ?
AC_PINCTL_VREF_GRD : AC_PINCTL_VREF_HIZ;
spec->vref_led = muted ? muted_lvl : notmtd_lvl;
- stac_vrefout_set(codec, spec->gpio_led, spec->vref_led);
+ stac_vrefout_set(codec, spec->vref_mute_led_nid,
+ spec->vref_led);
}
return 0;
}
@@ -5659,15 +5658,13 @@ again:
#ifdef CONFIG_SND_HDA_POWER_SAVE
if (spec->gpio_led) {
- if (spec->gpio_led <= 8) {
+ if (!spec->vref_mute_led_nid) {
spec->gpio_mask |= spec->gpio_led;
spec->gpio_dir |= spec->gpio_led;
spec->gpio_data |= spec->gpio_led;
} else {
codec->patch_ops.set_power_state =
stac92xx_set_power_state;
- codec->patch_ops.post_suspend =
- stac92xx_post_suspend;
}
codec->patch_ops.pre_resume = stac92xx_pre_resume;
codec->patch_ops.check_power_status =
@@ -5974,15 +5971,13 @@ again:
#ifdef CONFIG_SND_HDA_POWER_SAVE
if (spec->gpio_led) {
- if (spec->gpio_led <= 8) {
+ if (!spec->vref_mute_led_nid) {
spec->gpio_mask |= spec->gpio_led;
spec->gpio_dir |= spec->gpio_led;
spec->gpio_data |= spec->gpio_led;
} else {
codec->patch_ops.set_power_state =
stac92xx_set_power_state;
- codec->patch_ops.post_suspend =
- stac92xx_post_suspend;
}
codec->patch_ops.pre_resume = stac92xx_pre_resume;
codec->patch_ops.check_power_status =
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index 431c0d417ee..b5137629f8e 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -208,6 +208,7 @@ struct via_spec {
/* work to check hp jack state */
struct hda_codec *codec;
struct delayed_work vt1708_hp_work;
+ int hp_work_active;
int vt1708_jack_detect;
int vt1708_hp_present;
@@ -305,27 +306,35 @@ enum {
static void analog_low_current_mode(struct hda_codec *codec);
static bool is_aa_path_mute(struct hda_codec *codec);
-static void vt1708_start_hp_work(struct via_spec *spec)
+#define hp_detect_with_aa(codec) \
+ (snd_hda_get_bool_hint(codec, "analog_loopback_hp_detect") == 1 && \
+ !is_aa_path_mute(codec))
+
+static void vt1708_stop_hp_work(struct via_spec *spec)
{
if (spec->codec_type != VT1708 || spec->autocfg.hp_pins[0] == 0)
return;
- snd_hda_codec_write(spec->codec, 0x1, 0, 0xf81,
- !spec->vt1708_jack_detect);
- if (!delayed_work_pending(&spec->vt1708_hp_work))
- schedule_delayed_work(&spec->vt1708_hp_work,
- msecs_to_jiffies(100));
+ if (spec->hp_work_active) {
+ snd_hda_codec_write(spec->codec, 0x1, 0, 0xf81, 1);
+ cancel_delayed_work_sync(&spec->vt1708_hp_work);
+ spec->hp_work_active = 0;
+ }
}
-static void vt1708_stop_hp_work(struct via_spec *spec)
+static void vt1708_update_hp_work(struct via_spec *spec)
{
if (spec->codec_type != VT1708 || spec->autocfg.hp_pins[0] == 0)
return;
- if (snd_hda_get_bool_hint(spec->codec, "analog_loopback_hp_detect") == 1
- && !is_aa_path_mute(spec->codec))
- return;
- snd_hda_codec_write(spec->codec, 0x1, 0, 0xf81,
- !spec->vt1708_jack_detect);
- cancel_delayed_work_sync(&spec->vt1708_hp_work);
+ if (spec->vt1708_jack_detect &&
+ (spec->active_streams || hp_detect_with_aa(spec->codec))) {
+ if (!spec->hp_work_active) {
+ snd_hda_codec_write(spec->codec, 0x1, 0, 0xf81, 0);
+ schedule_delayed_work(&spec->vt1708_hp_work,
+ msecs_to_jiffies(100));
+ spec->hp_work_active = 1;
+ }
+ } else if (!hp_detect_with_aa(spec->codec))
+ vt1708_stop_hp_work(spec);
}
static void set_widgets_power_state(struct hda_codec *codec)
@@ -343,12 +352,7 @@ static int analog_input_switch_put(struct snd_kcontrol *kcontrol,
set_widgets_power_state(codec);
analog_low_current_mode(snd_kcontrol_chip(kcontrol));
- if (snd_hda_get_bool_hint(codec, "analog_loopback_hp_detect") == 1) {
- if (is_aa_path_mute(codec))
- vt1708_start_hp_work(codec->spec);
- else
- vt1708_stop_hp_work(codec->spec);
- }
+ vt1708_update_hp_work(codec->spec);
return change;
}
@@ -1154,7 +1158,7 @@ static int via_playback_multi_pcm_prepare(struct hda_pcm_stream *hinfo,
spec->cur_dac_stream_tag = stream_tag;
spec->cur_dac_format = format;
mutex_unlock(&spec->config_mutex);
- vt1708_start_hp_work(spec);
+ vt1708_update_hp_work(spec);
return 0;
}
@@ -1174,7 +1178,7 @@ static int via_playback_hp_pcm_prepare(struct hda_pcm_stream *hinfo,
spec->cur_hp_stream_tag = stream_tag;
spec->cur_hp_format = format;
mutex_unlock(&spec->config_mutex);
- vt1708_start_hp_work(spec);
+ vt1708_update_hp_work(spec);
return 0;
}
@@ -1188,7 +1192,7 @@ static int via_playback_multi_pcm_cleanup(struct hda_pcm_stream *hinfo,
snd_hda_multi_out_analog_cleanup(codec, &spec->multiout);
spec->active_streams &= ~STREAM_MULTI_OUT;
mutex_unlock(&spec->config_mutex);
- vt1708_stop_hp_work(spec);
+ vt1708_update_hp_work(spec);
return 0;
}
@@ -1203,7 +1207,7 @@ static int via_playback_hp_pcm_cleanup(struct hda_pcm_stream *hinfo,
snd_hda_codec_setup_stream(codec, spec->hp_dac_nid, 0, 0, 0);
spec->active_streams &= ~STREAM_INDEP_HP;
mutex_unlock(&spec->config_mutex);
- vt1708_stop_hp_work(spec);
+ vt1708_update_hp_work(spec);
return 0;
}
@@ -1645,7 +1649,8 @@ static void via_hp_automute(struct hda_codec *codec)
int nums;
struct via_spec *spec = codec->spec;
- if (!spec->hp_independent_mode && spec->autocfg.hp_pins[0])
+ if (!spec->hp_independent_mode && spec->autocfg.hp_pins[0] &&
+ (spec->codec_type != VT1708 || spec->vt1708_jack_detect))
present = snd_hda_jack_detect(codec, spec->autocfg.hp_pins[0]);
if (spec->smart51_enabled)
@@ -2612,8 +2617,6 @@ static int vt1708_jack_detect_get(struct snd_kcontrol *kcontrol,
if (spec->codec_type != VT1708)
return 0;
- spec->vt1708_jack_detect =
- !((snd_hda_codec_read(codec, 0x1, 0, 0xf84, 0) >> 8) & 0x1);
ucontrol->value.integer.value[0] = spec->vt1708_jack_detect;
return 0;
}
@@ -2623,18 +2626,22 @@ static int vt1708_jack_detect_put(struct snd_kcontrol *kcontrol,
{
struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
struct via_spec *spec = codec->spec;
- int change;
+ int val;
if (spec->codec_type != VT1708)
return 0;
- spec->vt1708_jack_detect = ucontrol->value.integer.value[0];
- change = (0x1 & (snd_hda_codec_read(codec, 0x1, 0, 0xf84, 0) >> 8))
- == !spec->vt1708_jack_detect;
- if (spec->vt1708_jack_detect) {
+ val = !!ucontrol->value.integer.value[0];
+ if (spec->vt1708_jack_detect == val)
+ return 0;
+ spec->vt1708_jack_detect = val;
+ if (spec->vt1708_jack_detect &&
+ snd_hda_get_bool_hint(codec, "analog_loopback_hp_detect") != 1) {
mute_aa_path(codec, 1);
notify_aa_path_ctls(codec);
}
- return change;
+ via_hp_automute(codec);
+ vt1708_update_hp_work(spec);
+ return 1;
}
static const struct snd_kcontrol_new vt1708_jack_detect_ctl = {
@@ -2771,6 +2778,7 @@ static int via_init(struct hda_codec *codec)
via_auto_init_unsol_event(codec);
via_hp_automute(codec);
+ vt1708_update_hp_work(spec);
return 0;
}
@@ -2787,7 +2795,9 @@ static void vt1708_update_hp_jack_state(struct work_struct *work)
spec->vt1708_hp_present ^= 1;
via_hp_automute(spec->codec);
}
- vt1708_start_hp_work(spec);
+ if (spec->vt1708_jack_detect)
+ schedule_delayed_work(&spec->vt1708_hp_work,
+ msecs_to_jiffies(100));
}
static int get_mux_nids(struct hda_codec *codec)
diff --git a/sound/pci/lx6464es/lx_core.c b/sound/pci/lx6464es/lx_core.c
index 5c8717e29ee..8c3e7fcefd9 100644
--- a/sound/pci/lx6464es/lx_core.c
+++ b/sound/pci/lx6464es/lx_core.c
@@ -78,10 +78,15 @@ unsigned long lx_dsp_reg_read(struct lx6464es *chip, int port)
return ioread32(address);
}
-void lx_dsp_reg_readbuf(struct lx6464es *chip, int port, u32 *data, u32 len)
+static void lx_dsp_reg_readbuf(struct lx6464es *chip, int port, u32 *data,
+ u32 len)
{
- void __iomem *address = lx_dsp_register(chip, port);
- memcpy_fromio(data, address, len*sizeof(u32));
+ u32 __iomem *address = lx_dsp_register(chip, port);
+ int i;
+
+ /* we cannot use memcpy_fromio */
+ for (i = 0; i != len; ++i)
+ data[i] = ioread32(address + i);
}
@@ -91,11 +96,15 @@ void lx_dsp_reg_write(struct lx6464es *chip, int port, unsigned data)
iowrite32(data, address);
}
-void lx_dsp_reg_writebuf(struct lx6464es *chip, int port, const u32 *data,
- u32 len)
+static void lx_dsp_reg_writebuf(struct lx6464es *chip, int port,
+ const u32 *data, u32 len)
{
- void __iomem *address = lx_dsp_register(chip, port);
- memcpy_toio(address, data, len*sizeof(u32));
+ u32 __iomem *address = lx_dsp_register(chip, port);
+ int i;
+
+	/* we cannot use memcpy_toio */
+ for (i = 0; i != len; ++i)
+ iowrite32(data[i], address + i);
}
diff --git a/sound/pci/lx6464es/lx_core.h b/sound/pci/lx6464es/lx_core.h
index 1dd562980b6..4d7ff797a64 100644
--- a/sound/pci/lx6464es/lx_core.h
+++ b/sound/pci/lx6464es/lx_core.h
@@ -72,10 +72,7 @@ enum {
};
unsigned long lx_dsp_reg_read(struct lx6464es *chip, int port);
-void lx_dsp_reg_readbuf(struct lx6464es *chip, int port, u32 *data, u32 len);
void lx_dsp_reg_write(struct lx6464es *chip, int port, unsigned data);
-void lx_dsp_reg_writebuf(struct lx6464es *chip, int port, const u32 *data,
- u32 len);
/* plx register access */
enum {
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index e760adad952..19ee2203cbb 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -6518,7 +6518,7 @@ static int __devinit snd_hdspm_create(struct snd_card *card,
hdspm->io_type = AES32;
hdspm->card_name = "RME AES32";
hdspm->midiPorts = 2;
- } else if ((hdspm->firmware_rev == 0xd5) ||
+ } else if ((hdspm->firmware_rev == 0xd2) ||
((hdspm->firmware_rev >= 0xc8) &&
(hdspm->firmware_rev <= 0xcf))) {
hdspm->io_type = MADI;
diff --git a/sound/pci/sis7019.c b/sound/pci/sis7019.c
index a391e622a19..28dfafb56dd 100644
--- a/sound/pci/sis7019.c
+++ b/sound/pci/sis7019.c
@@ -41,6 +41,7 @@ MODULE_SUPPORTED_DEVICE("{{SiS,SiS7019 Audio Accelerator}}");
static int index = SNDRV_DEFAULT_IDX1; /* Index 0-MAX */
static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */
static int enable = 1;
+static int codecs = 1;
module_param(index, int, 0444);
MODULE_PARM_DESC(index, "Index value for SiS7019 Audio Accelerator.");
@@ -48,6 +49,8 @@ module_param(id, charp, 0444);
MODULE_PARM_DESC(id, "ID string for SiS7019 Audio Accelerator.");
module_param(enable, bool, 0444);
MODULE_PARM_DESC(enable, "Enable SiS7019 Audio Accelerator.");
+module_param(codecs, int, 0444);
+MODULE_PARM_DESC(codecs, "Set bit to indicate that codec number is expected to be present (default 1)");
static DEFINE_PCI_DEVICE_TABLE(snd_sis7019_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x7019) },
@@ -140,6 +143,9 @@ struct sis7019 {
dma_addr_t silence_dma_addr;
};
+/* These values are also used by the module param 'codecs' to indicate
+ * which codecs should be present.
+ */
#define SIS_PRIMARY_CODEC_PRESENT 0x0001
#define SIS_SECONDARY_CODEC_PRESENT 0x0002
#define SIS_TERTIARY_CODEC_PRESENT 0x0004
@@ -1078,6 +1084,7 @@ static int sis_chip_init(struct sis7019 *sis)
{
unsigned long io = sis->ioport;
void __iomem *ioaddr = sis->ioaddr;
+ unsigned long timeout;
u16 status;
int count;
int i;
@@ -1104,21 +1111,45 @@ static int sis_chip_init(struct sis7019 *sis)
while ((inw(io + SIS_AC97_STATUS) & SIS_AC97_STATUS_BUSY) && --count)
udelay(1);
+ /* Command complete, we can let go of the semaphore now.
+ */
+ outl(SIS_AC97_SEMA_RELEASE, io + SIS_AC97_SEMA);
+ if (!count)
+ return -EIO;
+
/* Now that we've finished the reset, find out what's attached.
+ * There are some codec/board combinations that take an extremely
+ * long time to come up. 350+ ms has been observed in the field,
+ * so we'll give them up to 500ms.
*/
- status = inl(io + SIS_AC97_STATUS);
- if (status & SIS_AC97_STATUS_CODEC_READY)
- sis->codecs_present |= SIS_PRIMARY_CODEC_PRESENT;
- if (status & SIS_AC97_STATUS_CODEC2_READY)
- sis->codecs_present |= SIS_SECONDARY_CODEC_PRESENT;
- if (status & SIS_AC97_STATUS_CODEC3_READY)
- sis->codecs_present |= SIS_TERTIARY_CODEC_PRESENT;
-
- /* All done, let go of the semaphore, and check for errors
+ sis->codecs_present = 0;
+ timeout = msecs_to_jiffies(500) + jiffies;
+ while (time_before_eq(jiffies, timeout)) {
+ status = inl(io + SIS_AC97_STATUS);
+ if (status & SIS_AC97_STATUS_CODEC_READY)
+ sis->codecs_present |= SIS_PRIMARY_CODEC_PRESENT;
+ if (status & SIS_AC97_STATUS_CODEC2_READY)
+ sis->codecs_present |= SIS_SECONDARY_CODEC_PRESENT;
+ if (status & SIS_AC97_STATUS_CODEC3_READY)
+ sis->codecs_present |= SIS_TERTIARY_CODEC_PRESENT;
+
+ if (sis->codecs_present == codecs)
+ break;
+
+ msleep(1);
+ }
+
+ /* All done, check for errors.
*/
- outl(SIS_AC97_SEMA_RELEASE, io + SIS_AC97_SEMA);
- if (!sis->codecs_present || !count)
+ if (!sis->codecs_present) {
+ printk(KERN_ERR "sis7019: could not find any codecs\n");
return -EIO;
+ }
+
+ if (sis->codecs_present != codecs) {
+ printk(KERN_WARNING "sis7019: missing codecs, found %0x, expected %0x\n",
+ sis->codecs_present, codecs);
+ }
/* Let the hardware know that the audio driver is alive,
* and enable PCM slots on the AC-link for L/R playback (3 & 4) and
@@ -1390,6 +1421,17 @@ static int __devinit snd_sis7019_probe(struct pci_dev *pci,
if (!enable)
goto error_out;
+ /* The user can specify which codecs should be present so that we
+ * can wait for them to show up if they are slow to recover from
+ * the AC97 cold reset. We default to a single codec, the primary.
+ *
+ * We assume that SIS_PRIMARY_*_PRESENT matches bits 0-2.
+ */
+ codecs &= SIS_PRIMARY_CODEC_PRESENT | SIS_SECONDARY_CODEC_PRESENT |
+ SIS_TERTIARY_CODEC_PRESENT;
+ if (!codecs)
+ codecs = SIS_PRIMARY_CODEC_PRESENT;
+
rc = snd_card_create(index, id, THIS_MODULE, sizeof(*sis), &card);
if (rc < 0)
goto error_out;
diff --git a/sound/soc/atmel/Kconfig b/sound/soc/atmel/Kconfig
index bee3c94f58b..d1fcc816ce9 100644
--- a/sound/soc/atmel/Kconfig
+++ b/sound/soc/atmel/Kconfig
@@ -1,6 +1,6 @@
config SND_ATMEL_SOC
tristate "SoC Audio for the Atmel System-on-Chip"
- depends on ARCH_AT91 || AVR32
+ depends on ARCH_AT91
help
Say Y or M if you want to add support for codecs attached to
the ATMEL SSC interface. You will also need
@@ -24,25 +24,6 @@ config SND_AT91_SOC_SAM9G20_WM8731
Say Y if you want to add support for SoC audio on WM8731-based
AT91sam9g20 evaluation board.
-config SND_AT32_SOC_PLAYPAQ
- tristate "SoC Audio support for PlayPaq with WM8510"
- depends on SND_ATMEL_SOC && BOARD_PLAYPAQ && AT91_PROGRAMMABLE_CLOCKS
- select SND_ATMEL_SOC_SSC
- select SND_SOC_WM8510
- help
- Say Y or M here if you want to add support for SoC audio
- on the LRS PlayPaq.
-
-config SND_AT32_SOC_PLAYPAQ_SLAVE
- bool "Run CODEC on PlayPaq in slave mode"
- depends on SND_AT32_SOC_PLAYPAQ
- default n
- help
- Say Y if you want to run with the AT32 SSC generating the BCLK
- and FRAME signals on the PlayPaq. Unless you want to play
- with the AT32 as the SSC master, you probably want to say N here,
- as this will give you better sound quality.
-
config SND_AT91_SOC_AFEB9260
tristate "SoC Audio support for AFEB9260 board"
depends on ARCH_AT91 && MACH_AFEB9260 && SND_ATMEL_SOC
diff --git a/sound/soc/atmel/Makefile b/sound/soc/atmel/Makefile
index e7ea56bd5f8..a5c0bf19da7 100644
--- a/sound/soc/atmel/Makefile
+++ b/sound/soc/atmel/Makefile
@@ -8,9 +8,5 @@ obj-$(CONFIG_SND_ATMEL_SOC_SSC) += snd-soc-atmel_ssc_dai.o
# AT91 Machine Support
snd-soc-sam9g20-wm8731-objs := sam9g20_wm8731.o
-# AT32 Machine Support
-snd-soc-playpaq-objs := playpaq_wm8510.o
-
obj-$(CONFIG_SND_AT91_SOC_SAM9G20_WM8731) += snd-soc-sam9g20-wm8731.o
-obj-$(CONFIG_SND_AT32_SOC_PLAYPAQ) += snd-soc-playpaq.o
obj-$(CONFIG_SND_AT91_SOC_AFEB9260) += snd-soc-afeb9260.o
diff --git a/sound/soc/atmel/playpaq_wm8510.c b/sound/soc/atmel/playpaq_wm8510.c
deleted file mode 100644
index 73ae99ad457..00000000000
--- a/sound/soc/atmel/playpaq_wm8510.c
+++ /dev/null
@@ -1,473 +0,0 @@
-/* sound/soc/at32/playpaq_wm8510.c
- * ASoC machine driver for PlayPaq using WM8510 codec
- *
- * Copyright (C) 2008 Long Range Systems
- * Geoffrey Wossum <gwossum@acm.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This code is largely inspired by sound/soc/at91/eti_b1_wm8731.c
- *
- * NOTE: If you don't have the AT32 enhanced portmux configured (which
- * isn't currently in the mainline or Atmel patched kernel), you will
- * need to set the MCLK pin (PA30) to peripheral A in your board initialization
- * code. Something like:
- * at32_select_periph(GPIO_PIN_PA(30), GPIO_PERIPH_A, 0);
- *
- */
-
-/* #define DEBUG */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/clk.h>
-#include <linux/timer.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-
-#include <mach/at32ap700x.h>
-#include <mach/portmux.h>
-
-#include "../codecs/wm8510.h"
-#include "atmel-pcm.h"
-#include "atmel_ssc_dai.h"
-
-
-/*-------------------------------------------------------------------------*\
- * constants
-\*-------------------------------------------------------------------------*/
-#define MCLK_PIN GPIO_PIN_PA(30)
-#define MCLK_PERIPH GPIO_PERIPH_A
-
-
-/*-------------------------------------------------------------------------*\
- * data types
-\*-------------------------------------------------------------------------*/
-/* SSC clocking data */
-struct ssc_clock_data {
- /* CMR div */
- unsigned int cmr_div;
-
- /* Frame period (as needed by xCMR.PERIOD) */
- unsigned int period;
-
- /* The SSC clock rate these settings where calculated for */
- unsigned long ssc_rate;
-};
-
-
-/*-------------------------------------------------------------------------*\
- * module data
-\*-------------------------------------------------------------------------*/
-static struct clk *_gclk0;
-static struct clk *_pll0;
-
-#define CODEC_CLK (_gclk0)
-
-
-/*-------------------------------------------------------------------------*\
- * Sound SOC operations
-\*-------------------------------------------------------------------------*/
-#if defined CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE
-static struct ssc_clock_data playpaq_wm8510_calc_ssc_clock(
- struct snd_pcm_hw_params *params,
- struct snd_soc_dai *cpu_dai)
-{
- struct at32_ssc_info *ssc_p = snd_soc_dai_get_drvdata(cpu_dai);
- struct ssc_device *ssc = ssc_p->ssc;
- struct ssc_clock_data cd;
- unsigned int rate, width_bits, channels;
- unsigned int bitrate, ssc_div;
- unsigned actual_rate;
-
-
- /*
- * Figure out required bitrate
- */
- rate = params_rate(params);
- channels = params_channels(params);
- width_bits = snd_pcm_format_physical_width(params_format(params));
- bitrate = rate * width_bits * channels;
-
-
- /*
- * Figure out required SSC divider and period for required bitrate
- */
- cd.ssc_rate = clk_get_rate(ssc->clk);
- ssc_div = cd.ssc_rate / bitrate;
- cd.cmr_div = ssc_div / 2;
- if (ssc_div & 1) {
- /* round cmr_div up */
- cd.cmr_div++;
- }
- cd.period = width_bits - 1;
-
-
- /*
- * Find actual rate, compare to requested rate
- */
- actual_rate = (cd.ssc_rate / (cd.cmr_div * 2)) / (2 * (cd.period + 1));
- pr_debug("playpaq_wm8510: Request rate = %u, actual rate = %u\n",
- rate, actual_rate);
-
-
- return cd;
-}
-#endif /* CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE */
-
-
-
-static int playpaq_wm8510_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct at32_ssc_info *ssc_p = snd_soc_dai_get_drvdata(cpu_dai);
- struct ssc_device *ssc = ssc_p->ssc;
- unsigned int pll_out = 0, bclk = 0, mclk_div = 0;
- int ret;
-
-
- /* Due to difficulties with getting the correct clocks from the AT32's
- * PLL0, we're going to let the CODEC be in charge of all the clocks
- */
-#if !defined CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE
- const unsigned int fmt = (SND_SOC_DAIFMT_I2S |
- SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBM_CFM);
-#else
- struct ssc_clock_data cd;
- const unsigned int fmt = (SND_SOC_DAIFMT_I2S |
- SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBS_CFS);
-#endif
-
- if (ssc == NULL) {
- pr_warning("playpaq_wm8510_hw_params: ssc is NULL!\n");
- return -EINVAL;
- }
-
-
- /*
- * Figure out PLL and BCLK dividers for WM8510
- */
- switch (params_rate(params)) {
- case 48000:
- pll_out = 24576000;
- mclk_div = WM8510_MCLKDIV_2;
- bclk = WM8510_BCLKDIV_8;
- break;
-
- case 44100:
- pll_out = 22579200;
- mclk_div = WM8510_MCLKDIV_2;
- bclk = WM8510_BCLKDIV_8;
- break;
-
- case 22050:
- pll_out = 22579200;
- mclk_div = WM8510_MCLKDIV_4;
- bclk = WM8510_BCLKDIV_8;
- break;
-
- case 16000:
- pll_out = 24576000;
- mclk_div = WM8510_MCLKDIV_6;
- bclk = WM8510_BCLKDIV_8;
- break;
-
- case 11025:
- pll_out = 22579200;
- mclk_div = WM8510_MCLKDIV_8;
- bclk = WM8510_BCLKDIV_8;
- break;
-
- case 8000:
- pll_out = 24576000;
- mclk_div = WM8510_MCLKDIV_12;
- bclk = WM8510_BCLKDIV_8;
- break;
-
- default:
- pr_warning("playpaq_wm8510: Unsupported sample rate %d\n",
- params_rate(params));
- return -EINVAL;
- }
-
-
- /*
- * set CPU and CODEC DAI configuration
- */
- ret = snd_soc_dai_set_fmt(codec_dai, fmt);
- if (ret < 0) {
- pr_warning("playpaq_wm8510: "
- "Failed to set CODEC DAI format (%d)\n",
- ret);
- return ret;
- }
- ret = snd_soc_dai_set_fmt(cpu_dai, fmt);
- if (ret < 0) {
- pr_warning("playpaq_wm8510: "
- "Failed to set CPU DAI format (%d)\n",
- ret);
- return ret;
- }
-
-
- /*
- * Set CPU clock configuration
- */
-#if defined CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE
- cd = playpaq_wm8510_calc_ssc_clock(params, cpu_dai);
- pr_debug("playpaq_wm8510: cmr_div = %d, period = %d\n",
- cd.cmr_div, cd.period);
- ret = snd_soc_dai_set_clkdiv(cpu_dai, AT32_SSC_CMR_DIV, cd.cmr_div);
- if (ret < 0) {
- pr_warning("playpaq_wm8510: Failed to set CPU CMR_DIV (%d)\n",
- ret);
- return ret;
- }
- ret = snd_soc_dai_set_clkdiv(cpu_dai, AT32_SSC_TCMR_PERIOD,
- cd.period);
- if (ret < 0) {
- pr_warning("playpaq_wm8510: "
- "Failed to set CPU transmit period (%d)\n",
- ret);
- return ret;
- }
-#endif /* CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE */
-
-
- /*
- * Set CODEC clock configuration
- */
- pr_debug("playpaq_wm8510: "
- "pll_in = %ld, pll_out = %u, bclk = %x, mclk = %x\n",
- clk_get_rate(CODEC_CLK), pll_out, bclk, mclk_div);
-
-
-#if !defined CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE
- ret = snd_soc_dai_set_clkdiv(codec_dai, WM8510_BCLKDIV, bclk);
- if (ret < 0) {
- pr_warning
- ("playpaq_wm8510: Failed to set CODEC DAI BCLKDIV (%d)\n",
- ret);
- return ret;
- }
-#endif /* CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE */
-
-
- ret = snd_soc_dai_set_pll(codec_dai, 0, 0,
- clk_get_rate(CODEC_CLK), pll_out);
- if (ret < 0) {
- pr_warning("playpaq_wm8510: Failed to set CODEC DAI PLL (%d)\n",
- ret);
- return ret;
- }
-
-
- ret = snd_soc_dai_set_clkdiv(codec_dai, WM8510_MCLKDIV, mclk_div);
- if (ret < 0) {
- pr_warning("playpaq_wm8510: Failed to set CODEC MCLKDIV (%d)\n",
- ret);
- return ret;
- }
-
-
- return 0;
-}
-
-
-
-static struct snd_soc_ops playpaq_wm8510_ops = {
- .hw_params = playpaq_wm8510_hw_params,
-};
-
-
-
-static const struct snd_soc_dapm_widget playpaq_dapm_widgets[] = {
- SND_SOC_DAPM_MIC("Int Mic", NULL),
- SND_SOC_DAPM_SPK("Ext Spk", NULL),
-};
-
-
-
-static const struct snd_soc_dapm_route intercon[] = {
- /* speaker connected to SPKOUT */
- {"Ext Spk", NULL, "SPKOUTP"},
- {"Ext Spk", NULL, "SPKOUTN"},
-
- {"Mic Bias", NULL, "Int Mic"},
- {"MICN", NULL, "Mic Bias"},
- {"MICP", NULL, "Mic Bias"},
-};
-
-
-
-static int playpaq_wm8510_init(struct snd_soc_pcm_runtime *rtd)
-{
- struct snd_soc_codec *codec = rtd->codec;
- struct snd_soc_dapm_context *dapm = &codec->dapm;
- int i;
-
- /*
- * Add DAPM widgets
- */
- for (i = 0; i < ARRAY_SIZE(playpaq_dapm_widgets); i++)
- snd_soc_dapm_new_control(dapm, &playpaq_dapm_widgets[i]);
-
-
-
- /*
- * Setup audio path interconnects
- */
- snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
-
-
-
- /* always connected pins */
- snd_soc_dapm_enable_pin(dapm, "Int Mic");
- snd_soc_dapm_enable_pin(dapm, "Ext Spk");
-
-
-
- /* Make CSB show PLL rate */
- snd_soc_dai_set_clkdiv(rtd->codec_dai, WM8510_OPCLKDIV,
- WM8510_OPCLKDIV_1 | 4);
-
- return 0;
-}
-
-
-
-static struct snd_soc_dai_link playpaq_wm8510_dai = {
- .name = "WM8510",
- .stream_name = "WM8510 PCM",
- .cpu_dai_name= "atmel-ssc-dai.0",
- .platform_name = "atmel-pcm-audio",
- .codec_name = "wm8510-codec.0-0x1a",
- .codec_dai_name = "wm8510-hifi",
- .init = playpaq_wm8510_init,
- .ops = &playpaq_wm8510_ops,
-};
-
-
-
-static struct snd_soc_card snd_soc_playpaq = {
- .name = "LRS_PlayPaq_WM8510",
- .dai_link = &playpaq_wm8510_dai,
- .num_links = 1,
-};
-
-static struct platform_device *playpaq_snd_device;
-
-
-static int __init playpaq_asoc_init(void)
-{
- int ret = 0;
-
- /*
- * Configure MCLK for WM8510
- */
- _gclk0 = clk_get(NULL, "gclk0");
- if (IS_ERR(_gclk0)) {
- _gclk0 = NULL;
- ret = PTR_ERR(_gclk0);
- goto err_gclk0;
- }
- _pll0 = clk_get(NULL, "pll0");
- if (IS_ERR(_pll0)) {
- _pll0 = NULL;
- ret = PTR_ERR(_pll0);
- goto err_pll0;
- }
- ret = clk_set_parent(_gclk0, _pll0);
- if (ret) {
- pr_warning("snd-soc-playpaq: "
- "Failed to set PLL0 as parent for DAC clock\n");
- goto err_set_clk;
- }
- clk_set_rate(CODEC_CLK, 12000000);
- clk_enable(CODEC_CLK);
-
-#if defined CONFIG_AT32_ENHANCED_PORTMUX
- at32_select_periph(MCLK_PIN, MCLK_PERIPH, 0);
-#endif
-
-
- /*
- * Create and register platform device
- */
- playpaq_snd_device = platform_device_alloc("soc-audio", 0);
- if (playpaq_snd_device == NULL) {
- ret = -ENOMEM;
- goto err_device_alloc;
- }
-
- platform_set_drvdata(playpaq_snd_device, &snd_soc_playpaq);
-
- ret = platform_device_add(playpaq_snd_device);
- if (ret) {
- pr_warning("playpaq_wm8510: platform_device_add failed (%d)\n",
- ret);
- goto err_device_add;
- }
-
- return 0;
-
-
-err_device_add:
- if (playpaq_snd_device != NULL) {
- platform_device_put(playpaq_snd_device);
- playpaq_snd_device = NULL;
- }
-err_device_alloc:
-err_set_clk:
- if (_pll0 != NULL) {
- clk_put(_pll0);
- _pll0 = NULL;
- }
-err_pll0:
- if (_gclk0 != NULL) {
- clk_put(_gclk0);
- _gclk0 = NULL;
- }
- return ret;
-}
-
-
-static void __exit playpaq_asoc_exit(void)
-{
- if (_gclk0 != NULL) {
- clk_put(_gclk0);
- _gclk0 = NULL;
- }
- if (_pll0 != NULL) {
- clk_put(_pll0);
- _pll0 = NULL;
- }
-
-#if defined CONFIG_AT32_ENHANCED_PORTMUX
- at32_free_pin(MCLK_PIN);
-#endif
-
- platform_device_unregister(playpaq_snd_device);
- playpaq_snd_device = NULL;
-}
-
-module_init(playpaq_asoc_init);
-module_exit(playpaq_asoc_exit);
-
-MODULE_AUTHOR("Geoffrey Wossum <gwossum@acm.org>");
-MODULE_DESCRIPTION("ASoC machine driver for LRS PlayPaq");
-MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 4584514d93d..fa787d45d74 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -33,7 +33,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_CX20442
select SND_SOC_DA7210 if I2C
select SND_SOC_DFBMCS320
- select SND_SOC_JZ4740_CODEC if SOC_JZ4740
+ select SND_SOC_JZ4740_CODEC
select SND_SOC_LM4857 if I2C
select SND_SOC_MAX98088 if I2C
select SND_SOC_MAX98095 if I2C
diff --git a/sound/soc/codecs/ad1836.h b/sound/soc/codecs/ad1836.h
index 444747f0db2..dd7be0dbbc5 100644
--- a/sound/soc/codecs/ad1836.h
+++ b/sound/soc/codecs/ad1836.h
@@ -34,7 +34,7 @@
#define AD1836_ADC_CTRL2 13
#define AD1836_ADC_WORD_LEN_MASK 0x30
-#define AD1836_ADC_WORD_OFFSET 5
+#define AD1836_ADC_WORD_OFFSET 4
#define AD1836_ADC_SERFMT_MASK (7 << 6)
#define AD1836_ADC_SERFMT_PCK256 (0x4 << 6)
#define AD1836_ADC_SERFMT_PCK128 (0x5 << 6)
diff --git a/sound/soc/codecs/adau1373.c b/sound/soc/codecs/adau1373.c
index 1ccf8dd4757..45c63028b40 100644
--- a/sound/soc/codecs/adau1373.c
+++ b/sound/soc/codecs/adau1373.c
@@ -245,7 +245,7 @@ static const char *adau1373_bass_hpf_cutoff_text[] = {
};
static const unsigned int adau1373_bass_tlv[] = {
- TLV_DB_RANGE_HEAD(4),
+ TLV_DB_RANGE_HEAD(3),
0, 2, TLV_DB_SCALE_ITEM(-600, 600, 1),
3, 4, TLV_DB_SCALE_ITEM(950, 250, 0),
5, 7, TLV_DB_SCALE_ITEM(1400, 150, 0),
diff --git a/sound/soc/codecs/cs4270.c b/sound/soc/codecs/cs4270.c
index f1f237ecec2..73f46eb459f 100644
--- a/sound/soc/codecs/cs4270.c
+++ b/sound/soc/codecs/cs4270.c
@@ -601,7 +601,6 @@ static int cs4270_soc_suspend(struct snd_soc_codec *codec, pm_message_t mesg)
static int cs4270_soc_resume(struct snd_soc_codec *codec)
{
struct cs4270_private *cs4270 = snd_soc_codec_get_drvdata(codec);
- struct i2c_client *i2c_client = to_i2c_client(codec->dev);
int reg;
regulator_bulk_enable(ARRAY_SIZE(cs4270->supplies),
@@ -612,14 +611,7 @@ static int cs4270_soc_resume(struct snd_soc_codec *codec)
ndelay(500);
/* first restore the entire register cache ... */
- for (reg = CS4270_FIRSTREG; reg <= CS4270_LASTREG; reg++) {
- u8 val = snd_soc_read(codec, reg);
-
- if (i2c_smbus_write_byte_data(i2c_client, reg, val)) {
- dev_err(codec->dev, "i2c write failed\n");
- return -EIO;
- }
- }
+ snd_soc_cache_sync(codec);
/* ... then disable the power-down bits */
reg = snd_soc_read(codec, CS4270_PWRCTL);
diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c
index 23d1bd5dadd..69fde1506fe 100644
--- a/sound/soc/codecs/cs4271.c
+++ b/sound/soc/codecs/cs4271.c
@@ -434,7 +434,8 @@ static int cs4271_soc_suspend(struct snd_soc_codec *codec, pm_message_t mesg)
{
int ret;
/* Set power-down bit */
- ret = snd_soc_update_bits(codec, CS4271_MODE2, 0, CS4271_MODE2_PDN);
+ ret = snd_soc_update_bits(codec, CS4271_MODE2, CS4271_MODE2_PDN,
+ CS4271_MODE2_PDN);
if (ret < 0)
return ret;
return 0;
@@ -501,8 +502,9 @@ static int cs4271_probe(struct snd_soc_codec *codec)
return ret;
}
- ret = snd_soc_update_bits(codec, CS4271_MODE2, 0,
- CS4271_MODE2_PDN | CS4271_MODE2_CPEN);
+ ret = snd_soc_update_bits(codec, CS4271_MODE2,
+ CS4271_MODE2_PDN | CS4271_MODE2_CPEN,
+ CS4271_MODE2_PDN | CS4271_MODE2_CPEN);
if (ret < 0)
return ret;
ret = snd_soc_update_bits(codec, CS4271_MODE2, CS4271_MODE2_PDN, 0);
diff --git a/sound/soc/codecs/cs42l51.c b/sound/soc/codecs/cs42l51.c
index 8c3c8205d19..1ee66361f61 100644
--- a/sound/soc/codecs/cs42l51.c
+++ b/sound/soc/codecs/cs42l51.c
@@ -555,7 +555,7 @@ static int cs42l51_probe(struct snd_soc_codec *codec)
static struct snd_soc_codec_driver soc_codec_device_cs42l51 = {
.probe = cs42l51_probe,
- .reg_cache_size = CS42L51_NUMREGS,
+ .reg_cache_size = CS42L51_NUMREGS + 1,
.reg_word_size = sizeof(u8),
};
diff --git a/sound/soc/codecs/jz4740.c b/sound/soc/codecs/jz4740.c
index e373f8f0690..3e1f4e172bf 100644
--- a/sound/soc/codecs/jz4740.c
+++ b/sound/soc/codecs/jz4740.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <linux/delay.h>
diff --git a/sound/soc/codecs/max9877.c b/sound/soc/codecs/max9877.c
index 9e7e964a5fa..dcf6f2a1600 100644
--- a/sound/soc/codecs/max9877.c
+++ b/sound/soc/codecs/max9877.c
@@ -106,13 +106,13 @@ static int max9877_set_2reg(struct snd_kcontrol *kcontrol,
unsigned int mask = mc->max;
unsigned int val = (ucontrol->value.integer.value[0] & mask);
unsigned int val2 = (ucontrol->value.integer.value[1] & mask);
- unsigned int change = 1;
+ unsigned int change = 0;
- if (((max9877_regs[reg] >> shift) & mask) == val)
- change = 0;
+ if (((max9877_regs[reg] >> shift) & mask) != val)
+ change = 1;
- if (((max9877_regs[reg2] >> shift) & mask) == val2)
- change = 0;
+ if (((max9877_regs[reg2] >> shift) & mask) != val2)
+ change = 1;
if (change) {
max9877_regs[reg] &= ~(mask << shift);
diff --git a/sound/soc/codecs/rt5631.c b/sound/soc/codecs/rt5631.c
index 27a078cbb6e..4646e808b90 100644
--- a/sound/soc/codecs/rt5631.c
+++ b/sound/soc/codecs/rt5631.c
@@ -177,7 +177,7 @@ static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -95625, 375, 0);
static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -3450, 150, 0);
/* {0, +20, +24, +30, +35, +40, +44, +50, +52}dB */
static unsigned int mic_bst_tlv[] = {
- TLV_DB_RANGE_HEAD(6),
+ TLV_DB_RANGE_HEAD(7),
0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
1, 1, TLV_DB_SCALE_ITEM(2000, 0, 0),
2, 2, TLV_DB_SCALE_ITEM(2400, 0, 0),
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index d15695d1c27..bbcf921166f 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -365,7 +365,7 @@ static const DECLARE_TLV_DB_SCALE(capture_6db_attenuate, -600, 600, 0);
/* tlv for mic gain, 0db 20db 30db 40db */
static const unsigned int mic_gain_tlv[] = {
- TLV_DB_RANGE_HEAD(4),
+ TLV_DB_RANGE_HEAD(2),
0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
1, 3, TLV_DB_SCALE_ITEM(2000, 1000, 0),
};
diff --git a/sound/soc/codecs/sta32x.c b/sound/soc/codecs/sta32x.c
index bb82408ab8e..d2f37152f94 100644
--- a/sound/soc/codecs/sta32x.c
+++ b/sound/soc/codecs/sta32x.c
@@ -76,6 +76,8 @@ struct sta32x_priv {
unsigned int mclk;
unsigned int format;
+
+ u32 coef_shadow[STA32X_COEF_COUNT];
};
static const DECLARE_TLV_DB_SCALE(mvol_tlv, -12700, 50, 1);
@@ -227,6 +229,7 @@ static int sta32x_coefficient_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct sta32x_priv *sta32x = snd_soc_codec_get_drvdata(codec);
int numcoef = kcontrol->private_value >> 16;
int index = kcontrol->private_value & 0xffff;
unsigned int cfud;
@@ -239,6 +242,11 @@ static int sta32x_coefficient_put(struct snd_kcontrol *kcontrol,
snd_soc_write(codec, STA32X_CFUD, cfud);
snd_soc_write(codec, STA32X_CFADDR2, index);
+ for (i = 0; i < numcoef && (index + i < STA32X_COEF_COUNT); i++)
+ sta32x->coef_shadow[index + i] =
+ (ucontrol->value.bytes.data[3 * i] << 16)
+ | (ucontrol->value.bytes.data[3 * i + 1] << 8)
+ | (ucontrol->value.bytes.data[3 * i + 2]);
for (i = 0; i < 3 * numcoef; i++)
snd_soc_write(codec, STA32X_B1CF1 + i,
ucontrol->value.bytes.data[i]);
@@ -252,6 +260,48 @@ static int sta32x_coefficient_put(struct snd_kcontrol *kcontrol,
return 0;
}
+int sta32x_sync_coef_shadow(struct snd_soc_codec *codec)
+{
+ struct sta32x_priv *sta32x = snd_soc_codec_get_drvdata(codec);
+ unsigned int cfud;
+ int i;
+
+ /* preserve reserved bits in STA32X_CFUD */
+ cfud = snd_soc_read(codec, STA32X_CFUD) & 0xf0;
+
+ for (i = 0; i < STA32X_COEF_COUNT; i++) {
+ snd_soc_write(codec, STA32X_CFADDR2, i);
+ snd_soc_write(codec, STA32X_B1CF1,
+ (sta32x->coef_shadow[i] >> 16) & 0xff);
+ snd_soc_write(codec, STA32X_B1CF2,
+ (sta32x->coef_shadow[i] >> 8) & 0xff);
+ snd_soc_write(codec, STA32X_B1CF3,
+ (sta32x->coef_shadow[i]) & 0xff);
+ /* chip documentation does not say if the bits are
+ * self-clearing, so do it explicitly */
+ snd_soc_write(codec, STA32X_CFUD, cfud);
+ snd_soc_write(codec, STA32X_CFUD, cfud | 0x01);
+ }
+ return 0;
+}
+
+int sta32x_cache_sync(struct snd_soc_codec *codec)
+{
+ unsigned int mute;
+ int rc;
+
+ if (!codec->cache_sync)
+ return 0;
+
+ /* mute during register sync */
+ mute = snd_soc_read(codec, STA32X_MMUTE);
+ snd_soc_write(codec, STA32X_MMUTE, mute | STA32X_MMUTE_MMUTE);
+ sta32x_sync_coef_shadow(codec);
+ rc = snd_soc_cache_sync(codec);
+ snd_soc_write(codec, STA32X_MMUTE, mute);
+ return rc;
+}
+
#define SINGLE_COEF(xname, index) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
.info = sta32x_coefficient_info, \
@@ -661,7 +711,7 @@ static int sta32x_set_bias_level(struct snd_soc_codec *codec,
return ret;
}
- snd_soc_cache_sync(codec);
+ sta32x_cache_sync(codec);
}
/* Power up to mute */
@@ -790,6 +840,17 @@ static int sta32x_probe(struct snd_soc_codec *codec)
STA32X_CxCFG_OM_MASK,
2 << STA32X_CxCFG_OM_SHIFT);
+ /* initialize coefficient shadow RAM with reset values */
+ for (i = 4; i <= 49; i += 5)
+ sta32x->coef_shadow[i] = 0x400000;
+ for (i = 50; i <= 54; i++)
+ sta32x->coef_shadow[i] = 0x7fffff;
+ sta32x->coef_shadow[55] = 0x5a9df7;
+ sta32x->coef_shadow[56] = 0x7fffff;
+ sta32x->coef_shadow[59] = 0x7fffff;
+ sta32x->coef_shadow[60] = 0x400000;
+ sta32x->coef_shadow[61] = 0x400000;
+
sta32x_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
/* Bias level configuration will have done an extra enable */
regulator_bulk_disable(ARRAY_SIZE(sta32x->supplies), sta32x->supplies);
diff --git a/sound/soc/codecs/sta32x.h b/sound/soc/codecs/sta32x.h
index b97ee5a7566..d8e32a6262e 100644
--- a/sound/soc/codecs/sta32x.h
+++ b/sound/soc/codecs/sta32x.h
@@ -19,6 +19,7 @@
/* STA326 register addresses */
#define STA32X_REGISTER_COUNT 0x2d
+#define STA32X_COEF_COUNT 62
#define STA32X_CONFA 0x00
#define STA32X_CONFB 0x01
diff --git a/sound/soc/codecs/uda1380.c b/sound/soc/codecs/uda1380.c
index c5ca8cfea60..0441893e270 100644
--- a/sound/soc/codecs/uda1380.c
+++ b/sound/soc/codecs/uda1380.c
@@ -863,13 +863,13 @@ static struct i2c_driver uda1380_i2c_driver = {
static int __init uda1380_modinit(void)
{
- int ret;
+ int ret = 0;
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
ret = i2c_add_driver(&uda1380_i2c_driver);
if (ret != 0)
pr_err("Failed to register UDA1380 I2C driver: %d\n", ret);
#endif
- return 0;
+ return ret;
}
module_init(uda1380_modinit);
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
index 7e5ec03f6f8..a7c9ae17fc7 100644
--- a/sound/soc/codecs/wm8731.c
+++ b/sound/soc/codecs/wm8731.c
@@ -453,6 +453,7 @@ static int wm8731_set_bias_level(struct snd_soc_codec *codec,
snd_soc_write(codec, WM8731_PWR, 0xffff);
regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies),
wm8731->supplies);
+ codec->cache_sync = 1;
break;
}
codec->dapm.bias_level = level;
diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c
index a9504710bb6..3a629d0d690 100644
--- a/sound/soc/codecs/wm8753.c
+++ b/sound/soc/codecs/wm8753.c
@@ -190,6 +190,9 @@ static int wm8753_set_dai(struct snd_kcontrol *kcontrol,
struct wm8753_priv *wm8753 = snd_soc_codec_get_drvdata(codec);
u16 ioctl;
+ if (wm8753->dai_func == ucontrol->value.integer.value[0])
+ return 0;
+
if (codec->active)
return -EBUSY;
diff --git a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c
index 0293763debe..5a14d5c0e0e 100644
--- a/sound/soc/codecs/wm8958-dsp2.c
+++ b/sound/soc/codecs/wm8958-dsp2.c
@@ -60,6 +60,8 @@ static int wm8958_dsp2_fw(struct snd_soc_codec *codec, const char *name,
}
if (memcmp(fw->data, "WMFW", 4) != 0) {
+ memcpy(&data32, fw->data, sizeof(data32));
+ data32 = be32_to_cpu(data32);
dev_err(codec->dev, "%s: firmware has bad file magic %08x\n",
name, data32);
goto err;
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index 91d3c6dbeba..53edd9a8c75 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -1973,7 +1973,7 @@ static int wm8962_reset(struct snd_soc_codec *codec)
static const DECLARE_TLV_DB_SCALE(inpga_tlv, -2325, 75, 0);
static const DECLARE_TLV_DB_SCALE(mixin_tlv, -1500, 300, 0);
static const unsigned int mixinpga_tlv[] = {
- TLV_DB_RANGE_HEAD(7),
+ TLV_DB_RANGE_HEAD(5),
0, 1, TLV_DB_SCALE_ITEM(0, 600, 0),
2, 2, TLV_DB_SCALE_ITEM(1300, 1300, 0),
3, 4, TLV_DB_SCALE_ITEM(1800, 200, 0),
@@ -1988,7 +1988,7 @@ static const DECLARE_TLV_DB_SCALE(bypass_tlv, -1500, 300, 0);
static const DECLARE_TLV_DB_SCALE(out_tlv, -12100, 100, 1);
static const DECLARE_TLV_DB_SCALE(hp_tlv, -700, 100, 0);
static const unsigned int classd_tlv[] = {
- TLV_DB_RANGE_HEAD(7),
+ TLV_DB_RANGE_HEAD(2),
0, 6, TLV_DB_SCALE_ITEM(0, 150, 0),
7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0),
};
diff --git a/sound/soc/codecs/wm8993.c b/sound/soc/codecs/wm8993.c
index eec8e143511..d1a142f48b0 100644
--- a/sound/soc/codecs/wm8993.c
+++ b/sound/soc/codecs/wm8993.c
@@ -512,7 +512,7 @@ static const DECLARE_TLV_DB_SCALE(drc_comp_threash, -4500, 75, 0);
static const DECLARE_TLV_DB_SCALE(drc_comp_amp, -2250, 75, 0);
static const DECLARE_TLV_DB_SCALE(drc_min_tlv, -1800, 600, 0);
static const unsigned int drc_max_tlv[] = {
- TLV_DB_RANGE_HEAD(4),
+ TLV_DB_RANGE_HEAD(2),
0, 2, TLV_DB_SCALE_ITEM(1200, 600, 0),
3, 3, TLV_DB_SCALE_ITEM(3600, 0, 0),
};
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 9c982e47eb9..d0c545b73d7 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -1325,15 +1325,15 @@ SND_SOC_DAPM_DAC("DAC1R", NULL, WM8994_POWER_MANAGEMENT_5, 0, 0),
};
static const struct snd_soc_dapm_widget wm8994_adc_revd_widgets[] = {
-SND_SOC_DAPM_MUX_E("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux,
- adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
-SND_SOC_DAPM_MUX_E("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux,
- adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_VIRT_MUX_E("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux,
+ adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_VIRT_MUX_E("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux,
+ adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
};
static const struct snd_soc_dapm_widget wm8994_adc_widgets[] = {
-SND_SOC_DAPM_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux),
-SND_SOC_DAPM_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux),
+SND_SOC_DAPM_VIRT_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux),
+SND_SOC_DAPM_VIRT_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux),
};
static const struct snd_soc_dapm_widget wm8994_dapm_widgets[] = {
@@ -2357,6 +2357,11 @@ static int wm8994_hw_params(struct snd_pcm_substream *substream,
bclk |= best << WM8994_AIF1_BCLK_DIV_SHIFT;
lrclk = bclk_rate / params_rate(params);
+ if (!lrclk) {
+ dev_err(dai->dev, "Unable to generate LRCLK from %dHz BCLK\n",
+ bclk_rate);
+ return -EINVAL;
+ }
dev_dbg(dai->dev, "Using LRCLK rate %d for actual LRCLK %dHz\n",
lrclk, bclk_rate / lrclk);
@@ -3178,6 +3183,8 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
switch (wm8994->revision) {
case 0:
case 1:
+ case 2:
+ case 3:
wm8994->hubs.dcs_codes_l = -9;
wm8994->hubs.dcs_codes_r = -5;
break;
diff --git a/sound/soc/codecs/wm8996.c b/sound/soc/codecs/wm8996.c
index 645c980d6b8..a33b04d1719 100644
--- a/sound/soc/codecs/wm8996.c
+++ b/sound/soc/codecs/wm8996.c
@@ -1968,6 +1968,7 @@ static int wm8996_set_sysclk(struct snd_soc_dai *dai,
break;
case 24576000:
ratediv = WM8996_SYSCLK_DIV;
+ wm8996->sysclk /= 2;
case 12288000:
snd_soc_update_bits(codec, WM8996_AIF_RATE,
WM8996_SYSCLK_RATE, WM8996_SYSCLK_RATE);
diff --git a/sound/soc/codecs/wm9081.c b/sound/soc/codecs/wm9081.c
index 3cd35a02c28..4a398c3bfe8 100644
--- a/sound/soc/codecs/wm9081.c
+++ b/sound/soc/codecs/wm9081.c
@@ -807,7 +807,6 @@ static int wm9081_set_bias_level(struct snd_soc_codec *codec,
mdelay(100);
/* Normal bias enable & soft start off */
- reg |= WM9081_BIAS_ENA;
reg &= ~WM9081_VMID_RAMP;
snd_soc_write(codec, WM9081_VMID_CONTROL, reg);
@@ -818,7 +817,7 @@ static int wm9081_set_bias_level(struct snd_soc_codec *codec,
}
/* VMID 2*240k */
- reg = snd_soc_read(codec, WM9081_BIAS_CONTROL_1);
+ reg = snd_soc_read(codec, WM9081_VMID_CONTROL);
reg &= ~WM9081_VMID_SEL_MASK;
reg |= 0x04;
snd_soc_write(codec, WM9081_VMID_CONTROL, reg);
@@ -830,14 +829,15 @@ static int wm9081_set_bias_level(struct snd_soc_codec *codec,
break;
case SND_SOC_BIAS_OFF:
- /* Startup bias source */
+ /* Startup bias source and disable bias */
reg = snd_soc_read(codec, WM9081_BIAS_CONTROL_1);
reg |= WM9081_BIAS_SRC;
+ reg &= ~WM9081_BIAS_ENA;
snd_soc_write(codec, WM9081_BIAS_CONTROL_1, reg);
- /* Disable VMID and biases with soft ramping */
+ /* Disable VMID with soft ramping */
reg = snd_soc_read(codec, WM9081_VMID_CONTROL);
- reg &= ~(WM9081_VMID_SEL_MASK | WM9081_BIAS_ENA);
+ reg &= ~WM9081_VMID_SEL_MASK;
reg |= WM9081_VMID_RAMP;
snd_soc_write(codec, WM9081_VMID_CONTROL, reg);
diff --git a/sound/soc/codecs/wm9090.c b/sound/soc/codecs/wm9090.c
index 2b5252c9e37..f94c06057c6 100644
--- a/sound/soc/codecs/wm9090.c
+++ b/sound/soc/codecs/wm9090.c
@@ -177,19 +177,19 @@ static void wait_for_dc_servo(struct snd_soc_codec *codec)
}
static const unsigned int in_tlv[] = {
- TLV_DB_RANGE_HEAD(6),
+ TLV_DB_RANGE_HEAD(3),
0, 0, TLV_DB_SCALE_ITEM(-600, 0, 0),
1, 3, TLV_DB_SCALE_ITEM(-350, 350, 0),
4, 6, TLV_DB_SCALE_ITEM(600, 600, 0),
};
static const unsigned int mix_tlv[] = {
- TLV_DB_RANGE_HEAD(4),
+ TLV_DB_RANGE_HEAD(2),
0, 2, TLV_DB_SCALE_ITEM(-1200, 300, 0),
3, 3, TLV_DB_SCALE_ITEM(0, 0, 0),
};
static const DECLARE_TLV_DB_SCALE(out_tlv, -5700, 100, 0);
static const unsigned int spkboost_tlv[] = {
- TLV_DB_RANGE_HEAD(7),
+ TLV_DB_RANGE_HEAD(2),
0, 6, TLV_DB_SCALE_ITEM(0, 150, 0),
7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0),
};
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
index 84f33d4ea2c..48e61e91240 100644
--- a/sound/soc/codecs/wm_hubs.c
+++ b/sound/soc/codecs/wm_hubs.c
@@ -40,7 +40,7 @@ static const DECLARE_TLV_DB_SCALE(outmix_tlv, -2100, 300, 0);
static const DECLARE_TLV_DB_SCALE(spkmixout_tlv, -1800, 600, 1);
static const DECLARE_TLV_DB_SCALE(outpga_tlv, -5700, 100, 0);
static const unsigned int spkboost_tlv[] = {
- TLV_DB_RANGE_HEAD(7),
+ TLV_DB_RANGE_HEAD(2),
0, 6, TLV_DB_SCALE_ITEM(0, 150, 0),
7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0),
};
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index 0268cf98973..83c4bd5b2dd 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -694,6 +694,7 @@ static int __devinit fsl_ssi_probe(struct platform_device *pdev)
/* Initialize the device_attribute structure */
dev_attr = &ssi_private->dev_attr;
+ sysfs_attr_init(&dev_attr->attr);
dev_attr->attr.name = "statistics";
dev_attr->attr.mode = S_IRUGO;
dev_attr->show = fsl_sysfs_ssi_show;
diff --git a/sound/soc/fsl/mpc8610_hpcd.c b/sound/soc/fsl/mpc8610_hpcd.c
index 31af405bda8..ae49f1c78c6 100644
--- a/sound/soc/fsl/mpc8610_hpcd.c
+++ b/sound/soc/fsl/mpc8610_hpcd.c
@@ -392,7 +392,8 @@ static int mpc8610_hpcd_probe(struct platform_device *pdev)
}
if (strcasecmp(sprop, "i2s-slave") == 0) {
- machine_data->dai_format = SND_SOC_DAIFMT_I2S;
+ machine_data->dai_format =
+ SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBM_CFM;
machine_data->codec_clk_direction = SND_SOC_CLOCK_OUT;
machine_data->cpu_clk_direction = SND_SOC_CLOCK_IN;
@@ -409,31 +410,38 @@ static int mpc8610_hpcd_probe(struct platform_device *pdev)
}
machine_data->clk_frequency = be32_to_cpup(iprop);
} else if (strcasecmp(sprop, "i2s-master") == 0) {
- machine_data->dai_format = SND_SOC_DAIFMT_I2S;
+ machine_data->dai_format =
+ SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS;
machine_data->codec_clk_direction = SND_SOC_CLOCK_IN;
machine_data->cpu_clk_direction = SND_SOC_CLOCK_OUT;
} else if (strcasecmp(sprop, "lj-slave") == 0) {
- machine_data->dai_format = SND_SOC_DAIFMT_LEFT_J;
+ machine_data->dai_format =
+ SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_CBM_CFM;
machine_data->codec_clk_direction = SND_SOC_CLOCK_OUT;
machine_data->cpu_clk_direction = SND_SOC_CLOCK_IN;
} else if (strcasecmp(sprop, "lj-master") == 0) {
- machine_data->dai_format = SND_SOC_DAIFMT_LEFT_J;
+ machine_data->dai_format =
+ SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_CBS_CFS;
machine_data->codec_clk_direction = SND_SOC_CLOCK_IN;
machine_data->cpu_clk_direction = SND_SOC_CLOCK_OUT;
} else if (strcasecmp(sprop, "rj-slave") == 0) {
- machine_data->dai_format = SND_SOC_DAIFMT_RIGHT_J;
+ machine_data->dai_format =
+ SND_SOC_DAIFMT_RIGHT_J | SND_SOC_DAIFMT_CBM_CFM;
machine_data->codec_clk_direction = SND_SOC_CLOCK_OUT;
machine_data->cpu_clk_direction = SND_SOC_CLOCK_IN;
} else if (strcasecmp(sprop, "rj-master") == 0) {
- machine_data->dai_format = SND_SOC_DAIFMT_RIGHT_J;
+ machine_data->dai_format =
+ SND_SOC_DAIFMT_RIGHT_J | SND_SOC_DAIFMT_CBS_CFS;
machine_data->codec_clk_direction = SND_SOC_CLOCK_IN;
machine_data->cpu_clk_direction = SND_SOC_CLOCK_OUT;
} else if (strcasecmp(sprop, "ac97-slave") == 0) {
- machine_data->dai_format = SND_SOC_DAIFMT_AC97;
+ machine_data->dai_format =
+ SND_SOC_DAIFMT_AC97 | SND_SOC_DAIFMT_CBM_CFM;
machine_data->codec_clk_direction = SND_SOC_CLOCK_OUT;
machine_data->cpu_clk_direction = SND_SOC_CLOCK_IN;
} else if (strcasecmp(sprop, "ac97-master") == 0) {
- machine_data->dai_format = SND_SOC_DAIFMT_AC97;
+ machine_data->dai_format =
+ SND_SOC_DAIFMT_AC97 | SND_SOC_DAIFMT_CBS_CFS;
machine_data->codec_clk_direction = SND_SOC_CLOCK_IN;
machine_data->cpu_clk_direction = SND_SOC_CLOCK_OUT;
} else {
diff --git a/sound/soc/imx/Kconfig b/sound/soc/imx/Kconfig
index b133bfcc584..738391757f2 100644
--- a/sound/soc/imx/Kconfig
+++ b/sound/soc/imx/Kconfig
@@ -28,7 +28,7 @@ config SND_MXC_SOC_WM1133_EV1
config SND_SOC_MX27VIS_AIC32X4
tristate "SoC audio support for Visstrim M10 boards"
- depends on MACH_IMX27_VISSTRIM_M10
+ depends on MACH_IMX27_VISSTRIM_M10 && I2C
select SND_SOC_TLV320AIC32X4
select SND_MXC_SOC_MX2
help
diff --git a/sound/soc/kirkwood/Kconfig b/sound/soc/kirkwood/Kconfig
index 8f49e165f4d..c62d715235e 100644
--- a/sound/soc/kirkwood/Kconfig
+++ b/sound/soc/kirkwood/Kconfig
@@ -12,6 +12,7 @@ config SND_KIRKWOOD_SOC_I2S
config SND_KIRKWOOD_SOC_OPENRD
tristate "SoC Audio support for Kirkwood Openrd Client"
depends on SND_KIRKWOOD_SOC && (MACH_OPENRD_CLIENT || MACH_OPENRD_ULTIMATE)
+ depends on I2C
select SND_KIRKWOOD_SOC_I2S
select SND_SOC_CS42L51
help
@@ -20,7 +21,7 @@ config SND_KIRKWOOD_SOC_OPENRD
config SND_KIRKWOOD_SOC_T5325
tristate "SoC Audio support for HP t5325"
- depends on SND_KIRKWOOD_SOC && MACH_T5325
+ depends on SND_KIRKWOOD_SOC && MACH_T5325 && I2C
select SND_KIRKWOOD_SOC_I2S
select SND_SOC_ALC5623
help
diff --git a/sound/soc/mxs/mxs-pcm.c b/sound/soc/mxs/mxs-pcm.c
index dea5aa4aa64..f39d7dd9fbc 100644
--- a/sound/soc/mxs/mxs-pcm.c
+++ b/sound/soc/mxs/mxs-pcm.c
@@ -357,3 +357,6 @@ static void __exit snd_mxs_pcm_exit(void)
platform_driver_unregister(&mxs_pcm_driver);
}
module_exit(snd_mxs_pcm_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mxs-pcm-audio");
diff --git a/sound/soc/mxs/mxs-sgtl5000.c b/sound/soc/mxs/mxs-sgtl5000.c
index 7fbeaec06eb..1c57f6630a4 100644
--- a/sound/soc/mxs/mxs-sgtl5000.c
+++ b/sound/soc/mxs/mxs-sgtl5000.c
@@ -171,3 +171,4 @@ module_exit(mxs_sgtl5000_exit);
MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_DESCRIPTION("MXS ALSA SoC Machine driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mxs-sgtl5000");
diff --git a/sound/soc/nuc900/nuc900-ac97.c b/sound/soc/nuc900/nuc900-ac97.c
index 9c0edad90d8..a4e3237956e 100644
--- a/sound/soc/nuc900/nuc900-ac97.c
+++ b/sound/soc/nuc900/nuc900-ac97.c
@@ -365,7 +365,8 @@ static int __devinit nuc900_ac97_drvprobe(struct platform_device *pdev)
if (ret)
goto out3;
- mfp_set_groupg(nuc900_audio->dev); /* enbale ac97 multifunction pin*/
+ /* enbale ac97 multifunction pin */
+	/* enable ac97 multifunction pin */
return 0;
diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig
index ffd2242e305..a0f7d3cfa47 100644
--- a/sound/soc/pxa/Kconfig
+++ b/sound/soc/pxa/Kconfig
@@ -151,6 +151,7 @@ config SND_SOC_ZYLONITE
config SND_SOC_RAUMFELD
tristate "SoC Audio support Raumfeld audio adapter"
depends on SND_PXA2XX_SOC && (MACH_RAUMFELD_SPEAKER || MACH_RAUMFELD_CONNECTOR)
+ depends on I2C && SPI_MASTER
select SND_PXA_SOC_SSP
select SND_SOC_CS4270
select SND_SOC_AK4104
@@ -159,7 +160,7 @@ config SND_SOC_RAUMFELD
config SND_PXA2XX_SOC_HX4700
tristate "SoC Audio support for HP iPAQ hx4700"
- depends on SND_PXA2XX_SOC && MACH_H4700
+ depends on SND_PXA2XX_SOC && MACH_H4700 && I2C
select SND_PXA2XX_SOC_I2S
select SND_SOC_AK4641
help
diff --git a/sound/soc/pxa/hx4700.c b/sound/soc/pxa/hx4700.c
index 65c124831a0..c664e33fb6d 100644
--- a/sound/soc/pxa/hx4700.c
+++ b/sound/soc/pxa/hx4700.c
@@ -209,9 +209,10 @@ static int __devinit hx4700_audio_probe(struct platform_device *pdev)
snd_soc_card_hx4700.dev = &pdev->dev;
ret = snd_soc_register_card(&snd_soc_card_hx4700);
if (ret)
- return ret;
+ gpio_free_array(hx4700_audio_gpios,
+ ARRAY_SIZE(hx4700_audio_gpios));
- return 0;
+ return ret;
}
static int __devexit hx4700_audio_remove(struct platform_device *pdev)
diff --git a/sound/soc/samsung/jive_wm8750.c b/sound/soc/samsung/jive_wm8750.c
index 1826acf20f7..8e523fd9189 100644
--- a/sound/soc/samsung/jive_wm8750.c
+++ b/sound/soc/samsung/jive_wm8750.c
@@ -101,7 +101,6 @@ static int jive_wm8750_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_codec *codec = rtd->codec;
struct snd_soc_dapm_context *dapm = &codec->dapm;
- int err;
/* These endpoints are not being used. */
snd_soc_dapm_nc_pin(dapm, "LINPUT2");
@@ -131,7 +130,7 @@ static struct snd_soc_card snd_soc_machine_jive = {
.dai_link = &jive_dai,
.num_links = 1,
- .dapm_widgtets = wm8750_dapm_widgets,
+ .dapm_widgets = wm8750_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(wm8750_dapm_widgets),
.dapm_routes = audio_map,
.num_dapm_routes = ARRAY_SIZE(audio_map),
diff --git a/sound/soc/samsung/smdk2443_wm9710.c b/sound/soc/samsung/smdk2443_wm9710.c
index 3a0dbfc793f..8bd1dc5706b 100644
--- a/sound/soc/samsung/smdk2443_wm9710.c
+++ b/sound/soc/samsung/smdk2443_wm9710.c
@@ -12,6 +12,7 @@
*
*/
+#include <linux/module.h>
#include <sound/soc.h>
static struct snd_soc_card smdk2443;
diff --git a/sound/soc/samsung/smdk_wm8994.c b/sound/soc/samsung/smdk_wm8994.c
index f75e43997d5..ad9ac42522e 100644
--- a/sound/soc/samsung/smdk_wm8994.c
+++ b/sound/soc/samsung/smdk_wm8994.c
@@ -9,6 +9,7 @@
#include "../codecs/wm8994.h"
#include <sound/pcm_params.h>
+#include <linux/module.h>
/*
* Default CFG switch settings to use this driver:
diff --git a/sound/soc/samsung/speyside.c b/sound/soc/samsung/speyside.c
index 85bf541a771..4b8e35410eb 100644
--- a/sound/soc/samsung/speyside.c
+++ b/sound/soc/samsung/speyside.c
@@ -191,7 +191,7 @@ static int speyside_late_probe(struct snd_soc_card *card)
snd_soc_dapm_ignore_suspend(&card->dapm, "Headset Mic");
snd_soc_dapm_ignore_suspend(&card->dapm, "Main AMIC");
snd_soc_dapm_ignore_suspend(&card->dapm, "Main DMIC");
- snd_soc_dapm_ignore_suspend(&card->dapm, "Speaker");
+ snd_soc_dapm_ignore_suspend(&card->dapm, "Main Speaker");
snd_soc_dapm_ignore_suspend(&card->dapm, "WM1250 Output");
snd_soc_dapm_ignore_suspend(&card->dapm, "WM1250 Input");
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index a5d3685a5d3..a25fa63ce9a 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -709,6 +709,12 @@ int snd_soc_resume(struct device *dev)
struct snd_soc_card *card = dev_get_drvdata(dev);
int i, ac97_control = 0;
+ /* If the initialization of this soc device failed, there is no codec
+ * associated with it. Just bail out in this case.
+ */
+ if (list_empty(&card->codec_dev_list))
+ return 0;
+
/* AC97 devices might have other drivers hanging off them so
* need to resume immediately. Other drivers don't have that
* problem and may take a substantial amount of time to resume
diff --git a/sound/soc/soc-utils.c b/sound/soc/soc-utils.c
index 0c12b98484b..4220bb0f273 100644
--- a/sound/soc/soc-utils.c
+++ b/sound/soc/soc-utils.c
@@ -58,7 +58,36 @@ int snd_soc_params_to_bclk(struct snd_pcm_hw_params *params)
}
EXPORT_SYMBOL_GPL(snd_soc_params_to_bclk);
-static struct snd_soc_platform_driver dummy_platform;
+static const struct snd_pcm_hardware dummy_dma_hardware = {
+ .formats = 0xffffffff,
+ .channels_min = 1,
+ .channels_max = UINT_MAX,
+
+ /* Random values to keep userspace happy when checking constraints */
+ .info = SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER,
+ .buffer_bytes_max = 128*1024,
+ .period_bytes_min = PAGE_SIZE,
+ .period_bytes_max = PAGE_SIZE*2,
+ .periods_min = 2,
+ .periods_max = 128,
+};
+
+static int dummy_dma_open(struct snd_pcm_substream *substream)
+{
+ snd_soc_set_runtime_hwparams(substream, &dummy_dma_hardware);
+
+ return 0;
+}
+
+static struct snd_pcm_ops dummy_dma_ops = {
+ .open = dummy_dma_open,
+ .ioctl = snd_pcm_lib_ioctl,
+};
+
+static struct snd_soc_platform_driver dummy_platform = {
+ .ops = &dummy_dma_ops,
+};
static __devinit int snd_soc_dummy_probe(struct platform_device *pdev)
{
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index b61945f3af9..32d2a21f2e3 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -1633,6 +1633,37 @@ YAMAHA_DEVICE(0x7010, "UB99"),
}
},
{
+ /* Roland GAIA SH-01 */
+ USB_DEVICE(0x0582, 0x0111),
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .vendor_name = "Roland",
+ .product_name = "GAIA",
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_COMPOSITE,
+ .data = (const struct snd_usb_audio_quirk[]) {
+ {
+ .ifnum = 0,
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE
+ },
+ {
+ .ifnum = 1,
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE
+ },
+ {
+ .ifnum = 2,
+ .type = QUIRK_MIDI_FIXED_ENDPOINT,
+ .data = &(const struct snd_usb_midi_endpoint_info) {
+ .out_cables = 0x0003,
+ .in_cables = 0x0003
+ }
+ },
+ {
+ .ifnum = -1
+ }
+ }
+ }
+},
+{
USB_DEVICE(0x0582, 0x0113),
.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
/* .vendor_name = "BOSS", */
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 7d98676808d..955930e0a5c 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -463,7 +463,8 @@ static int run_perf_stat(int argc __used, const char **argv)
list_for_each_entry(counter, &evsel_list->entries, node) {
if (create_perf_stat_counter(counter, first) < 0) {
- if (errno == EINVAL || errno == ENOSYS || errno == ENOENT) {
+ if (errno == EINVAL || errno == ENOSYS ||
+ errno == ENOENT || errno == EOPNOTSUPP) {
if (verbose)
ui__warning("%s event is not supported by the kernel.\n",
event_name(counter));
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index e4262642258..d7915d4e77c 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -34,6 +34,16 @@ int __perf_evsel__sample_size(u64 sample_type)
return size;
}
+static void hists__init(struct hists *hists)
+{
+ memset(hists, 0, sizeof(*hists));
+ hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
+ hists->entries_in = &hists->entries_in_array[0];
+ hists->entries_collapsed = RB_ROOT;
+ hists->entries = RB_ROOT;
+ pthread_mutex_init(&hists->lock, NULL);
+}
+
void perf_evsel__init(struct perf_evsel *evsel,
struct perf_event_attr *attr, int idx)
{
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index bcd05d05b4f..33c17a2b2a8 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -388,7 +388,7 @@ static int write_event_desc(int fd, struct perf_header *h __used,
/*
* write event string as passed on cmdline
*/
- ret = do_write_string(fd, attr->name);
+ ret = do_write_string(fd, event_name(attr));
if (ret < 0)
return ret;
/*
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index a36a3fa81ff..abef2703cd2 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -1211,13 +1211,3 @@ size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp)
return ret;
}
-
-void hists__init(struct hists *hists)
-{
- memset(hists, 0, sizeof(*hists));
- hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
- hists->entries_in = &hists->entries_in_array[0];
- hists->entries_collapsed = RB_ROOT;
- hists->entries = RB_ROOT;
- pthread_mutex_init(&hists->lock, NULL);
-}
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index c86c1d27bd1..89289c8e935 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -63,8 +63,6 @@ struct hists {
struct callchain_cursor callchain_cursor;
};
-void hists__init(struct hists *hists);
-
struct hist_entry *__hists__add_entry(struct hists *self,
struct addr_location *al,
struct symbol *parent, u64 period);
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 85c1e6b76f0..0f4555ce906 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -1333,6 +1333,10 @@ int perf_session__cpu_bitmap(struct perf_session *session,
}
map = cpu_map__new(cpu_list);
+ if (map == NULL) {
+ pr_err("Invalid cpu_list\n");
+ return -1;
+ }
for (i = 0; i < map->nr; i++) {
int cpu = map->map[i];
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index 0a7ed5b5e28..6c164dc9ee9 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -1537,6 +1537,8 @@ process_flags(struct event *event, struct print_arg *arg, char **tok)
field = malloc_or_die(sizeof(*field));
type = process_arg(event, field, &token);
+ while (type == EVENT_OP)
+ type = process_op(event, field, &token);
if (test_type_token(type, token, EVENT_DELIM, ","))
goto out_free;